430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793207942079520796207972079820799208002080120802208032080420805208062080720808208092081020811208122081320814208152081620817208182081920820208212082220823208242082520826208272082820829208302083120832208332083420835208362083720838208392084020841208422084320844208452084620847208482084920850208512085220853208542085520856208572085820859208602086120862208632086420865208662086720868208692087020871208722087320874208752087620877208782087920880208812088220883208842088520886208872088820889208902089120892208932089420895208962089720898208992090020901209022090320904209052090620907209082090920910209112091220913209142091520916209172091820919209202092120922209232092420925209262092720928209292093020931209322093320934209352093620937209382093920940209412094220943209442094520946209472094820949209502095120952209532095420955209562095720958209592096020961209622096320964209652096620967209682096920970209712097220973209742097520976209772097820979209802098120982209832098420985209862098720988209892099020991209922099320994209952099620997209982099921000210012100221003210042100521006210072100821009210102101121012210132101421015210162101721018210192102021021210222102321024210252102621027210282102921030210312103221033210342103521036210372103821039210402104121042210432104421045210462104721048210492105021051210522105321054210552105621057210582105921060210612106221063210642106521066210672106821069210702107121072210732107421075210762107721078210792108021081210822108321084210852108621087210882108921090210912109221093210942109521096210972109821099211002110121102211032110421105211062110721108211092111021111211122111321114211152111621117211182111921120211212112221123211242112521126211272112821129211302113121132211332113421135211362113721138211392114021141211422114321144211452114621147211482114921150211512115221153211542115521156211572115821159211602116121162211632116421165211662116721168211692117021171211722117321174211752117621177211782117921180211812118221183211842118521186211872118821189211902119121192211932119421195211962119721198211992120021201212022120321204212052120621207212082120921210212112121221213212142121521216212172121821219212202122121222212232122421225212262122721228212292123021231212322123321234212352123621237212382123921240212412124221243212442124521246212472124821249212502125121252212532125421255212562125721258212592126021261212622126321264212652126621267212682126921270212712127221273212742127521276212772127821279212802128121282212832128421285212862128721288212892129021291212922129321294212952129621297212982129921300213012130221303213042130521306213072130821309213102131121312213132131421315213162131721318213192132021321213222132321324213252132621327213282132921330213312133221333213342133521336213372133821339213402134121342213432134421345213462134721348213492135021351213522135321354213552135621357213582135921360213612136221363213642136521366213672136821369213702137121372213732137421375213762137721378213792138021381213822138321384213852138621387213882138921390213912139221393213942139521396213972139821399214002140121402214032140421405214062140721408214092141021411214122141321414214152141621417214182
141921420214212142221423214242142521426214272142821429214302143121432214332143421435214362143721438214392144021441214422144321444214452144621447214482144921450214512145221453214542145521456214572145821459214602146121462214632146421465214662146721468214692147021471214722147321474214752147621477214782147921480214812148221483214842148521486214872148821489214902149121492214932149421495214962149721498214992150021501215022150321504215052150621507215082150921510215112151221513215142151521516215172151821519215202152121522215232152421525215262152721528215292153021531215322153321534215352153621537215382153921540215412154221543215442154521546215472154821549215502155121552215532155421555215562155721558215592156021561215622156321564215652156621567215682156921570215712157221573215742157521576215772157821579215802158121582215832158421585215862158721588215892159021591215922159321594215952159621597215982159921600216012160221603216042160521606216072160821609216102161121612216132161421615216162161721618216192162021621216222162321624216252162621627216282162921630216312163221633216342163521636216372163821639216402164121642216432164421645216462164721648216492165021651216522165321654216552165621657216582165921660216612166221663216642166521666216672166821669216702167121672216732167421675216762167721678216792168021681216822168321684216852168621687216882168921690216912169221693216942169521696216972169821699217002170121702217032170421705217062170721708217092171021711217122171321714217152171621717217182171921720217212172221723217242172521726217272172821729217302173121732217332173421735217362173721738217392174021741217422174321744217452174621747217482174921750217512175221753217542175521756217572175821759217602176121762217632176421765217662176721768217692177021771217722177321774217752177621777217782177921780217812178221783217842178521786217872178821789217902179121792217932179421795217962179721798217992180021801218022180321804218052180621807218082180921810218112181221813218142181521816218172181821819218202182121822218232182421825218262182721828218292183021831218322183321834218352183621837218382183921840218412184221843218442184521846218472184821849218502185121852218532185421855218562185721858218592186021861218622186321864218652186621867218682186921870218712187221873218742187521876218772187821879218802188121882218832188421885218862188721888218892189021891218922189321894218952189621897218982189921900219012190221903219042190521906219072190821909219102191121912219132191421915219162191721918219192192021921219222192321924219252192621927219282192921930219312193221933219342193521936219372193821939219402194121942219432194421945219462194721948219492195021951219522195321954219552195621957219582195921960219612196221963219642196521966219672196821969219702197121972219732197421975219762197721978219792198021981219822198321984219852198621987219882198921990219912199221993219942199521996219972199821999220002200122002220032200422005220062200722008220092201022011220122201322014220152201622017220182201922020220212202222023220242202522026220272202822029220302203122032220332203422035220362203722038220392204022041220422204322044220452204622047220482204922050220512205222053220542205522056220572205822059220602206122062220632206422065220662206722068220692207022071220722207322074220752207622077220782207922080220812208222083220842208522086220872208822089220902209122092220932209422095220962209722098220992210022101221022210322104221052210622107221082210922110221112211222113221142211522116221172211822119221202212122122221232212422125221262212722128221292
213022131221322213322134221352213622137221382213922140221412214222143221442214522146221472214822149221502215122152221532215422155221562215722158221592216022161221622216322164221652216622167221682216922170221712217222173221742217522176221772217822179221802218122182221832218422185221862218722188221892219022191221922219322194221952219622197221982219922200222012220222203222042220522206222072220822209222102221122212222132221422215222162221722218222192222022221222222222322224222252222622227222282222922230222312223222233222342223522236222372223822239222402224122242222432224422245222462224722248222492225022251222522225322254222552225622257222582225922260222612226222263222642226522266222672226822269222702227122272222732227422275222762227722278222792228022281222822228322284222852228622287222882228922290222912229222293222942229522296222972229822299223002230122302223032230422305223062230722308223092231022311223122231322314223152231622317223182231922320223212232222323223242232522326223272232822329223302233122332223332233422335223362233722338223392234022341223422234322344223452234622347223482234922350223512235222353223542235522356223572235822359223602236122362223632236422365223662236722368223692237022371223722237322374223752237622377223782237922380223812238222383223842238522386223872238822389223902239122392223932239422395223962239722398223992240022401224022240322404224052240622407224082240922410224112241222413224142241522416224172241822419224202242122422224232242422425224262242722428224292243022431224322243322434224352243622437224382243922440224412244222443224442244522446224472244822449224502245122452224532245422455224562245722458224592246022461224622246322464224652246622467224682246922470224712247222473224742247522476224772247822479224802248122482224832248422485224862248722488224892249022491224922249322494224952249622497224982249922500225012250222503225042250522506225072250822509225102251122512225132251422515225162251722518225192252022521225222252322524225252252622527225282252922530225312253222533225342253522536225372253822539225402254122542225432254422545225462254722548225492255022551225522255322554225552255622557225582255922560225612256222563225642256522566225672256822569225702257122572225732257422575225762257722578225792258022581225822258322584225852258622587225882258922590225912259222593225942259522596225972259822599226002260122602226032260422605226062260722608226092261022611226122261322614226152261622617226182261922620226212262222623226242262522626226272262822629226302263122632226332263422635226362263722638226392264022641226422264322644226452264622647226482264922650226512265222653226542265522656226572265822659226602266122662226632266422665226662266722668226692267022671226722267322674226752267622677226782267922680226812268222683226842268522686226872268822689226902269122692226932269422695226962269722698226992270022701227022270322704227052270622707227082270922710227112271222713227142271522716227172271822719227202272122722227232272422725227262272722728227292273022731227322273322734227352273622737227382273922740227412274222743227442274522746227472274822749227502275122752227532275422755227562275722758227592276022761227622276322764227652276622767227682276922770227712277222773227742277522776227772277822779227802278122782227832278422785227862278722788227892279022791227922279322794227952279622797227982279922800228012280222803228042280522806228072280822809228102281122812228132281422815228162281722818228192282022821228222282322824228252282622827228282282922830228312283222833228342283522836228372283822839228402
284122842228432284422845228462284722848228492285022851228522285322854228552285622857228582285922860228612286222863228642286522866228672286822869228702287122872228732287422875228762287722878228792288022881228822288322884228852288622887228882288922890228912289222893228942289522896228972289822899229002290122902229032290422905229062290722908229092291022911229122291322914229152291622917229182291922920229212292222923229242292522926229272292822929229302293122932229332293422935229362293722938229392294022941229422294322944229452294622947229482294922950229512295222953229542295522956229572295822959229602296122962229632296422965229662296722968229692297022971229722297322974229752297622977229782297922980229812298222983229842298522986229872298822989229902299122992229932299422995229962299722998229992300023001230022300323004230052300623007230082300923010230112301223013230142301523016230172301823019230202302123022230232302423025230262302723028230292303023031230322303323034230352303623037230382303923040230412304223043230442304523046230472304823049230502305123052230532305423055230562305723058230592306023061230622306323064230652306623067230682306923070230712307223073230742307523076230772307823079230802308123082230832308423085230862308723088230892309023091230922309323094230952309623097230982309923100231012310223103231042310523106231072310823109231102311123112231132311423115231162311723118231192312023121231222312323124231252312623127231282312923130231312313223133231342313523136231372313823139231402314123142231432314423145231462314723148231492315023151231522315323154231552315623157231582315923160231612316223163231642316523166231672316823169231702317123172231732317423175231762317723178231792318023181231822318323184231852318623187231882318923190231912319223193231942319523196231972319823199232002320123202232032320423205232062320723208232092321023211232122321323214232152321623217232182321923220232212322223223232242322523226232272322823229232302323123232232332323423235232362323723238232392324023241232422324323244232452324623247232482324923250232512325223253232542325523256232572325823259232602326123262232632326423265232662326723268232692327023271232722327323274232752327623277232782327923280232812328223283232842328523286232872328823289232902329123292232932329423295232962329723298232992330023301233022330323304233052330623307233082330923310233112331223313233142331523316233172331823319233202332123322233232332423325233262332723328233292333023331233322333323334233352333623337233382333923340233412334223343233442334523346233472334823349233502335123352233532335423355233562335723358233592336023361233622336323364233652336623367233682336923370233712337223373233742337523376233772337823379233802338123382233832338423385233862338723388233892339023391233922339323394233952339623397233982339923400234012340223403234042340523406234072340823409234102341123412234132341423415234162341723418234192342023421234222342323424234252342623427234282342923430234312343223433234342343523436234372343823439234402344123442234432344423445234462344723448234492345023451234522345323454234552345623457234582345923460234612346223463234642346523466234672346823469234702347123472234732347423475234762347723478234792348023481234822348323484234852348623487234882348923490234912349223493234942349523496234972349823499235002350123502235032350423505235062350723508235092351023511235122351323514235152351623517235182351923520235212352223523235242352523526235272352823529235302353123532235332353423535235362353723538235392354023541235422354323544235452354623547235482354923550235512
355223553235542355523556235572355823559235602356123562235632356423565235662356723568235692357023571235722357323574235752357623577235782357923580235812358223583235842358523586235872358823589235902359123592235932359423595235962359723598235992360023601236022360323604236052360623607236082360923610236112361223613236142361523616236172361823619236202362123622236232362423625236262362723628236292363023631236322363323634236352363623637236382363923640236412364223643236442364523646236472364823649236502365123652236532365423655236562365723658236592366023661236622366323664236652366623667236682366923670236712367223673236742367523676236772367823679236802368123682236832368423685236862368723688236892369023691236922369323694236952369623697236982369923700237012370223703237042370523706237072370823709237102371123712237132371423715237162371723718237192372023721237222372323724237252372623727237282372923730237312373223733237342373523736237372373823739237402374123742237432374423745237462374723748237492375023751237522375323754237552375623757237582375923760237612376223763237642376523766237672376823769237702377123772237732377423775237762377723778237792378023781237822378323784237852378623787237882378923790237912379223793237942379523796237972379823799238002380123802238032380423805238062380723808238092381023811238122381323814238152381623817238182381923820238212382223823238242382523826238272382823829238302383123832238332383423835238362383723838238392384023841238422384323844238452384623847238482384923850238512385223853238542385523856238572385823859238602386123862238632386423865238662386723868238692387023871238722387323874238752387623877238782387923880238812388223883238842388523886238872388823889238902389123892238932389423895238962389723898238992390023901239022390323904239052390623907239082390923910239112391223913239142391523916239172391823919239202392123922239232392423925239262392723928239292393023931239322393323934239352393623937239382393923940239412394223943239442394523946239472394823949239502395123952239532395423955239562395723958239592396023961239622396323964239652396623967239682396923970239712397223973239742397523976239772397823979239802398123982239832398423985239862398723988239892399023991239922399323994239952399623997239982399924000240012400224003240042400524006240072400824009240102401124012240132401424015240162401724018240192402024021240222402324024240252402624027240282402924030240312403224033240342403524036240372403824039240402404124042240432404424045240462404724048240492405024051240522405324054240552405624057240582405924060240612406224063240642406524066240672406824069240702407124072240732407424075240762407724078240792408024081240822408324084240852408624087240882408924090240912409224093240942409524096240972409824099241002410124102241032410424105241062410724108241092411024111241122411324114241152411624117241182411924120241212412224123241242412524126241272412824129241302413124132241332413424135241362413724138241392414024141241422414324144241452414624147241482414924150241512415224153241542415524156241572415824159241602416124162241632416424165241662416724168241692417024171241722417324174241752417624177241782417924180241812418224183241842418524186241872418824189241902419124192241932419424195241962419724198241992420024201242022420324204242052420624207242082420924210242112421224213242142421524216242172421824219242202422124222242232422424225242262422724228242292423024231242322423324234242352423624237242382423924240242412424224243242442424524246242472424824249242502425124252242532425424255242562425724258242592426024261242622
- diff -Nur gcc-4.4.6.orig/gcc/builtins.c gcc-4.4.6/gcc/builtins.c
- --- gcc-4.4.6.orig/gcc/builtins.c 2010-12-07 19:56:56.000000000 +0100
- +++ gcc-4.4.6/gcc/builtins.c 2011-10-22 19:23:08.512581300 +0200
- @@ -11108,7 +11108,7 @@
-
- do
- {
- - code = va_arg (ap, enum tree_code);
- + code = va_arg (ap, int);
- switch (code)
- {
- case 0:
- diff -Nur gcc-4.4.6.orig/gcc/calls.c gcc-4.4.6/gcc/calls.c
- --- gcc-4.4.6.orig/gcc/calls.c 2010-09-24 17:07:36.000000000 +0200
- +++ gcc-4.4.6/gcc/calls.c 2011-10-22 19:23:08.512581300 +0200
- @@ -3447,7 +3447,7 @@
- for (; count < nargs; count++)
- {
- rtx val = va_arg (p, rtx);
- - enum machine_mode mode = va_arg (p, enum machine_mode);
- + enum machine_mode mode = va_arg (p, int);
-
- /* We cannot convert the arg value to the mode the library wants here;
- must do it earlier where we know the signedness of the arg. */
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/avr32-elf.h gcc-4.4.6/gcc/config/avr32/avr32-elf.h
- --- gcc-4.4.6.orig/gcc/config/avr32/avr32-elf.h 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/avr32-elf.h 2011-10-22 19:23:08.516581300 +0200
- @@ -0,0 +1,91 @@
- +/*
- + Elf specific definitions.
- + Copyright 2003,2004,2005,2006,2007,2008,2009 Atmel Corporation.
- +
- + This file is part of GCC.
- +
- + This program is free software; you can redistribute it and/or modify
- + it under the terms of the GNU General Public License as published by
- + the Free Software Foundation; either version 2 of the License, or
- + (at your option) any later version.
- +
- + This program is distributed in the hope that it will be useful,
- + but WITHOUT ANY WARRANTY; without even the implied warranty of
- + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- + GNU General Public License for more details.
- +
- + You should have received a copy of the GNU General Public License
- + along with this program; if not, write to the Free Software
- + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
- +
- +
- +/*****************************************************************************
- + * Controlling the Compiler Driver, 'gcc'
- + *****************************************************************************/
- +
- +/* Run-time Target Specification. */
- +#undef TARGET_VERSION
- +#define TARGET_VERSION fputs (" (AVR32 GNU with ELF)", stderr);
- +
- +/*
- +Another C string constant used much like LINK_SPEC. The
- +difference between the two is that STARTFILE_SPEC is used at
- +the very beginning of the command given to the linker.
- +
- +If this macro is not defined, a default is provided that loads the
- +standard C startup file from the usual place. See gcc.c.
- +*/
- +#if 0
- +#undef STARTFILE_SPEC
- +#define STARTFILE_SPEC "crt0%O%s crti%O%s crtbegin%O%s"
- +#endif
- +#undef STARTFILE_SPEC
- +#define STARTFILE_SPEC "%{mflashvault: crtfv.o%s} %{!mflashvault: crt0.o%s} \
- + crti.o%s crtbegin.o%s"
- +
- +#undef LINK_SPEC
- +#define LINK_SPEC "%{muse-oscall:--defsym __do_not_use_oscall_coproc__=0} %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}} %{mpart=uc3a3revd:-mavr32elf_uc3a3256s;:%{mpart=*:-mavr32elf_%*}} %{mcpu=*:-mavr32elf_%*}"
- +
- +
- +/*
- +Another C string constant used much like LINK_SPEC. The
- +difference between the two is that ENDFILE_SPEC is used at
- +the very end of the command given to the linker.
- +
- +Do not define this macro if it does not need to do anything.
- +*/
- +#undef ENDFILE_SPEC
- +#define ENDFILE_SPEC "crtend%O%s crtn%O%s"
- +
- +
- +/* Target CPU builtins. */
- +#define TARGET_CPU_CPP_BUILTINS() \
- + do \
- + { \
- + builtin_define ("__avr32__"); \
- + builtin_define ("__AVR32__"); \
- + builtin_define ("__AVR32_ELF__"); \
- + builtin_define (avr32_part->macro); \
- + builtin_define (avr32_arch->macro); \
- + if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A) \
- + builtin_define ("__AVR32_AVR32A__"); \
- + else \
- + builtin_define ("__AVR32_AVR32B__"); \
- + if (TARGET_UNALIGNED_WORD) \
- + builtin_define ("__AVR32_HAS_UNALIGNED_WORD__"); \
- + if (TARGET_SIMD) \
- + builtin_define ("__AVR32_HAS_SIMD__"); \
- + if (TARGET_DSP) \
- + builtin_define ("__AVR32_HAS_DSP__"); \
- + if (TARGET_RMW) \
- + builtin_define ("__AVR32_HAS_RMW__"); \
- + if (TARGET_BRANCH_PRED) \
- + builtin_define ("__AVR32_HAS_BRANCH_PRED__"); \
- + if (TARGET_FAST_FLOAT) \
- + builtin_define ("__AVR32_FAST_FLOAT__"); \
- + if (TARGET_FLASHVAULT) \
- + builtin_define ("__AVR32_FLASHVAULT__"); \
- + if (TARGET_NO_MUL_INSNS) \
- + builtin_define ("__AVR32_NO_MUL__"); \
- + } \
- + while (0)
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/avr32-modes.def gcc-4.4.6/gcc/config/avr32/avr32-modes.def
- --- gcc-4.4.6.orig/gcc/config/avr32/avr32-modes.def 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/avr32-modes.def 2011-10-22 19:23:08.524581303 +0200
- @@ -0,0 +1 @@
- +VECTOR_MODES (INT, 4); /* V4QI V2HI */
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/avr32-protos.h gcc-4.4.6/gcc/config/avr32/avr32-protos.h
- --- gcc-4.4.6.orig/gcc/config/avr32/avr32-protos.h 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/avr32-protos.h 2011-10-22 19:23:08.524581303 +0200
- @@ -0,0 +1,196 @@
- +/*
- + Prototypes for exported functions defined in avr32.c
- + Copyright 2003,2004,2005,2006,2007,2008,2009 Atmel Corporation.
- +
- + This file is part of GCC.
- +
- + This program is free software; you can redistribute it and/or modify
- + it under the terms of the GNU General Public License as published by
- + the Free Software Foundation; either version 2 of the License, or
- + (at your option) any later version.
- +
- + This program is distributed in the hope that it will be useful,
- + but WITHOUT ANY WARRANTY; without even the implied warranty of
- + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- + GNU General Public License for more details.
- +
- + You should have received a copy of the GNU General Public License
- + along with this program; if not, write to the Free Software
- + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
- +
- +
- +#ifndef AVR32_PROTOS_H
- +#define AVR32_PROTOS_H
- +
- +extern const int swap_reg[];
- +
- +extern int avr32_valid_macmac_bypass (rtx, rtx);
- +extern int avr32_valid_mulmac_bypass (rtx, rtx);
- +
- +extern int avr32_decode_lcomm_symbol_offset (rtx, int *);
- +extern void avr32_encode_lcomm_symbol_offset (tree, char *, int);
- +
- +extern const char *avr32_strip_name_encoding (const char *);
- +
- +extern rtx avr32_get_note_reg_equiv (rtx insn);
- +
- +extern int avr32_use_return_insn (int iscond);
- +
- +extern void avr32_make_reglist16 (int reglist16_vect, char *reglist16_string);
- +
- +extern void avr32_make_reglist8 (int reglist8_vect, char *reglist8_string);
- +extern void avr32_make_fp_reglist_w (int reglist_mask, char *reglist_string);
- +extern void avr32_make_fp_reglist_d (int reglist_mask, char *reglist_string);
- +
- +extern void avr32_output_return_instruction (int single_ret_inst,
- + int iscond, rtx cond,
- + rtx r12_imm);
- +extern void avr32_expand_prologue (void);
- +extern void avr32_set_return_address (rtx source, rtx scratch);
- +
- +extern int avr32_hard_regno_mode_ok (int regno, enum machine_mode mode);
- +extern int avr32_extra_constraint_s (rtx value, const int strict);
- +extern int avr32_eh_return_data_regno (const int n);
- +extern int avr32_initial_elimination_offset (const int from, const int to);
- +extern rtx avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode,
- + tree type, int named);
- +extern void avr32_init_cumulative_args (CUMULATIVE_ARGS * cum, tree fntype,
- + rtx libname, tree fndecl);
- +extern void avr32_function_arg_advance (CUMULATIVE_ARGS * cum,
- + enum machine_mode mode,
- + tree type, int named);
- +#ifdef ARGS_SIZE_RTX
- +/* expr.h defines ARGS_SIZE_RTX and `enum direction'. */
- +extern enum direction avr32_function_arg_padding (enum machine_mode mode,
- + tree type);
- +#endif /* ARGS_SIZE_RTX */
- +extern rtx avr32_function_value (tree valtype, tree func, bool outgoing);
- +extern rtx avr32_libcall_value (enum machine_mode mode);
- +extern int avr32_sched_use_dfa_pipeline_interface (void);
- +extern bool avr32_return_in_memory (tree type, tree fntype);
- +extern void avr32_regs_to_save (char *operand);
- +extern void avr32_target_asm_function_prologue (FILE * file,
- + HOST_WIDE_INT size);
- +extern void avr32_target_asm_function_epilogue (FILE * file,
- + HOST_WIDE_INT size);
- +extern void avr32_trampoline_template (FILE * file);
- +extern void avr32_initialize_trampoline (rtx addr, rtx fnaddr,
- + rtx static_chain);
- +extern int avr32_legitimate_address (enum machine_mode mode, rtx x,
- + int strict);
- +extern int avr32_legitimate_constant_p (rtx x);
- +
- +extern int avr32_legitimate_pic_operand_p (rtx x);
- +
- +extern rtx avr32_find_symbol (rtx x);
- +extern void avr32_select_section (rtx exp, int reloc, int align);
- +extern void avr32_encode_section_info (tree decl, rtx rtl, int first);
- +extern void avr32_asm_file_end (FILE * stream);
- +extern void avr32_asm_output_ascii (FILE * stream, char *ptr, int len);
- +extern void avr32_asm_output_common (FILE * stream, const char *name,
- + int size, int rounded);
- +extern void avr32_asm_output_label (FILE * stream, const char *name);
- +extern void avr32_asm_declare_object_name (FILE * stream, char *name,
- + tree decl);
- +extern void avr32_asm_globalize_label (FILE * stream, const char *name);
- +extern void avr32_asm_weaken_label (FILE * stream, const char *name);
- +extern void avr32_asm_output_external (FILE * stream, tree decl,
- + const char *name);
- +extern void avr32_asm_output_external_libcall (FILE * stream, rtx symref);
- +extern void avr32_asm_output_labelref (FILE * stream, const char *name);
- +extern void avr32_notice_update_cc (rtx exp, rtx insn);
- +extern void avr32_print_operand (FILE * stream, rtx x, int code);
- +extern void avr32_print_operand_address (FILE * stream, rtx x);
- +
- +extern int avr32_symbol (rtx x);
- +
- +extern void avr32_select_rtx_section (enum machine_mode mode, rtx x,
- + unsigned HOST_WIDE_INT align);
- +
- +extern int avr32_load_multiple_operation (rtx op, enum machine_mode mode);
- +extern int avr32_store_multiple_operation (rtx op, enum machine_mode mode);
- +
- +extern int avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c,
- + const char *str);
- +
- +extern bool avr32_cannot_force_const_mem (rtx x);
- +
- +extern void avr32_init_builtins (void);
- +
- +extern rtx avr32_expand_builtin (tree exp, rtx target, rtx subtarget,
- + enum machine_mode mode, int ignore);
- +
- +extern bool avr32_must_pass_in_stack (enum machine_mode mode, tree type);
- +
- +extern bool avr32_strict_argument_naming (CUMULATIVE_ARGS * ca);
- +
- +extern bool avr32_pass_by_reference (CUMULATIVE_ARGS * cum,
- + enum machine_mode mode,
- + tree type, bool named);
- +
- +extern rtx avr32_gen_load_multiple (rtx * regs, int count, rtx from,
- + int write_back, int in_struct_p,
- + int scalar_p);
- +extern rtx avr32_gen_store_multiple (rtx * regs, int count, rtx to,
- + int in_struct_p, int scalar_p);
- +extern int avr32_gen_movmemsi (rtx * operands);
- +
- +extern int avr32_rnd_operands (rtx add, rtx shift);
- +extern int avr32_adjust_insn_length (rtx insn, int length);
- +
- +extern int symbol_mentioned_p (rtx x);
- +extern int label_mentioned_p (rtx x);
- +extern rtx legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg);
- +extern int avr32_address_register_rtx_p (rtx x, int strict_p);
- +extern int avr32_legitimate_index_p (enum machine_mode mode, rtx index,
- + int strict_p);
- +
- +extern int avr32_const_double_immediate (rtx value);
- +extern void avr32_init_expanders (void);
- +extern rtx avr32_return_addr (int count, rtx frame);
- +extern bool avr32_got_mentioned_p (rtx addr);
- +
- +extern void avr32_final_prescan_insn (rtx insn, rtx * opvec, int noperands);
- +
- +extern int avr32_expand_movcc (enum machine_mode mode, rtx operands[]);
- +extern int avr32_expand_addcc (enum machine_mode mode, rtx operands[]);
- +#ifdef RTX_CODE
- +extern int avr32_expand_scc (RTX_CODE cond, rtx * operands);
- +#endif
- +
- +extern int avr32_store_bypass (rtx insn_out, rtx insn_in);
- +extern int avr32_mul_waw_bypass (rtx insn_out, rtx insn_in);
- +extern int avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in);
- +extern int avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in);
- +extern rtx avr32_output_cmp (rtx cond, enum machine_mode mode,
- + rtx op0, rtx op1);
- +
- +rtx get_next_insn_cond (rtx cur_insn);
- +int set_next_insn_cond (rtx cur_insn, rtx cond);
- +rtx next_insn_emits_cmp (rtx cur_insn);
- +void avr32_override_options (void);
- +void avr32_load_pic_register (void);
- +#ifdef GCC_BASIC_BLOCK_H
- +rtx avr32_ifcvt_modify_insn (ce_if_block_t *ce_info, rtx pattern, rtx insn,
- + int *num_true_changes);
- +rtx avr32_ifcvt_modify_test (ce_if_block_t *ce_info, rtx test );
- +void avr32_ifcvt_modify_cancel ( ce_if_block_t *ce_info, int *num_true_changes);
- +#endif
- +void avr32_optimization_options (int level, int size);
- +int avr32_const_ok_for_move (HOST_WIDE_INT c);
- +
- +void avr32_split_const_expr (enum machine_mode mode,
- + enum machine_mode new_mode,
- + rtx expr,
- + rtx *split_expr);
- +void avr32_get_intval (enum machine_mode mode,
- + rtx const_expr,
- + HOST_WIDE_INT *val);
- +
- +int avr32_cond_imm_clobber_splittable (rtx insn,
- + rtx operands[]);
- +
- +bool avr32_flashvault_call(tree decl);
- +extern void avr32_emit_swdivsf (rtx, rtx, rtx);
- +
- +#endif /* AVR32_PROTOS_H */
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/avr32.c gcc-4.4.6/gcc/config/avr32/avr32.c
- --- gcc-4.4.6.orig/gcc/config/avr32/avr32.c 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/avr32.c 2011-10-22 19:23:08.516581300 +0200
- @@ -0,0 +1,8087 @@
- +/*
- + Target hooks and helper functions for AVR32.
- + Copyright 2003,2004,2005,2006,2007,2008,2009,2010 Atmel Corporation.
- +
- + This file is part of GCC.
- +
- + This program is free software; you can redistribute it and/or modify
- + it under the terms of the GNU General Public License as published by
- + the Free Software Foundation; either version 2 of the License, or
- + (at your option) any later version.
- +
- + This program is distributed in the hope that it will be useful,
- + but WITHOUT ANY WARRANTY; without even the implied warranty of
- + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- + GNU General Public License for more details.
- +
- + You should have received a copy of the GNU General Public License
- + along with this program; if not, write to the Free Software
- + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
- +
- +#include "config.h"
- +#include "system.h"
- +#include "coretypes.h"
- +#include "tm.h"
- +#include "rtl.h"
- +#include "tree.h"
- +#include "obstack.h"
- +#include "regs.h"
- +#include "hard-reg-set.h"
- +#include "real.h"
- +#include "insn-config.h"
- +#include "conditions.h"
- +#include "output.h"
- +#include "insn-attr.h"
- +#include "flags.h"
- +#include "reload.h"
- +#include "function.h"
- +#include "expr.h"
- +#include "optabs.h"
- +#include "toplev.h"
- +#include "recog.h"
- +#include "ggc.h"
- +#include "except.h"
- +#include "c-pragma.h"
- +#include "integrate.h"
- +#include "tm_p.h"
- +#include "langhooks.h"
- +#include "hooks.h"
- +#include "df.h"
- +
- +#include "target.h"
- +#include "target-def.h"
- +
- +#include <ctype.h>
- +
- +
- +
- +/* Global variables. */
- +typedef struct minipool_node Mnode;
- +typedef struct minipool_fixup Mfix;
- +
- +/* Obstack for minipool constant handling. */
- +static struct obstack minipool_obstack;
- +static char *minipool_startobj;
- +static rtx minipool_vector_label;
- +
- +/* True if we are currently building a constant table. */
- +int making_const_table;
- +
- +tree fndecl_attribute_args = NULL_TREE;
- +
- +
- +/* Function prototypes. */
- +static unsigned long avr32_isr_value (tree);
- +static unsigned long avr32_compute_func_type (void);
- +static tree avr32_handle_isr_attribute (tree *, tree, tree, int, bool *);
- +static tree avr32_handle_acall_attribute (tree *, tree, tree, int, bool *);
- +static tree avr32_handle_fndecl_attribute (tree * node, tree name, tree args,
- + int flags, bool * no_add_attrs);
- +static void avr32_reorg (void);
- +bool avr32_return_in_msb (tree type);
- +bool avr32_vector_mode_supported (enum machine_mode mode);
- +static void avr32_init_libfuncs (void);
- +static void avr32_file_end (void);
- +static void flashvault_decl_list_add (unsigned int vector_num, const char *name);
- +
- +
- +
- +static void
- +avr32_add_gc_roots (void)
- +{
- + gcc_obstack_init (&minipool_obstack);
- + minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
- +}
- +
- +
- +/* List of all known AVR32 parts */
- +static const struct part_type_s avr32_part_types[] = {
- + /* name, part_type, architecture type, macro */
- + {"none", PART_TYPE_AVR32_NONE, ARCH_TYPE_AVR32_AP, "__AVR32__"},
- + {"ap7000", PART_TYPE_AVR32_AP7000, ARCH_TYPE_AVR32_AP, "__AVR32_AP7000__"},
- + {"ap7001", PART_TYPE_AVR32_AP7001, ARCH_TYPE_AVR32_AP, "__AVR32_AP7001__"},
- + {"ap7002", PART_TYPE_AVR32_AP7002, ARCH_TYPE_AVR32_AP, "__AVR32_AP7002__"},
- + {"ap7200", PART_TYPE_AVR32_AP7200, ARCH_TYPE_AVR32_AP, "__AVR32_AP7200__"},
- + {"uc3a0128", PART_TYPE_AVR32_UC3A0128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A0128__"},
- + {"uc3a0256", PART_TYPE_AVR32_UC3A0256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A0256__"},
- + {"uc3a0512", PART_TYPE_AVR32_UC3A0512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A0512__"},
- + {"uc3a0512es", PART_TYPE_AVR32_UC3A0512ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3A0512ES__"},
- + {"uc3a1128", PART_TYPE_AVR32_UC3A1128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A1128__"},
- + {"uc3a1256", PART_TYPE_AVR32_UC3A1256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A1256__"},
- + {"uc3a1512", PART_TYPE_AVR32_UC3A1512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A1512__"},
- + {"uc3a1512es", PART_TYPE_AVR32_UC3A1512ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3A1512ES__"},
- + {"uc3a3revd", PART_TYPE_AVR32_UC3A3REVD, ARCH_TYPE_AVR32_UCR2NOMUL, "__AVR32_UC3A3256S__"},
- + {"uc3a364", PART_TYPE_AVR32_UC3A364, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A364__"},
- + {"uc3a364s", PART_TYPE_AVR32_UC3A364S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A364S__"},
- + {"uc3a3128", PART_TYPE_AVR32_UC3A3128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3128__"},
- + {"uc3a3128s", PART_TYPE_AVR32_UC3A3128S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3128S__"},
- + {"uc3a3256", PART_TYPE_AVR32_UC3A3256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3256__"},
- + {"uc3a3256s", PART_TYPE_AVR32_UC3A3256S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3256S__"},
- + {"uc3a464", PART_TYPE_AVR32_UC3A464, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A464__"},
- + {"uc3a464s", PART_TYPE_AVR32_UC3A464S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A464S__"},
- + {"uc3a4128", PART_TYPE_AVR32_UC3A4128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A4128__"},
- + {"uc3a4128s", PART_TYPE_AVR32_UC3A4128S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A4128S__"},
- + {"uc3a4256", PART_TYPE_AVR32_UC3A4256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A4256__"},
- + {"uc3a4256s", PART_TYPE_AVR32_UC3A4256S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A4256S__"},
- + {"uc3b064", PART_TYPE_AVR32_UC3B064, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B064__"},
- + {"uc3b0128", PART_TYPE_AVR32_UC3B0128, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B0128__"},
- + {"uc3b0256", PART_TYPE_AVR32_UC3B0256, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B0256__"},
- + {"uc3b0256es", PART_TYPE_AVR32_UC3B0256ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B0256ES__"},
- + {"uc3b0512", PART_TYPE_AVR32_UC3B0512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B0512__"},
- + {"uc3b0512revc", PART_TYPE_AVR32_UC3B0512REVC, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B0512REVC__"},
- + {"uc3b164", PART_TYPE_AVR32_UC3B164, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B164__"},
- + {"uc3b1128", PART_TYPE_AVR32_UC3B1128, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B1128__"},
- + {"uc3b1256", PART_TYPE_AVR32_UC3B1256, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B1256__"},
- + {"uc3b1256es", PART_TYPE_AVR32_UC3B1256ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B1256ES__"},
- + {"uc3b1512", PART_TYPE_AVR32_UC3B1512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B1512__"},
- + {"uc3b1512revc", PART_TYPE_AVR32_UC3B1512REVC, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B1512REVC__"},
- + {"uc64d3", PART_TYPE_AVR32_UC64D3, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC64D3__"},
- + {"uc128d3", PART_TYPE_AVR32_UC128D3, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC128D3__"},
- + {"uc64d4", PART_TYPE_AVR32_UC64D4, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC64D4__"},
- + {"uc128d4", PART_TYPE_AVR32_UC128D4, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC128D4__"},
- + {"uc3c0512crevc", PART_TYPE_AVR32_UC3C0512CREVC, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C0512CREVC__"},
- + {"uc3c1512crevc", PART_TYPE_AVR32_UC3C1512CREVC, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C1512CREVC__"},
- + {"uc3c2512crevc", PART_TYPE_AVR32_UC3C2512CREVC, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C2512CREVC__"},
- + {"uc3l0256", PART_TYPE_AVR32_UC3L0256, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L0256__"},
- + {"uc3l0128", PART_TYPE_AVR32_UC3L0128, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L0128__"},
- + {"uc3l064", PART_TYPE_AVR32_UC3L064, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L064__"},
- + {"uc3l032", PART_TYPE_AVR32_UC3L032, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L032__"},
- + {"uc3l016", PART_TYPE_AVR32_UC3L016, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L016__"},
- + {"uc3l064revb", PART_TYPE_AVR32_UC3L064REVB, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L064REVB__"},
- + {"uc64l3u", PART_TYPE_AVR32_UC64L3U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC64L3U__"},
- + {"uc128l3u", PART_TYPE_AVR32_UC128L3U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC128L3U__"},
- + {"uc256l3u", PART_TYPE_AVR32_UC256L3U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC256L3U__"},
- + {"uc64l4u", PART_TYPE_AVR32_UC64L4U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC64L4U__"},
- + {"uc128l4u", PART_TYPE_AVR32_UC128L4U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC128L4U__"},
- + {"uc256l4u", PART_TYPE_AVR32_UC256L4U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC256L4U__"},
- + {"uc3c064c", PART_TYPE_AVR32_UC3C064C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C064C__"},
- + {"uc3c0128c", PART_TYPE_AVR32_UC3C0128C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C0128C__"},
- + {"uc3c0256c", PART_TYPE_AVR32_UC3C0256C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C0256C__"},
- + {"uc3c0512c", PART_TYPE_AVR32_UC3C0512C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C0512C__"},
- + {"uc3c164c", PART_TYPE_AVR32_UC3C164C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C164C__"},
- + {"uc3c1128c", PART_TYPE_AVR32_UC3C1128C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C1128C__"},
- + {"uc3c1256c", PART_TYPE_AVR32_UC3C1256C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C1256C__"},
- + {"uc3c1512c", PART_TYPE_AVR32_UC3C1512C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C1512C__"},
- + {"uc3c264c", PART_TYPE_AVR32_UC3C264C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C264C__"},
- + {"uc3c2128c", PART_TYPE_AVR32_UC3C2128C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C2128C__"},
- + {"uc3c2256c", PART_TYPE_AVR32_UC3C2256C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C2256C__"},
- + {"uc3c2512c", PART_TYPE_AVR32_UC3C2512C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C2512C__"},
- + {"mxt768e", PART_TYPE_AVR32_MXT768E, ARCH_TYPE_AVR32_UCR3, "__AVR32_MXT768E__"},
- + {NULL, 0, 0, NULL}
- +};
- +
- +/* List of all known AVR32 architectures */
- +static const struct arch_type_s avr32_arch_types[] = {
- + /* name, architecture type, microarchitecture type, feature flags, macro */
- + {"ap", ARCH_TYPE_AVR32_AP, UARCH_TYPE_AVR32B,
- + (FLAG_AVR32_HAS_DSP
- + | FLAG_AVR32_HAS_SIMD
- + | FLAG_AVR32_HAS_UNALIGNED_WORD
- + | FLAG_AVR32_HAS_BRANCH_PRED | FLAG_AVR32_HAS_RETURN_STACK
- + | FLAG_AVR32_HAS_CACHES),
- + "__AVR32_AP__"},
- + {"ucr1", ARCH_TYPE_AVR32_UCR1, UARCH_TYPE_AVR32A,
- + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW),
- + "__AVR32_UC__=1"},
- + {"ucr2", ARCH_TYPE_AVR32_UCR2, UARCH_TYPE_AVR32A,
- + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
- + | FLAG_AVR32_HAS_V2_INSNS),
- + "__AVR32_UC__=2"},
- + {"ucr2nomul", ARCH_TYPE_AVR32_UCR2NOMUL, UARCH_TYPE_AVR32A,
- + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
- + | FLAG_AVR32_HAS_V2_INSNS | FLAG_AVR32_HAS_NO_MUL_INSNS),
- + "__AVR32_UC__=2"},
- + {"ucr3", ARCH_TYPE_AVR32_UCR3, UARCH_TYPE_AVR32A,
- + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
- + | FLAG_AVR32_HAS_V2_INSNS),
- + "__AVR32_UC__=3"},
- + {"ucr3fp", ARCH_TYPE_AVR32_UCR3FP, UARCH_TYPE_AVR32A,
- + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW | FLAG_AVR32_HAS_FPU
- + | FLAG_AVR32_HAS_V2_INSNS),
- + "__AVR32_UC__=3"},
- + {NULL, 0, 0, 0, NULL}
- +};
- +
- +/* Default arch name */
- +const char *avr32_arch_name = "none";
- +const char *avr32_part_name = "none";
- +
- +const struct part_type_s *avr32_part;
- +const struct arch_type_s *avr32_arch;
- +
- +
- +/* FIXME: needs to use GC. */
- +struct flashvault_decl_list
- +{
- + struct flashvault_decl_list *next;
- + unsigned int vector_num;
- + const char *name;
- +};
- +
- +static struct flashvault_decl_list *flashvault_decl_list_head = NULL;
- +
- +
- +/* Set default target_flags. */
- +#undef TARGET_DEFAULT_TARGET_FLAGS
- +#define TARGET_DEFAULT_TARGET_FLAGS \
- + (MASK_HAS_ASM_ADDR_PSEUDOS | MASK_MD_REORG_OPTIMIZATION | MASK_COND_EXEC_BEFORE_RELOAD)
- +
- +void
- +avr32_optimization_options (int level, int size)
- +{
- + if (AVR32_ALWAYS_PIC)
- + flag_pic = 1;
- +
- + /* Enable section anchors if optimization is enabled. */
- + if (level > 0 || size)
- + flag_section_anchors = 2;
- +}
- +
- +
- +/* Override command line options */
- +void
- +avr32_override_options (void)
- +{
- + const struct part_type_s *part;
- + const struct arch_type_s *arch, *part_arch;
- +
- +  /* Add backward compatibility. */
- + if (strcmp ("uc", avr32_arch_name)== 0)
- + {
- + fprintf (stderr, "Warning: Deprecated arch `%s' specified. "
- + "Please use '-march=ucr1' instead. "
- + "Using arch 'ucr1'\n",
- + avr32_arch_name);
- + avr32_arch_name="ucr1";
- + }
- +
- + /* Check if arch type is set. */
- + for (arch = avr32_arch_types; arch->name; arch++)
- + {
- + if (strcmp (arch->name, avr32_arch_name) == 0)
- + break;
- + }
- + avr32_arch = arch;
- +
- + if (!arch->name && strcmp("none", avr32_arch_name) != 0)
- + {
- + fprintf (stderr, "Unknown arch `%s' specified\n"
- + "Known arch names:\n"
- + "\tuc (deprecated)\n",
- + avr32_arch_name);
- + for (arch = avr32_arch_types; arch->name; arch++)
- + fprintf (stderr, "\t%s\n", arch->name);
- + avr32_arch = &avr32_arch_types[ARCH_TYPE_AVR32_AP];
- + }
- +
- + /* Check if part type is set. */
- + for (part = avr32_part_types; part->name; part++)
- + if (strcmp (part->name, avr32_part_name) == 0)
- + break;
- +
- + avr32_part = part;
- + if (!part->name)
- + {
- + fprintf (stderr, "Unknown part `%s' specified\nKnown part names:\n",
- + avr32_part_name);
- + for (part = avr32_part_types; part->name; part++)
- + {
- + if (strcmp("none", part->name) != 0)
- + fprintf (stderr, "\t%s\n", part->name);
- + }
- + /* Set default to NONE*/
- + avr32_part = &avr32_part_types[PART_TYPE_AVR32_NONE];
- + }
- +
- + /* NB! option -march= overrides option -mpart
- + * if both are used at the same time */
- + if (!arch->name)
- + avr32_arch = &avr32_arch_types[avr32_part->arch_type];
- +
- +  /* When the architecture implied by -mpart and the one passed in -march
- +   * conflict, issue an error message. */
- + part_arch = &avr32_arch_types[avr32_part->arch_type];
- + if (strcmp("none",avr32_part_name) && strcmp("none", avr32_arch_name) && strcmp(avr32_arch_name,part_arch->name))
- + error ("Conflicting architectures implied by -mpart and -march\n");
- +
- + /* If optimization level is two or greater, then align start of loops to a
- + word boundary since this will allow folding the first insn of the loop.
- + Do this only for targets supporting branch prediction. */
- + if (optimize >= 2 && TARGET_BRANCH_PRED)
- + align_loops = 2;
- +
- +
- + /* Enable fast-float library if unsafe math optimizations
- + are used. */
- + if (flag_unsafe_math_optimizations)
- + target_flags |= MASK_FAST_FLOAT;
- +
- + /* Check if we should set avr32_imm_in_const_pool
- + based on if caches are present or not. */
- + if ( avr32_imm_in_const_pool == -1 )
- + {
- + if ( TARGET_CACHES )
- + avr32_imm_in_const_pool = 1;
- + else
- + avr32_imm_in_const_pool = 0;
- + }
- +
- + if (TARGET_NO_PIC)
- + flag_pic = 0;
- + avr32_add_gc_roots ();
- +}
- +
- +
- +/*
- +If defined, a function that outputs the assembler code for entry to a
- +function. The prologue is responsible for setting up the stack frame,
- +initializing the frame pointer register, saving registers that must be
- +saved, and allocating size additional bytes of storage for the
- +local variables. size is an integer. file is a stdio
- +stream to which the assembler code should be output.
- +
- +The label for the beginning of the function need not be output by this
- +macro. That has already been done when the macro is run.
- +
- +To determine which registers to save, the macro can refer to the array
- +regs_ever_live: element r is nonzero if hard register
- +r is used anywhere within the function. This implies the function
- +prologue should save register r, provided it is not one of the
- +call-used registers. (TARGET_ASM_FUNCTION_EPILOGUE must likewise use
- +regs_ever_live.)
- +
- +On machines that have ``register windows'', the function entry code does
- +not save on the stack the registers that are in the windows, even if
- +they are supposed to be preserved by function calls; instead it takes
- +appropriate steps to ``push'' the register stack, if any non-call-used
- +registers are used in the function.
- +
- +On machines where functions may or may not have frame-pointers, the
- +function entry code must vary accordingly; it must set up the frame
- +pointer if one is wanted, and not otherwise. To determine whether a
- +frame pointer is wanted, the macro can refer to the variable
- +frame_pointer_needed. The variable's value will be 1 at run
- +time in a function that needs a frame pointer. (see Elimination).
- +
- +The function entry code is responsible for allocating any stack space
- +required for the function. This stack space consists of the regions
- +listed below. In most cases, these regions are allocated in the
- +order listed, with the last listed region closest to the top of the
- +stack (the lowest address if STACK_GROWS_DOWNWARD is defined, and
- +the highest address if it is not defined). You can use a different order
- +for a machine if doing so is more convenient or required for
- +compatibility reasons. Except in cases where required by standard
- +or by a debugger, there is no reason why the stack layout used by GCC
- +need agree with that used by other compilers for a machine.
- +*/
- +
- +#undef TARGET_ASM_FUNCTION_PROLOGUE
- +#define TARGET_ASM_FUNCTION_PROLOGUE avr32_target_asm_function_prologue
- +
- +#undef TARGET_ASM_FILE_END
- +#define TARGET_ASM_FILE_END avr32_file_end
- +
- +#undef TARGET_DEFAULT_SHORT_ENUMS
- +#define TARGET_DEFAULT_SHORT_ENUMS hook_bool_void_false
- +
- +#undef TARGET_PROMOTE_FUNCTION_ARGS
- +#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
- +
- +#undef TARGET_PROMOTE_FUNCTION_RETURN
- +#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
- +
- +#undef TARGET_PROMOTE_PROTOTYPES
- +#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
- +
- +#undef TARGET_MUST_PASS_IN_STACK
- +#define TARGET_MUST_PASS_IN_STACK avr32_must_pass_in_stack
- +
- +#undef TARGET_PASS_BY_REFERENCE
- +#define TARGET_PASS_BY_REFERENCE avr32_pass_by_reference
- +
- +#undef TARGET_STRICT_ARGUMENT_NAMING
- +#define TARGET_STRICT_ARGUMENT_NAMING avr32_strict_argument_naming
- +
- +#undef TARGET_VECTOR_MODE_SUPPORTED_P
- +#define TARGET_VECTOR_MODE_SUPPORTED_P avr32_vector_mode_supported
- +
- +#undef TARGET_RETURN_IN_MEMORY
- +#define TARGET_RETURN_IN_MEMORY avr32_return_in_memory
- +
- +#undef TARGET_RETURN_IN_MSB
- +#define TARGET_RETURN_IN_MSB avr32_return_in_msb
- +
- +#undef TARGET_ENCODE_SECTION_INFO
- +#define TARGET_ENCODE_SECTION_INFO avr32_encode_section_info
- +
- +#undef TARGET_ARG_PARTIAL_BYTES
- +#define TARGET_ARG_PARTIAL_BYTES avr32_arg_partial_bytes
- +
- +#undef TARGET_STRIP_NAME_ENCODING
- +#define TARGET_STRIP_NAME_ENCODING avr32_strip_name_encoding
- +
- +#define streq(string1, string2) (strcmp (string1, string2) == 0)
- +
- +#undef TARGET_NARROW_VOLATILE_BITFIELD
- +#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
- +
- +#undef TARGET_ATTRIBUTE_TABLE
- +#define TARGET_ATTRIBUTE_TABLE avr32_attribute_table
- +
- +#undef TARGET_COMP_TYPE_ATTRIBUTES
- +#define TARGET_COMP_TYPE_ATTRIBUTES avr32_comp_type_attributes
- +
- +
- +#undef TARGET_RTX_COSTS
- +#define TARGET_RTX_COSTS avr32_rtx_costs
- +
- +#undef TARGET_CANNOT_FORCE_CONST_MEM
- +#define TARGET_CANNOT_FORCE_CONST_MEM avr32_cannot_force_const_mem
- +
- +#undef TARGET_ASM_INTEGER
- +#define TARGET_ASM_INTEGER avr32_assemble_integer
- +
- +#undef TARGET_FUNCTION_VALUE
- +#define TARGET_FUNCTION_VALUE avr32_function_value
- +
- +#undef TARGET_MIN_ANCHOR_OFFSET
- +#define TARGET_MIN_ANCHOR_OFFSET (0)
- +
- +#undef TARGET_MAX_ANCHOR_OFFSET
- +#define TARGET_MAX_ANCHOR_OFFSET ((1 << 15) - 1)
- +#undef TARGET_SECONDARY_RELOAD
- +#define TARGET_SECONDARY_RELOAD avr32_secondary_reload
- +
- +
- +/*
- + * Define the option -mlist-devices to list the devices supported by gcc.
- + * This option should be used while printing target-help to list all the
- + * supported devices.
- + */
- +#undef TARGET_HELP
- +#define TARGET_HELP avr32_target_help
- +
- +void avr32_target_help ()
- +{
- + if (avr32_list_supported_parts)
- + {
- + const struct part_type_s *list;
- + fprintf (stdout, "List of parts supported by avr32-gcc:\n");
- + for (list = avr32_part_types; list->name; list++)
- + {
- + if (strcmp("none", list->name) != 0)
- + fprintf (stdout, "%-20s%s\n", list->name, list->macro);
- + }
- + fprintf (stdout, "\n\n");
- + }
- +}
- +
- +enum reg_class
- +avr32_secondary_reload (bool in_p, rtx x, enum reg_class class,
- + enum machine_mode mode, secondary_reload_info *sri)
- +{
- +
- + if ( avr32_rmw_memory_operand (x, mode) )
- + {
- + if (!in_p)
- + sri->icode = CODE_FOR_reload_out_rmw_memory_operand;
- + else
- + sri->icode = CODE_FOR_reload_in_rmw_memory_operand;
- + }
- + return NO_REGS;
- +
- +}
- +/*
- + * Switches to the appropriate section for output of constant pool
- + * entry x in mode. You can assume that x is some kind of constant in
- + * RTL. The argument mode is redundant except in the case of a
- + * const_int rtx. Select the section by calling readonly_data_section
- + * or one of the alternatives for other sections. align is the
- + * constant alignment in bits.
- + *
- + * The default version of this function takes care of putting symbolic
- + * constants in flag_pic mode in data_section and everything else in
- + * readonly_data_section.
- + */
- +//#undef TARGET_ASM_SELECT_RTX_SECTION
- +//#define TARGET_ASM_SELECT_RTX_SECTION avr32_select_rtx_section
- +
- +
- +/*
- + * If non-null, this hook performs a target-specific pass over the
- + * instruction stream. The compiler will run it at all optimization
- + * levels, just before the point at which it normally does
- + * delayed-branch scheduling.
- + *
- + * The exact purpose of the hook varies from target to target. Some
- + * use it to do transformations that are necessary for correctness,
- + * such as laying out in-function constant pools or avoiding hardware
- + * hazards. Others use it as an opportunity to do some
- + * machine-dependent optimizations.
- + *
- + * You need not implement the hook if it has nothing to do. The
- + * default definition is null.
- + */
- +#undef TARGET_MACHINE_DEPENDENT_REORG
- +#define TARGET_MACHINE_DEPENDENT_REORG avr32_reorg
- +
- +/* Target hook for assembling integer objects.
- + Need to handle integer vectors */
- +static bool
- +avr32_assemble_integer (rtx x, unsigned int size, int aligned_p)
- +{
- + if (avr32_vector_mode_supported (GET_MODE (x)))
- + {
- + int i, units;
- +
- + if (GET_CODE (x) != CONST_VECTOR)
- + abort ();
- +
- + units = CONST_VECTOR_NUNITS (x);
- +
- + switch (GET_MODE (x))
- + {
- + case V2HImode:
- + size = 2;
- + break;
- + case V4QImode:
- + size = 1;
- + break;
- + default:
- + abort ();
- + }
- +
- + for (i = 0; i < units; i++)
- + {
- + rtx elt;
- +
- + elt = CONST_VECTOR_ELT (x, i);
- + assemble_integer (elt, size, i == 0 ? 32 : size * BITS_PER_UNIT, 1);
- + }
- +
- + return true;
- + }
- +
- + return default_assemble_integer (x, size, aligned_p);
- +}
- +
- +
- +/*
- + * This target hook describes the relative costs of RTL expressions.
- + *
- + * The cost may depend on the precise form of the expression, which is
- + * available for examination in x, and the rtx code of the expression
- + * in which it is contained, found in outer_code. code is the
- + * expression code--redundant, since it can be obtained with GET_CODE
- + * (x).
- + *
- + * In implementing this hook, you can use the construct COSTS_N_INSNS
- + * (n) to specify a cost equal to n fast instructions.
- + *
- + * On entry to the hook, *total contains a default estimate for the
- + * cost of the expression. The hook should modify this value as
- + * necessary. Traditionally, the default costs are COSTS_N_INSNS (5)
- + * for multiplications, COSTS_N_INSNS (7) for division and modulus
- + * operations, and COSTS_N_INSNS (1) for all other operations.
- + *
- + * When optimizing for code size, i.e. when optimize_size is non-zero,
- + * this target hook should be used to estimate the relative size cost
- + * of an expression, again relative to COSTS_N_INSNS.
- + *
- + * The hook returns true when all subexpressions of x have been
- + * processed, and false when rtx_cost should recurse.
- + */
- +
- +/* Worker routine for avr32_rtx_costs. */
- +static inline int
- +avr32_rtx_costs_1 (rtx x, enum rtx_code code ATTRIBUTE_UNUSED,
- + enum rtx_code outer ATTRIBUTE_UNUSED)
- +{
- + enum machine_mode mode = GET_MODE (x);
- +
- + switch (GET_CODE (x))
- + {
- + case MEM:
- + /* Using pre decrement / post increment memory operations on the
- + avr32_uc architecture means that two writebacks must be performed
- + and hence two cycles are needed. */
- + if (!optimize_size
- + && GET_MODE_SIZE (mode) <= 2 * UNITS_PER_WORD
- + && TARGET_ARCH_UC
- + && (GET_CODE (XEXP (x, 0)) == PRE_DEC
- + || GET_CODE (XEXP (x, 0)) == POST_INC))
- + return COSTS_N_INSNS (5);
- +
- + /* Memory costs quite a lot for the first word, but subsequent words
- + load at the equivalent of a single insn each. */
- + if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
- + return COSTS_N_INSNS (3 + (GET_MODE_SIZE (mode) / UNITS_PER_WORD));
- +
- + return COSTS_N_INSNS (4);
- + case SYMBOL_REF:
- + case CONST:
- +      /* These are valid for the pseudo insns lda.w and call, which operate
- +         on direct addresses. We assume that the cost of a lda.w is the same
- +         as the cost of a ld.w insn. */
- + return (outer == SET) ? COSTS_N_INSNS (4) : COSTS_N_INSNS (1);
- + case DIV:
- + case MOD:
- + case UDIV:
- + case UMOD:
- + return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
- +
- + case ROTATE:
- + case ROTATERT:
- + if (mode == TImode)
- + return COSTS_N_INSNS (100);
- +
- + if (mode == DImode)
- + return COSTS_N_INSNS (10);
- + return COSTS_N_INSNS (4);
- + case ASHIFT:
- + case LSHIFTRT:
- + case ASHIFTRT:
- + case NOT:
- + if (mode == TImode)
- + return COSTS_N_INSNS (10);
- +
- + if (mode == DImode)
- + return COSTS_N_INSNS (4);
- + return COSTS_N_INSNS (1);
- + case PLUS:
- + case MINUS:
- + case NEG:
- + case COMPARE:
- + case ABS:
- + if (GET_MODE_CLASS (mode) == MODE_FLOAT)
- + return COSTS_N_INSNS (100);
- +
- + if (mode == TImode)
- + return COSTS_N_INSNS (50);
- +
- + if (mode == DImode)
- + return COSTS_N_INSNS (2);
- + return COSTS_N_INSNS (1);
- +
- + case MULT:
- + {
- + if (GET_MODE_CLASS (mode) == MODE_FLOAT)
- + return COSTS_N_INSNS (300);
- +
- + if (mode == TImode)
- + return COSTS_N_INSNS (16);
- +
- + if (mode == DImode)
- + return COSTS_N_INSNS (4);
- +
- + if (mode == HImode)
- + return COSTS_N_INSNS (2);
- +
- + return COSTS_N_INSNS (3);
- + }
- + case IF_THEN_ELSE:
- + if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
- + return COSTS_N_INSNS (4);
- + return COSTS_N_INSNS (1);
- + case SIGN_EXTEND:
- + case ZERO_EXTEND:
- +      /* Sign/Zero extensions of registers cost quite a lot since these
- +         instructions only take one register operand, which means that gcc
- +         often must insert some move instructions. */
- + if (mode == QImode || mode == HImode)
- + return (COSTS_N_INSNS (GET_CODE (XEXP (x, 0)) == MEM ? 0 : 1));
- + return COSTS_N_INSNS (4);
- + case UNSPEC:
- + /* divmod operations */
- + if (XINT (x, 1) == UNSPEC_UDIVMODSI4_INTERNAL
- + || XINT (x, 1) == UNSPEC_DIVMODSI4_INTERNAL)
- + {
- + return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
- + }
- + /* Fallthrough */
- + default:
- + return COSTS_N_INSNS (1);
- + }
- +}
- +
- +
- +static bool
- +avr32_rtx_costs (rtx x, int code, int outer_code, int *total)
- +{
- + *total = avr32_rtx_costs_1 (x, code, outer_code);
- + return true;
- +}
- +
- +
- +bool
- +avr32_cannot_force_const_mem (rtx x ATTRIBUTE_UNUSED)
- +{
- + /* Do not want symbols in the constant pool when compiling pic or if using
- + address pseudo instructions. */
- + return ((flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
- + && avr32_find_symbol (x) != NULL_RTX);
- +}
- +
- +
- +/* Table of machine attributes. */
- +const struct attribute_spec avr32_attribute_table[] = {
- + /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
- + /* Interrupt Service Routines have special prologue and epilogue
- + requirements. */
- + {"isr", 0, 1, false, false, false, avr32_handle_isr_attribute},
- + {"interrupt", 0, 1, false, false, false, avr32_handle_isr_attribute},
- + {"acall", 0, 1, false, true, true, avr32_handle_acall_attribute},
- + {"naked", 0, 0, true, false, false, avr32_handle_fndecl_attribute},
- + {"rmw_addressable", 0, 0, true, false, false, NULL},
- + {"flashvault", 0, 1, true, false, false, avr32_handle_fndecl_attribute},
- + {"flashvault_impl", 0, 1, true, false, false, avr32_handle_fndecl_attribute},
- + {NULL, 0, 0, false, false, false, NULL}
- +};
- +
- +
- +typedef struct
- +{
- + const char *const arg;
- + const unsigned long return_value;
- +}
- +isr_attribute_arg;
- +
- +
- +static const isr_attribute_arg isr_attribute_args[] = {
- + {"FULL", AVR32_FT_ISR_FULL},
- + {"full", AVR32_FT_ISR_FULL},
- + {"HALF", AVR32_FT_ISR_HALF},
- + {"half", AVR32_FT_ISR_HALF},
- + {"NONE", AVR32_FT_ISR_NONE},
- + {"none", AVR32_FT_ISR_NONE},
- + {"UNDEF", AVR32_FT_ISR_NONE},
- + {"undef", AVR32_FT_ISR_NONE},
- + {"SWI", AVR32_FT_ISR_NONE},
- + {"swi", AVR32_FT_ISR_NONE},
- + {NULL, AVR32_FT_ISR_NONE}
- +};
- +
- +
- +/* Returns the (interrupt) function type of the current
- + function, or AVR32_FT_UNKNOWN if the type cannot be determined. */
- +static unsigned long
- +avr32_isr_value (tree argument)
- +{
- + const isr_attribute_arg *ptr;
- + const char *arg;
- +
- + /* No argument - default to ISR_NONE. */
- + if (argument == NULL_TREE)
- + return AVR32_FT_ISR_NONE;
- +
- + /* Get the value of the argument. */
- + if (TREE_VALUE (argument) == NULL_TREE
- + || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
- + return AVR32_FT_UNKNOWN;
- +
- + arg = TREE_STRING_POINTER (TREE_VALUE (argument));
- +
- + /* Check it against the list of known arguments. */
- + for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
- + if (streq (arg, ptr->arg))
- + return ptr->return_value;
- +
- + /* An unrecognized interrupt type. */
- + return AVR32_FT_UNKNOWN;
- +}
- +
- +
- +/*
- +These hooks specify assembly directives for creating certain kinds
- +of integer object. The TARGET_ASM_BYTE_OP directive creates a
- +byte-sized object, the TARGET_ASM_ALIGNED_HI_OP one creates an
- +aligned two-byte object, and so on. Any of the hooks may be
- +NULL, indicating that no suitable directive is available.
- +
- +The compiler will print these strings at the start of a new line,
- +followed immediately by the object's initial value. In most cases,
- +the string should contain a tab, a pseudo-op, and then another tab.
- +*/
- +#undef TARGET_ASM_BYTE_OP
- +#define TARGET_ASM_BYTE_OP "\t.byte\t"
- +#undef TARGET_ASM_ALIGNED_HI_OP
- +#define TARGET_ASM_ALIGNED_HI_OP "\t.align 1\n\t.short\t"
- +#undef TARGET_ASM_ALIGNED_SI_OP
- +#define TARGET_ASM_ALIGNED_SI_OP "\t.align 2\n\t.int\t"
- +#undef TARGET_ASM_ALIGNED_DI_OP
- +#define TARGET_ASM_ALIGNED_DI_OP NULL
- +#undef TARGET_ASM_ALIGNED_TI_OP
- +#define TARGET_ASM_ALIGNED_TI_OP NULL
- +#undef TARGET_ASM_UNALIGNED_HI_OP
- +#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
- +#undef TARGET_ASM_UNALIGNED_SI_OP
- +#define TARGET_ASM_UNALIGNED_SI_OP "\t.int\t"
- +#undef TARGET_ASM_UNALIGNED_DI_OP
- +#define TARGET_ASM_UNALIGNED_DI_OP NULL
- +#undef TARGET_ASM_UNALIGNED_TI_OP
- +#define TARGET_ASM_UNALIGNED_TI_OP NULL
- +
- +#undef TARGET_ASM_OUTPUT_MI_THUNK
- +#define TARGET_ASM_OUTPUT_MI_THUNK avr32_output_mi_thunk
- +
- +#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
- +#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
- +
- +
- +static void
- +avr32_output_mi_thunk (FILE * file,
- + tree thunk ATTRIBUTE_UNUSED,
- + HOST_WIDE_INT delta,
- + HOST_WIDE_INT vcall_offset, tree function)
- + {
- + int mi_delta = delta;
- + int this_regno =
- + (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function) ?
- + INTERNAL_REGNUM (11) : INTERNAL_REGNUM (12));
- +
- +
- + if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
- + || vcall_offset)
- + {
- + fputs ("\tpushm\tlr\n", file);
- + }
- +
- +
- + if (mi_delta != 0)
- + {
- + if (avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21"))
- + {
- + fprintf (file, "\tsub\t%s, %d\n", reg_names[this_regno], -mi_delta);
- + }
- + else
- + {
- +          /* The immediate is larger than k21; we must make a temp register
- +             available by pushing a register to the stack. */
- + fprintf (file, "\tmov\tlr, lo(%d)\n", mi_delta);
- + fprintf (file, "\torh\tlr, hi(%d)\n", mi_delta);
- + fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]);
- + }
- + }
- +
- +
- + if (vcall_offset != 0)
- + {
- + fprintf (file, "\tld.w\tlr, %s[0]\n", reg_names[this_regno]);
- + fprintf (file, "\tld.w\tlr, lr[%i]\n", (int) vcall_offset);
- + fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]);
- + }
- +
- +
- + if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
- + || vcall_offset)
- + {
- + fputs ("\tpopm\tlr\n", file);
- + }
- +
- + /* Jump to the function. We assume that we can use an rjmp since the
- + function to jump to is local and probably not too far away from
- + the thunk. If this assumption proves to be wrong we could implement
- + this jump by calculating the offset between the jump source and destination
- + and put this in the constant pool and then perform an add to pc.
- + This would also be legitimate PIC code. But for now we hope that an rjmp
- + will be sufficient...
- + */
- + fputs ("\trjmp\t", file);
- + assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
- + fputc ('\n', file);
- + }
- +
- +
- +/* Implements target hook vector_mode_supported. */
- +bool
- +avr32_vector_mode_supported (enum machine_mode mode)
- +{
- + if ((mode == V2HImode) || (mode == V4QImode))
- + return true;
- +
- + return false;
- +}
- +
- +
- +#undef TARGET_INIT_LIBFUNCS
- +#define TARGET_INIT_LIBFUNCS avr32_init_libfuncs
- +
- +#undef TARGET_INIT_BUILTINS
- +#define TARGET_INIT_BUILTINS avr32_init_builtins
- +
- +#undef TARGET_EXPAND_BUILTIN
- +#define TARGET_EXPAND_BUILTIN avr32_expand_builtin
- +
- +tree int_ftype_int, int_ftype_void, short_ftype_short, void_ftype_int_int,
- + void_ftype_ptr_int;
- +tree void_ftype_int, void_ftype_ulong, void_ftype_void, int_ftype_ptr_int;
- +tree short_ftype_short, int_ftype_int_short, int_ftype_short_short,
- + short_ftype_short_short;
- +tree int_ftype_int_int, longlong_ftype_int_short, longlong_ftype_short_short;
- +tree void_ftype_int_int_int_int_int, void_ftype_int_int_int;
- +tree longlong_ftype_int_int, void_ftype_int_int_longlong;
- +tree int_ftype_int_int_int, longlong_ftype_longlong_int_short;
- +tree longlong_ftype_longlong_short_short, int_ftype_int_short_short;
- +
- +#define def_builtin(NAME, TYPE, CODE) \
- + add_builtin_function ((NAME), (TYPE), (CODE), \
- + BUILT_IN_MD, NULL, NULL_TREE)
- +
- +#define def_mbuiltin(MASK, NAME, TYPE, CODE) \
- + do \
- + { \
- + if ((MASK)) \
- + add_builtin_function ((NAME), (TYPE), (CODE), \
- + BUILT_IN_MD, NULL, NULL_TREE); \
- + } \
- + while (0)
- +
- +struct builtin_description
- +{
- + const unsigned int mask;
- + const enum insn_code icode;
- + const char *const name;
- + const int code;
- + const enum rtx_code comparison;
- + const unsigned int flag;
- + const tree *ftype;
- +};
- +
- +static const struct builtin_description bdesc_2arg[] = {
- +
- +#define DSP_BUILTIN(code, builtin, ftype) \
- + { 1, CODE_FOR_##code, "__builtin_" #code , \
- + AVR32_BUILTIN_##builtin, 0, 0, ftype }
- +
- + DSP_BUILTIN (mulsathh_h, MULSATHH_H, &short_ftype_short_short),
- + DSP_BUILTIN (mulsathh_w, MULSATHH_W, &int_ftype_short_short),
- + DSP_BUILTIN (mulsatrndhh_h, MULSATRNDHH_H, &short_ftype_short_short),
- + DSP_BUILTIN (mulsatrndwh_w, MULSATRNDWH_W, &int_ftype_int_short),
- + DSP_BUILTIN (mulsatwh_w, MULSATWH_W, &int_ftype_int_short),
- + DSP_BUILTIN (satadd_h, SATADD_H, &short_ftype_short_short),
- + DSP_BUILTIN (satsub_h, SATSUB_H, &short_ftype_short_short),
- + DSP_BUILTIN (satadd_w, SATADD_W, &int_ftype_int_int),
- + DSP_BUILTIN (satsub_w, SATSUB_W, &int_ftype_int_int),
- + DSP_BUILTIN (mulwh_d, MULWH_D, &longlong_ftype_int_short),
- + DSP_BUILTIN (mulnwh_d, MULNWH_D, &longlong_ftype_int_short)
- +};
- +
- +
- +void
- +avr32_init_builtins (void)
- +{
- + unsigned int i;
- + const struct builtin_description *d;
- + tree endlink = void_list_node;
- + tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
- + tree longlong_endlink =
- + tree_cons (NULL_TREE, long_long_integer_type_node, endlink);
- + tree short_endlink =
- + tree_cons (NULL_TREE, short_integer_type_node, endlink);
- + tree void_endlink = tree_cons (NULL_TREE, void_type_node, endlink);
- +
- + /* int func (int) */
- + int_ftype_int = build_function_type (integer_type_node, int_endlink);
- +
- + /* short func (short) */
- + short_ftype_short
- + = build_function_type (short_integer_type_node, short_endlink);
- +
- + /* short func (short, short) */
- + short_ftype_short_short
- + = build_function_type (short_integer_type_node,
- + tree_cons (NULL_TREE, short_integer_type_node,
- + short_endlink));
- +
- + /* long long func (long long, short, short) */
- + longlong_ftype_longlong_short_short
- + = build_function_type (long_long_integer_type_node,
- + tree_cons (NULL_TREE, long_long_integer_type_node,
- + tree_cons (NULL_TREE,
- + short_integer_type_node,
- + short_endlink)));
- +
- + /* long long func (short, short) */
- + longlong_ftype_short_short
- + = build_function_type (long_long_integer_type_node,
- + tree_cons (NULL_TREE, short_integer_type_node,
- + short_endlink));
- +
- + /* int func (int, int) */
- + int_ftype_int_int
- + = build_function_type (integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + int_endlink));
- +
- + /* long long func (int, int) */
- + longlong_ftype_int_int
- + = build_function_type (long_long_integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + int_endlink));
- +
- + /* long long int func (long long, int, short) */
- + longlong_ftype_longlong_int_short
- + = build_function_type (long_long_integer_type_node,
- + tree_cons (NULL_TREE, long_long_integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + short_endlink)));
- +
- + /* long long int func (int, short) */
- + longlong_ftype_int_short
- + = build_function_type (long_long_integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + short_endlink));
- +
- + /* int func (int, short, short) */
- + int_ftype_int_short_short
- + = build_function_type (integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + tree_cons (NULL_TREE,
- + short_integer_type_node,
- + short_endlink)));
- +
- + /* int func (short, short) */
- + int_ftype_short_short
- + = build_function_type (integer_type_node,
- + tree_cons (NULL_TREE, short_integer_type_node,
- + short_endlink));
- +
- + /* int func (int, short) */
- + int_ftype_int_short
- + = build_function_type (integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + short_endlink));
- +
- + /* void func (int, int) */
- + void_ftype_int_int
- + = build_function_type (void_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + int_endlink));
- +
- + /* void func (int, int, int) */
- + void_ftype_int_int_int
- + = build_function_type (void_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + int_endlink)));
- +
- + /* void func (int, int, long long) */
- + void_ftype_int_int_longlong
- + = build_function_type (void_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + longlong_endlink)));
- +
- + /* void func (int, int, int, int, int) */
- + void_ftype_int_int_int_int_int
- + = build_function_type (void_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + tree_cons (NULL_TREE,
- + integer_type_node,
- + tree_cons
- + (NULL_TREE,
- + integer_type_node,
- + int_endlink)))));
- +
- + /* void func (void *, int) */
- + void_ftype_ptr_int
- + = build_function_type (void_type_node,
- + tree_cons (NULL_TREE, ptr_type_node, int_endlink));
- +
- + /* void func (int) */
- + void_ftype_int = build_function_type (void_type_node, int_endlink);
- +
- + /* void func (ulong) */
- + void_ftype_ulong = build_function_type_list (void_type_node,
- + long_unsigned_type_node, NULL_TREE);
- +
- + /* void func (void) */
- + void_ftype_void = build_function_type (void_type_node, void_endlink);
- +
- + /* int func (void) */
- + int_ftype_void = build_function_type (integer_type_node, void_endlink);
- +
- + /* int func (void *, int) */
- + int_ftype_ptr_int
- + = build_function_type (integer_type_node,
- + tree_cons (NULL_TREE, ptr_type_node, int_endlink));
- +
- + /* int func (int, int, int) */
- + int_ftype_int_int_int
- + = build_function_type (integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + int_endlink)));
- +
- + /* Initialize avr32 builtins. */
- + def_builtin ("__builtin_mfsr", int_ftype_int, AVR32_BUILTIN_MFSR);
- + def_builtin ("__builtin_mtsr", void_ftype_int_int, AVR32_BUILTIN_MTSR);
- + def_builtin ("__builtin_mfdr", int_ftype_int, AVR32_BUILTIN_MFDR);
- + def_builtin ("__builtin_mtdr", void_ftype_int_int, AVR32_BUILTIN_MTDR);
- + def_builtin ("__builtin_cache", void_ftype_ptr_int, AVR32_BUILTIN_CACHE);
- + def_builtin ("__builtin_sync", void_ftype_int, AVR32_BUILTIN_SYNC);
- + def_builtin ("__builtin_ssrf", void_ftype_int, AVR32_BUILTIN_SSRF);
- + def_builtin ("__builtin_csrf", void_ftype_int, AVR32_BUILTIN_CSRF);
- + def_builtin ("__builtin_tlbr", void_ftype_void, AVR32_BUILTIN_TLBR);
- + def_builtin ("__builtin_tlbs", void_ftype_void, AVR32_BUILTIN_TLBS);
- + def_builtin ("__builtin_tlbw", void_ftype_void, AVR32_BUILTIN_TLBW);
- + def_builtin ("__builtin_breakpoint", void_ftype_void,
- + AVR32_BUILTIN_BREAKPOINT);
- + def_builtin ("__builtin_xchg", int_ftype_ptr_int, AVR32_BUILTIN_XCHG);
- + def_builtin ("__builtin_ldxi", int_ftype_ptr_int, AVR32_BUILTIN_LDXI);
- + def_builtin ("__builtin_bswap_16", short_ftype_short,
- + AVR32_BUILTIN_BSWAP16);
- + def_builtin ("__builtin_bswap_32", int_ftype_int, AVR32_BUILTIN_BSWAP32);
- + def_builtin ("__builtin_cop", void_ftype_int_int_int_int_int,
- + AVR32_BUILTIN_COP);
- + def_builtin ("__builtin_mvcr_w", int_ftype_int_int, AVR32_BUILTIN_MVCR_W);
- + def_builtin ("__builtin_mvrc_w", void_ftype_int_int_int,
- + AVR32_BUILTIN_MVRC_W);
- + def_builtin ("__builtin_mvcr_d", longlong_ftype_int_int,
- + AVR32_BUILTIN_MVCR_D);
- + def_builtin ("__builtin_mvrc_d", void_ftype_int_int_longlong,
- + AVR32_BUILTIN_MVRC_D);
- + def_builtin ("__builtin_sats", int_ftype_int_int_int, AVR32_BUILTIN_SATS);
- + def_builtin ("__builtin_satu", int_ftype_int_int_int, AVR32_BUILTIN_SATU);
- + def_builtin ("__builtin_satrnds", int_ftype_int_int_int,
- + AVR32_BUILTIN_SATRNDS);
- + def_builtin ("__builtin_satrndu", int_ftype_int_int_int,
- + AVR32_BUILTIN_SATRNDU);
- + def_builtin ("__builtin_musfr", void_ftype_int, AVR32_BUILTIN_MUSFR);
- + def_builtin ("__builtin_mustr", int_ftype_void, AVR32_BUILTIN_MUSTR);
- + def_builtin ("__builtin_macsathh_w", int_ftype_int_short_short,
- + AVR32_BUILTIN_MACSATHH_W);
- + def_builtin ("__builtin_macwh_d", longlong_ftype_longlong_int_short,
- + AVR32_BUILTIN_MACWH_D);
- + def_builtin ("__builtin_machh_d", longlong_ftype_longlong_short_short,
- + AVR32_BUILTIN_MACHH_D);
- + def_builtin ("__builtin_mems", void_ftype_ptr_int, AVR32_BUILTIN_MEMS);
- + def_builtin ("__builtin_memt", void_ftype_ptr_int, AVR32_BUILTIN_MEMT);
- + def_builtin ("__builtin_memc", void_ftype_ptr_int, AVR32_BUILTIN_MEMC);
- + def_builtin ("__builtin_sleep", void_ftype_int, AVR32_BUILTIN_SLEEP);
- + def_builtin ("__builtin_avr32_delay_cycles", void_ftype_int, AVR32_BUILTIN_DELAY_CYCLES);
- +
- + /* Add all builtins that are more or less simple operations on two
- + operands. */
- + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
- + {
- + /* Use one of the operands; the target can have a different mode for
- + mask-generating compares. */
- +
- + if (d->name == 0)
- + continue;
- +
- + def_mbuiltin (d->mask, d->name, *(d->ftype), d->code);
- + }
- +}
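- +
- +/* Usage sketch (illustrative only): the machine builtins registered above are
- +   called like ordinary functions; __builtin_mfsr and __builtin_mtsr require a
- +   compile-time constant register address (0 below is just a placeholder), and
- +   __builtin_bswap_32 (0x11223344) evaluates to 0x44332211.
- +
- +     int sr = __builtin_mfsr (0);
- +     __builtin_mtsr (0, sr);
- +     int swapped = __builtin_bswap_32 (0x11223344);
- +*/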
- +
- +
- +/* Subroutine of avr32_expand_builtin to take care of binop insns. */
- +static rtx
- +avr32_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
- +{
- + rtx pat;
- + tree arg0 = CALL_EXPR_ARG (exp,0);
- + tree arg1 = CALL_EXPR_ARG (exp,1);
- + rtx op0 = expand_normal (arg0);
- + rtx op1 = expand_normal (arg1);
- + enum machine_mode tmode = insn_data[icode].operand[0].mode;
- + enum machine_mode mode0 = insn_data[icode].operand[1].mode;
- + enum machine_mode mode1 = insn_data[icode].operand[2].mode;
- +
- + if (!target
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- +
- + /* In case the insn wants input operands in modes different from the
- + result, convert them. */
- + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
- + {
- + /* If op0 is already a reg we must cast it to the correct mode. */
- + if (REG_P (op0))
- + op0 = convert_to_mode (mode0, op0, 1);
- + else
- + op0 = copy_to_mode_reg (mode0, op0);
- + }
- + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
- + {
- + /* If op1 is already a reg we must cast it to the correct mode. */
- + if (REG_P (op1))
- + op1 = convert_to_mode (mode1, op1, 1);
- + else
- + op1 = copy_to_mode_reg (mode1, op1);
- + }
- + pat = GEN_FCN (icode) (target, op0, op1);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return target;
- +}
- +
- +
- +/* Expand an expression EXP that calls a built-in function,
- + with result going to TARGET if that's convenient
- + (and in mode MODE if that's convenient).
- + SUBTARGET may be used as the target for computing one of EXP's operands.
- + IGNORE is nonzero if the value is to be ignored. */
- +rtx
- +avr32_expand_builtin (tree exp,
- + rtx target,
- + rtx subtarget ATTRIBUTE_UNUSED,
- + enum machine_mode mode ATTRIBUTE_UNUSED,
- + int ignore ATTRIBUTE_UNUSED)
- +{
- + const struct builtin_description *d;
- + unsigned int i;
- + enum insn_code icode = 0;
- + tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
- + tree arg0, arg1, arg2;
- + rtx op0, op1, op2, pat;
- + enum machine_mode tmode, mode0, mode1;
- + enum machine_mode arg0_mode;
- + int fcode = DECL_FUNCTION_CODE (fndecl);
- +
- + switch (fcode)
- + {
- + default:
- + break;
- +
- + case AVR32_BUILTIN_SATS:
- + case AVR32_BUILTIN_SATU:
- + case AVR32_BUILTIN_SATRNDS:
- + case AVR32_BUILTIN_SATRNDU:
- + {
- + const char *fname;
- + switch (fcode)
- + {
- + default:
- + case AVR32_BUILTIN_SATS:
- + icode = CODE_FOR_sats;
- + fname = "sats";
- + break;
- + case AVR32_BUILTIN_SATU:
- + icode = CODE_FOR_satu;
- + fname = "satu";
- + break;
- + case AVR32_BUILTIN_SATRNDS:
- + icode = CODE_FOR_satrnds;
- + fname = "satrnds";
- + break;
- + case AVR32_BUILTIN_SATRNDU:
- + icode = CODE_FOR_satrndu;
- + fname = "satrndu";
- + break;
- + }
- +
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + arg2 = CALL_EXPR_ARG (exp,2);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- + op2 = expand_normal (arg2);
- +
- + tmode = insn_data[icode].operand[0].mode;
- +
- +
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- +
- +
- + if (!(*insn_data[icode].operand[0].predicate) (op0, GET_MODE (op0)))
- + {
- + op0 = copy_to_mode_reg (insn_data[icode].operand[0].mode, op0);
- + }
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
- + {
- + error ("Parameter 2 to __builtin_%s should be a constant number.",
- + fname);
- + return NULL_RTX;
- + }
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op2, SImode))
- + {
- + error ("Parameter 3 to __builtin_%s should be a constant number.",
- + fname);
- + return NULL_RTX;
- + }
- +
- + emit_move_insn (target, op0);
- + pat = GEN_FCN (icode) (target, op1, op2);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- +
- + return target;
- + }
- + case AVR32_BUILTIN_MUSTR:
- + icode = CODE_FOR_mustr;
- + tmode = insn_data[icode].operand[0].mode;
- +
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- + pat = GEN_FCN (icode) (target);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return target;
- +
- + case AVR32_BUILTIN_MFSR:
- + icode = CODE_FOR_mfsr;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + op0 = expand_normal (arg0);
- + tmode = insn_data[icode].operand[0].mode;
- + mode0 = insn_data[icode].operand[1].mode;
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
- + {
- + error ("Parameter 1 to __builtin_mfsr must be a constant number");
- + }
- +
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- + pat = GEN_FCN (icode) (target, op0);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return target;
- + case AVR32_BUILTIN_MTSR:
- + icode = CODE_FOR_mtsr;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- + mode0 = insn_data[icode].operand[0].mode;
- + mode1 = insn_data[icode].operand[1].mode;
- +
- + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
- + {
- + error ("Parameter 1 to __builtin_mtsr must be a constant number");
- + return gen_reg_rtx (mode0);
- + }
- + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
- + op1 = copy_to_mode_reg (mode1, op1);
- + pat = GEN_FCN (icode) (op0, op1);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return NULL_RTX;
- + case AVR32_BUILTIN_MFDR:
- + icode = CODE_FOR_mfdr;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + op0 = expand_normal (arg0);
- + tmode = insn_data[icode].operand[0].mode;
- + mode0 = insn_data[icode].operand[1].mode;
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
- + {
- + error ("Parameter 1 to __builtin_mfdr must be a constant number");
- + }
- +
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- + pat = GEN_FCN (icode) (target, op0);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return target;
- + case AVR32_BUILTIN_MTDR:
- + icode = CODE_FOR_mtdr;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- + mode0 = insn_data[icode].operand[0].mode;
- + mode1 = insn_data[icode].operand[1].mode;
- +
- + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
- + {
- + error ("Parameter 1 to __builtin_mtdr must be a constant number");
- + return gen_reg_rtx (mode0);
- + }
- + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
- + op1 = copy_to_mode_reg (mode1, op1);
- + pat = GEN_FCN (icode) (op0, op1);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return NULL_RTX;
- + case AVR32_BUILTIN_CACHE:
- + icode = CODE_FOR_cache;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- + mode0 = insn_data[icode].operand[0].mode;
- + mode1 = insn_data[icode].operand[1].mode;
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
- + {
- + error ("Parameter 2 to __builtin_cache must be a constant number");
- + return gen_reg_rtx (mode1);
- + }
- +
- + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
- + op0 = copy_to_mode_reg (mode0, op0);
- +
- + pat = GEN_FCN (icode) (op0, op1);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return NULL_RTX;
- + case AVR32_BUILTIN_SYNC:
- + case AVR32_BUILTIN_MUSFR:
- + case AVR32_BUILTIN_SSRF:
- + case AVR32_BUILTIN_CSRF:
- + {
- + const char *fname;
- + switch (fcode)
- + {
- + default:
- + case AVR32_BUILTIN_SYNC:
- + icode = CODE_FOR_sync;
- + fname = "sync";
- + break;
- + case AVR32_BUILTIN_MUSFR:
- + icode = CODE_FOR_musfr;
- + fname = "musfr";
- + break;
- + case AVR32_BUILTIN_SSRF:
- + icode = CODE_FOR_ssrf;
- + fname = "ssrf";
- + break;
- + case AVR32_BUILTIN_CSRF:
- + icode = CODE_FOR_csrf;
- + fname = "csrf";
- + break;
- + }
- +
- + arg0 = CALL_EXPR_ARG (exp,0);
- + op0 = expand_normal (arg0);
- + mode0 = insn_data[icode].operand[0].mode;
- +
- + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
- + {
- + if (icode == CODE_FOR_musfr)
- + op0 = copy_to_mode_reg (mode0, op0);
- + else
- + {
- + error ("Parameter to __builtin_%s is illegal.", fname);
- + return gen_reg_rtx (mode0);
- + }
- + }
- + pat = GEN_FCN (icode) (op0);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return NULL_RTX;
- + }
- + case AVR32_BUILTIN_TLBR:
- + icode = CODE_FOR_tlbr;
- + pat = GEN_FCN (icode) (NULL_RTX);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return NULL_RTX;
- + case AVR32_BUILTIN_TLBS:
- + icode = CODE_FOR_tlbs;
- + pat = GEN_FCN (icode) (NULL_RTX);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return NULL_RTX;
- + case AVR32_BUILTIN_TLBW:
- + icode = CODE_FOR_tlbw;
- + pat = GEN_FCN (icode) (NULL_RTX);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return NULL_RTX;
- + case AVR32_BUILTIN_BREAKPOINT:
- + icode = CODE_FOR_breakpoint;
- + pat = GEN_FCN (icode) (NULL_RTX);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return NULL_RTX;
- + case AVR32_BUILTIN_XCHG:
- + icode = CODE_FOR_sync_lock_test_and_setsi;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- + tmode = insn_data[icode].operand[0].mode;
- + mode0 = insn_data[icode].operand[1].mode;
- + mode1 = insn_data[icode].operand[2].mode;
- +
- + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
- + {
- + op1 = copy_to_mode_reg (mode1, op1);
- + }
- +
- + op0 = force_reg (GET_MODE (op0), op0);
- + op0 = gen_rtx_MEM (GET_MODE (op0), op0);
- + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
- + {
- + error
- + ("Parameter 1 to __builtin_xchg must be a pointer to an integer.");
- + }
- +
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- + pat = GEN_FCN (icode) (target, op0, op1);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return target;
- + case AVR32_BUILTIN_LDXI:
- + icode = CODE_FOR_ldxi;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + arg2 = CALL_EXPR_ARG (exp,2);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- + op2 = expand_normal (arg2);
- + tmode = insn_data[icode].operand[0].mode;
- + mode0 = insn_data[icode].operand[1].mode;
- + mode1 = insn_data[icode].operand[2].mode;
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
- + {
- + op0 = copy_to_mode_reg (mode0, op0);
- + }
- +
- + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
- + {
- + op1 = copy_to_mode_reg (mode1, op1);
- + }
- +
- + if (!(*insn_data[icode].operand[3].predicate) (op2, SImode))
- + {
- + error
- + ("Parameter 3 to __builtin_ldxi must be a valid extract shift operand: (0|8|16|24)");
- + return gen_reg_rtx (mode0);
- + }
- +
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- + pat = GEN_FCN (icode) (target, op0, op1, op2);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return target;
- + case AVR32_BUILTIN_BSWAP16:
- + {
- + icode = CODE_FOR_bswap_16;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
- + mode0 = insn_data[icode].operand[1].mode;
- + if (arg0_mode != mode0)
- + arg0 = build1 (NOP_EXPR,
- + (*lang_hooks.types.type_for_mode) (mode0, 0), arg0);
- +
- + op0 = expand_expr (arg0, NULL_RTX, HImode, 0);
- + tmode = insn_data[icode].operand[0].mode;
- +
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
- + {
- + if ( CONST_INT_P (op0) )
- + {
- + HOST_WIDE_INT val = ( ((INTVAL (op0)&0x00ff) << 8) |
- + ((INTVAL (op0)&0xff00) >> 8) );
- + /* Sign extend 16-bit value to host wide int */
- + val <<= (HOST_BITS_PER_WIDE_INT - 16);
- + val >>= (HOST_BITS_PER_WIDE_INT - 16);
- + op0 = GEN_INT(val);
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- + emit_move_insn(target, op0);
- + return target;
- + }
- + else
- + op0 = copy_to_mode_reg (mode0, op0);
- + }
- +
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + {
- + target = gen_reg_rtx (tmode);
- + }
- +
- +
- + pat = GEN_FCN (icode) (target, op0);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- +
- + return target;
- + }
- + case AVR32_BUILTIN_BSWAP32:
- + {
- + icode = CODE_FOR_bswap_32;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + op0 = expand_normal (arg0);
- + tmode = insn_data[icode].operand[0].mode;
- + mode0 = insn_data[icode].operand[1].mode;
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
- + {
- + if ( CONST_INT_P (op0) )
- + {
- + HOST_WIDE_INT val = ( ((INTVAL (op0)&0x000000ff) << 24) |
- + ((INTVAL (op0)&0x0000ff00) << 8) |
- + ((INTVAL (op0)&0x00ff0000) >> 8) |
- + ((INTVAL (op0)&0xff000000) >> 24) );
- + /* Sign extend 32-bit value to host wide int */
- + val <<= (HOST_BITS_PER_WIDE_INT - 32);
- + val >>= (HOST_BITS_PER_WIDE_INT - 32);
- + op0 = GEN_INT(val);
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- + emit_move_insn(target, op0);
- + return target;
- + }
- + else
- + op0 = copy_to_mode_reg (mode0, op0);
- + }
- +
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- +
- +
- + pat = GEN_FCN (icode) (target, op0);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- +
- + return target;
- + }
- + case AVR32_BUILTIN_MVCR_W:
- + case AVR32_BUILTIN_MVCR_D:
- + {
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- +
- + if (fcode == AVR32_BUILTIN_MVCR_W)
- + icode = CODE_FOR_mvcrsi;
- + else
- + icode = CODE_FOR_mvcrdi;
- +
- + tmode = insn_data[icode].operand[0].mode;
- +
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op0, SImode))
- + {
- + error
- + ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
- + error ("Number should be between 0 and 7.");
- + return NULL_RTX;
- + }
- +
- + if (!(*insn_data[icode].operand[2].predicate) (op1, SImode))
- + {
- + error
- + ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
- + error ("Number should be between 0 and 15.");
- + return NULL_RTX;
- + }
- +
- + pat = GEN_FCN (icode) (target, op0, op1);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- +
- + return target;
- + }
- + case AVR32_BUILTIN_MACSATHH_W:
- + case AVR32_BUILTIN_MACWH_D:
- + case AVR32_BUILTIN_MACHH_D:
- + {
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + arg2 = CALL_EXPR_ARG (exp,2);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- + op2 = expand_normal (arg2);
- +
- + icode = ((fcode == AVR32_BUILTIN_MACSATHH_W) ? CODE_FOR_macsathh_w :
- + (fcode == AVR32_BUILTIN_MACWH_D) ? CODE_FOR_macwh_d :
- + CODE_FOR_machh_d);
- +
- + tmode = insn_data[icode].operand[0].mode;
- + mode0 = insn_data[icode].operand[1].mode;
- + mode1 = insn_data[icode].operand[2].mode;
- +
- +
- + if (!target
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- +
- + if (!(*insn_data[icode].operand[0].predicate) (op0, tmode))
- + {
- + /* If op0 is already a reg we must cast it to the correct mode. */
- + if (REG_P (op0))
- + op0 = convert_to_mode (tmode, op0, 1);
- + else
- + op0 = copy_to_mode_reg (tmode, op0);
- + }
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op1, mode0))
- + {
- + /* If op1 is already a reg we must cast it to the correct mode. */
- + if (REG_P (op1))
- + op1 = convert_to_mode (mode0, op1, 1);
- + else
- + op1 = copy_to_mode_reg (mode0, op1);
- + }
- +
- + if (!(*insn_data[icode].operand[2].predicate) (op2, mode1))
- + {
- + /* If op2 is already a reg we must cast it to the correct mode. */
- + if (REG_P (op2))
- + op2 = convert_to_mode (mode1, op2, 1);
- + else
- + op2 = copy_to_mode_reg (mode1, op2);
- + }
- +
- + emit_move_insn (target, op0);
- +
- + pat = GEN_FCN (icode) (target, op1, op2);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return target;
- + }
- + case AVR32_BUILTIN_MVRC_W:
- + case AVR32_BUILTIN_MVRC_D:
- + {
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + arg2 = CALL_EXPR_ARG (exp,2);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- + op2 = expand_normal (arg2);
- +
- + if (fcode == AVR32_BUILTIN_MVRC_W)
- + icode = CODE_FOR_mvrcsi;
- + else
- + icode = CODE_FOR_mvrcdi;
- +
- + if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
- + {
- + error ("Parameter 1 is not a valid coprocessor number.");
- + error ("Number should be between 0 and 7.");
- + return NULL_RTX;
- + }
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
- + {
- + error ("Parameter 2 is not a valid coprocessor register number.");
- + error ("Number should be between 0 and 15.");
- + return NULL_RTX;
- + }
- +
- + if (GET_CODE (op2) == CONST_INT
- + || GET_CODE (op2) == CONST
- + || GET_CODE (op2) == SYMBOL_REF || GET_CODE (op2) == LABEL_REF)
- + {
- + op2 = force_const_mem (insn_data[icode].operand[2].mode, op2);
- + }
- +
- + if (!(*insn_data[icode].operand[2].predicate) (op2, GET_MODE (op2)))
- + op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
- +
- +
- + pat = GEN_FCN (icode) (op0, op1, op2);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- +
- + return NULL_RTX;
- + }
- + case AVR32_BUILTIN_COP:
- + {
- + rtx op3, op4;
- + tree arg3, arg4;
- + icode = CODE_FOR_cop;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + arg2 = CALL_EXPR_ARG (exp,2);
- + arg3 = CALL_EXPR_ARG (exp,3);
- + arg4 = CALL_EXPR_ARG (exp,4);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- + op2 = expand_normal (arg2);
- + op3 = expand_normal (arg3);
- + op4 = expand_normal (arg4);
- +
- + if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
- + {
- + error
- + ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
- + error ("Number should be between 0 and 7.");
- + return NULL_RTX;
- + }
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
- + {
- + error
- + ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
- + error ("Number should be between 0 and 15.");
- + return NULL_RTX;
- + }
- +
- + if (!(*insn_data[icode].operand[2].predicate) (op2, SImode))
- + {
- + error
- + ("Parameter 3 to __builtin_cop is not a valid coprocessor register number.");
- + error ("Number should be between 0 and 15.");
- + return NULL_RTX;
- + }
- +
- + if (!(*insn_data[icode].operand[3].predicate) (op3, SImode))
- + {
- + error
- + ("Parameter 4 to __builtin_cop is not a valid coprocessor register number.");
- + error ("Number should be between 0 and 15.");
- + return NULL_RTX;
- + }
- +
- + if (!(*insn_data[icode].operand[4].predicate) (op4, SImode))
- + {
- + error
- + ("Parameter 5 to __builtin_cop is not a valid coprocessor operation.");
- + error ("Number should be between 0 and 127.");
- + return NULL_RTX;
- + }
- +
- + pat = GEN_FCN (icode) (op0, op1, op2, op3, op4);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- +
- + return target;
- + }
- +
- + case AVR32_BUILTIN_MEMS:
- + case AVR32_BUILTIN_MEMC:
- + case AVR32_BUILTIN_MEMT:
- + {
- + if (!TARGET_RMW)
- + error ("Trying to use __builtin_mem(s/c/t) when target does not support RMW insns.");
- +
- + switch (fcode) {
- + case AVR32_BUILTIN_MEMS:
- + icode = CODE_FOR_iorsi3;
- + break;
- + case AVR32_BUILTIN_MEMC:
- + icode = CODE_FOR_andsi3;
- + break;
- + case AVR32_BUILTIN_MEMT:
- + icode = CODE_FOR_xorsi3;
- + break;
- + }
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- + if ( GET_CODE (op0) == SYMBOL_REF )
- + /* This symbol must be RMW addressable. */
- + SYMBOL_REF_FLAGS (op0) |= (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT);
- + op0 = gen_rtx_MEM(SImode, op0);
- + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
- + mode0 = insn_data[icode].operand[1].mode;
- +
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
- + {
- + error ("Parameter 1 to __builtin_mem(s/c/t) must be a Ks15<<2 address or a rmw addressable symbol.");
- + }
- +
- + if ( !CONST_INT_P (op1)
- + || INTVAL (op1) > 31
- + || INTVAL (op1) < 0 )
- + error ("Parameter 2 to __builtin_mem(s/c/t) must be a constant between 0 and 31.");
- +
- + if ( fcode == AVR32_BUILTIN_MEMC )
- + op1 = GEN_INT((~(1 << INTVAL(op1)))&0xffffffff);
- + else
- + op1 = GEN_INT((1 << INTVAL(op1))&0xffffffff);
- + pat = GEN_FCN (icode) (op0, op0, op1);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return op0;
- + }
- +
- + case AVR32_BUILTIN_SLEEP:
- + {
- + arg0 = CALL_EXPR_ARG (exp, 0);
- + op0 = expand_normal (arg0);
- + int intval = INTVAL(op0);
- +
- + /* Check that the argument is a constant integer and that its value
- + is not negative. */
- +
- + if (!CONSTANT_P (op0))
- + error ("Parameter 1 to __builtin_sleep() is not a valid integer.");
- + if (intval < 0 )
- + error ("Parameter 1 to __builtin_sleep() should be an integer greater than 0.");
- +
- + int strncmpval = strncmp (avr32_part_name,"uc3l", 4);
- +
- + /* Check that op0 is less than 7 for uc3l* devices and less than 6 for
- + other devices; this is stricter than merely requiring the operand to
- + be less than 256. For more devices, add more such checks. */
- +
- + if ( strncmpval == 0 && intval >= 7)
- + error ("Parameter 1 to __builtin_sleep() should be less than or equal to 7.");
- + else if ( strncmpval != 0 && intval >= 6)
- + error ("Parameter 1 to __builtin_sleep() should be less than or equal to 6.");
- +
- + emit_insn (gen_sleep(op0));
- + return target;
- +
- + }
- + case AVR32_BUILTIN_DELAY_CYCLES:
- + {
- + arg0 = CALL_EXPR_ARG (exp, 0);
- + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- +
- + if (TARGET_ARCH_AP)
- + error (" __builtin_avr32_delay_cycles() not supported for \'%s\' architecture.", avr32_arch_name);
- + if (!CONSTANT_P (op0))
- + error ("Parameter 1 to __builtin_avr32_delay_cycles() should be an integer.");
- + emit_insn (gen_delay_cycles (op0));
- + return 0;
- +
- + }
- +
- + }
- +
- + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
- + if (d->code == fcode)
- + return avr32_expand_binop_builtin (d->icode, exp, target);
- +
- +
- + /* @@@ Should really do something sensible here. */
- + return NULL_RTX;
- +}
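- +
- +/* Usage sketch (illustrative only) for the RMW bit builtins expanded above;
- +   they require TARGET_RMW and a constant bit number in the range 0-31, and
- +   mems sets, memc clears and memt toggles the given bit:
- +
- +     static int flags;
- +     __builtin_mems (&flags, 3);
- +     __builtin_memc (&flags, 3);
- +     __builtin_memt (&flags, 3);
- +*/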
- +
- +
- +/* Handle an "interrupt" or "isr" attribute;
- + arguments as in struct attribute_spec.handler. */
- +static tree
- +avr32_handle_isr_attribute (tree * node, tree name, tree args,
- + int flags, bool * no_add_attrs)
- +{
- + if (DECL_P (*node))
- + {
- + if (TREE_CODE (*node) != FUNCTION_DECL)
- + {
- + warning (OPT_Wattributes,"`%s' attribute only applies to functions",
- + IDENTIFIER_POINTER (name));
- + *no_add_attrs = true;
- + }
- + /* FIXME: the argument if any is checked for type attributes; should it
- + be checked for decl ones? */
- + }
- + else
- + {
- + if (TREE_CODE (*node) == FUNCTION_TYPE
- + || TREE_CODE (*node) == METHOD_TYPE)
- + {
- + if (avr32_isr_value (args) == AVR32_FT_UNKNOWN)
- + {
- + warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
- + *no_add_attrs = true;
- + }
- + }
- + else if (TREE_CODE (*node) == POINTER_TYPE
- + && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
- + || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
- + && avr32_isr_value (args) != AVR32_FT_UNKNOWN)
- + {
- + *node = build_variant_type_copy (*node);
- + TREE_TYPE (*node) = build_type_attribute_variant
- + (TREE_TYPE (*node),
- + tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
- + *no_add_attrs = true;
- + }
- + else
- + {
- + /* Possibly pass this attribute on from the type to a decl. */
- + if (flags & ((int) ATTR_FLAG_DECL_NEXT
- + | (int) ATTR_FLAG_FUNCTION_NEXT
- + | (int) ATTR_FLAG_ARRAY_NEXT))
- + {
- + *no_add_attrs = true;
- + return tree_cons (name, args, NULL_TREE);
- + }
- + else
- + {
- + warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
- + }
- + }
- + }
- +
- + return NULL_TREE;
- +}
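- +
- +/* Usage sketch (illustrative only): the attribute handled above is attached
- +   to a function declaration or type, with an optional argument (interpreted
- +   by avr32_isr_value) selecting the shadowed-register model:
- +
- +     void __attribute__ ((interrupt)) timer_isr (void);
- +*/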
- +
- +
- +/* Handle an attribute requiring a FUNCTION_DECL;
- + arguments as in struct attribute_spec.handler. */
- +static tree
- +avr32_handle_fndecl_attribute (tree * node, tree name,
- + tree args,
- + int flags ATTRIBUTE_UNUSED,
- + bool * no_add_attrs)
- +{
- + if (TREE_CODE (*node) != FUNCTION_DECL)
- + {
- + warning (OPT_Wattributes,"%qs attribute only applies to functions",
- + IDENTIFIER_POINTER (name));
- + *no_add_attrs = true;
- + return NULL_TREE;
- + }
- +
- + fndecl_attribute_args = args;
- + if (args == NULL_TREE)
- + return NULL_TREE;
- +
- + tree value = TREE_VALUE (args);
- + if (TREE_CODE (value) != INTEGER_CST)
- + {
- + warning (OPT_Wattributes,
- + "argument of %qs attribute is not an integer constant",
- + IDENTIFIER_POINTER (name));
- + *no_add_attrs = true;
- + }
- +
- + return NULL_TREE;
- +}
- +
- +
- +/* Handle an acall attribute;
- + arguments as in struct attribute_spec.handler. */
- +
- +static tree
- +avr32_handle_acall_attribute (tree * node, tree name,
- + tree args ATTRIBUTE_UNUSED,
- + int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
- +{
- + if (TREE_CODE (*node) == FUNCTION_TYPE || TREE_CODE (*node) == METHOD_TYPE)
- + {
- + warning (OPT_Wattributes,"`%s' attribute not yet supported...",
- + IDENTIFIER_POINTER (name));
- + *no_add_attrs = true;
- + return NULL_TREE;
- + }
- +
- + warning (OPT_Wattributes,"`%s' attribute only applies to functions",
- + IDENTIFIER_POINTER (name));
- + *no_add_attrs = true;
- + return NULL_TREE;
- +}
- +
- +
- +bool
- +avr32_flashvault_call(tree decl)
- +{
- + tree attributes;
- + tree fv_attribute;
- + tree vector_tree;
- + unsigned int vector;
- +
- + if (decl && TREE_CODE (decl) == FUNCTION_DECL)
- + {
- + attributes = DECL_ATTRIBUTES(decl);
- + fv_attribute = lookup_attribute ("flashvault", attributes);
- + if (fv_attribute != NULL_TREE)
- + {
- + /* Get the attribute parameter, i.e. the function vector number.
- + There is probably an easier, standard way to retrieve it. */
- + vector_tree = TREE_VALUE(fv_attribute);
- + if (vector_tree != NULL_TREE)
- + {
- + vector = (unsigned int)TREE_INT_CST_LOW(TREE_VALUE(vector_tree));
- + fprintf (asm_out_file,
- + "\tmov\tr8, lo(%i)\t# Load vector number for sscall.\n",
- + vector);
- + }
- +
- + fprintf (asm_out_file,
- + "\tsscall\t# Secure system call.\n");
- +
- + return true;
- + }
- + }
- +
- + return false;
- +}
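- +
- +/* Usage sketch (illustrative only): a call to a function carrying the
- +   flashvault attribute is emitted as the mov r8 / sscall sequence above,
- +   with the attribute argument used as the vector number:
- +
- +     void __attribute__ ((flashvault (1))) secure_service (void);
- +     ...
- +     secure_service ();
- +*/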
- +
- +
- +static bool has_attribute_p (tree decl, const char *name)
- +{
- + if (decl && TREE_CODE (decl) == FUNCTION_DECL)
- + {
- + return (lookup_attribute (name, DECL_ATTRIBUTES(decl)) != NULL_TREE);
- + }
- + return false;
- +}
- +
- +
- +/* Return 0 if the attributes for two types are incompatible, 1 if they
- + are compatible, and 2 if they are nearly compatible (which causes a
- + warning to be generated). */
- +static int
- +avr32_comp_type_attributes (tree type1, tree type2)
- +{
- + bool acall1, acall2, isr1, isr2, naked1, naked2, fv1, fv2, fvimpl1, fvimpl2;
- +
- + /* Check for mismatch of non-default calling convention. */
- + if (TREE_CODE (type1) != FUNCTION_TYPE)
- + return 1;
- +
- + /* Check for mismatched call attributes. */
- + acall1 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type1)) != NULL;
- + acall2 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type2)) != NULL;
- + naked1 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type1)) != NULL;
- + naked2 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type2)) != NULL;
- + fv1 = lookup_attribute ("flashvault", TYPE_ATTRIBUTES (type1)) != NULL;
- + fv2 = lookup_attribute ("flashvault", TYPE_ATTRIBUTES (type2)) != NULL;
- + fvimpl1 = lookup_attribute ("flashvault_impl", TYPE_ATTRIBUTES (type1)) != NULL;
- + fvimpl2 = lookup_attribute ("flashvault_impl", TYPE_ATTRIBUTES (type2)) != NULL;
- + isr1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
- + if (!isr1)
- + isr1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
- +
- + isr2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
- + if (!isr2)
- + isr2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
- +
- + if ((acall1 && isr2)
- + || (acall2 && isr1)
- + || (naked1 && isr2)
- + || (naked2 && isr1)
- + || (fv1 && isr2)
- + || (fv2 && isr1)
- + || (fvimpl1 && isr2)
- + || (fvimpl2 && isr1)
- + || (fv1 && fvimpl2)
- + || (fv2 && fvimpl1)
- + )
- + return 0;
- +
- + return 1;
- +}
- +
- +
- +/* Computes the type of the current function. */
- +static unsigned long
- +avr32_compute_func_type (void)
- +{
- + unsigned long type = AVR32_FT_UNKNOWN;
- + tree a;
- + tree attr;
- +
- + if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
- + abort ();
- +
- + /* Decide if the current function is volatile. Such functions never
- + return, and many memory cycles can be saved by not storing register
- + values that will never be needed again. This optimization was added to
- + speed up context switching in a kernel application. */
- + if (optimize > 0
- + && TREE_NOTHROW (current_function_decl)
- + && TREE_THIS_VOLATILE (current_function_decl))
- + type |= AVR32_FT_VOLATILE;
- +
- + if (cfun->static_chain_decl != NULL)
- + type |= AVR32_FT_NESTED;
- +
- + attr = DECL_ATTRIBUTES (current_function_decl);
- +
- + a = lookup_attribute ("isr", attr);
- + if (a == NULL_TREE)
- + a = lookup_attribute ("interrupt", attr);
- +
- + if (a == NULL_TREE)
- + type |= AVR32_FT_NORMAL;
- + else
- + type |= avr32_isr_value (TREE_VALUE (a));
- +
- +
- + a = lookup_attribute ("acall", attr);
- + if (a != NULL_TREE)
- + type |= AVR32_FT_ACALL;
- +
- + a = lookup_attribute ("naked", attr);
- + if (a != NULL_TREE)
- + type |= AVR32_FT_NAKED;
- +
- + a = lookup_attribute ("flashvault", attr);
- + if (a != NULL_TREE)
- + type |= AVR32_FT_FLASHVAULT;
- +
- + a = lookup_attribute ("flashvault_impl", attr);
- + if (a != NULL_TREE)
- + type |= AVR32_FT_FLASHVAULT_IMPL;
- +
- + return type;
- +}
- +
- +
- +/* Returns the type of the current function. */
- +static unsigned long
- +avr32_current_func_type (void)
- +{
- + if (AVR32_FUNC_TYPE (cfun->machine->func_type) == AVR32_FT_UNKNOWN)
- + cfun->machine->func_type = avr32_compute_func_type ();
- +
- + return cfun->machine->func_type;
- +}
- +
- +
- +/*
- +This target hook should return true if we should not pass type solely
- +in registers. The file expr.h provides a default definition that is usually
- +appropriate; refer to expr.h for additional documentation.
- +*/
- +bool
- +avr32_must_pass_in_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type)
- +{
- + if (type && AGGREGATE_TYPE_P (type)
- + /* If the alignment is less than the size then pass in the struct on
- + the stack. */
- + && ((unsigned int) TYPE_ALIGN_UNIT (type) <
- + (unsigned int) int_size_in_bytes (type))
- + /* If we support unaligned word accesses then structs of size 4 and 8
- + can have any alignment and still be passed in registers. */
- + && !(TARGET_UNALIGNED_WORD
- + && (int_size_in_bytes (type) == 4
- + || int_size_in_bytes (type) == 8))
- + /* Double word structs need only a word alignment. */
- + && !(int_size_in_bytes (type) == 8 && TYPE_ALIGN_UNIT (type) >= 4))
- + return true;
- +
- + if (type && AGGREGATE_TYPE_P (type)
- + /* Structs of size 3,5,6,7 are always passed on the stack. */
- + && (int_size_in_bytes (type) == 3
- + || int_size_in_bytes (type) == 5
- + || int_size_in_bytes (type) == 6 || int_size_in_bytes (type) == 7))
- + return true;
- +
- +
- + return (type && TREE_ADDRESSABLE (type));
- +}
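- +
- +/* Worked example (illustrative only): a packed 12-byte struct with byte
- +   alignment is forced onto the stack by the first test (alignment 1 < size
- +   12), while an ordinary 8-byte struct with word alignment falls through
- +   both tests and may be passed in registers. */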
- +
- +
- +bool
- +avr32_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
- +{
- + return true;
- +}
- +
- +
- +/*
- + This target hook should return true if an argument at the position indicated
- + by cum should be passed by reference. This predicate is queried after target
- + independent reasons for being passed by reference, such as TREE_ADDRESSABLE (type).
- +
- + If the hook returns true, a copy of that argument is made in memory and a
- + pointer to the argument is passed instead of the argument itself. The pointer
- + is passed in whatever way is appropriate for passing a pointer to that type.
- +*/
- +bool
- +avr32_pass_by_reference (CUMULATIVE_ARGS * cum ATTRIBUTE_UNUSED,
- + enum machine_mode mode ATTRIBUTE_UNUSED,
- + tree type, bool named ATTRIBUTE_UNUSED)
- +{
- + return (type && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST));
- +}
- +
- +
- +static int
- +avr32_arg_partial_bytes (CUMULATIVE_ARGS * pcum ATTRIBUTE_UNUSED,
- + enum machine_mode mode ATTRIBUTE_UNUSED,
- + tree type ATTRIBUTE_UNUSED,
- + bool named ATTRIBUTE_UNUSED)
- +{
- + return 0;
- +}
- +
- +
- +struct gcc_target targetm = TARGET_INITIALIZER;
- +
- +/*
- + Table used to convert between the register numbers used in the assembler
- + instructions and the register numbers used in gcc.
- +*/
- +const int avr32_function_arg_reglist[] = {
- + INTERNAL_REGNUM (12),
- + INTERNAL_REGNUM (11),
- + INTERNAL_REGNUM (10),
- + INTERNAL_REGNUM (9),
- + INTERNAL_REGNUM (8)
- +};
- +
- +
- +rtx avr32_compare_op0 = NULL_RTX;
- +rtx avr32_compare_op1 = NULL_RTX;
- +rtx avr32_compare_operator = NULL_RTX;
- +rtx avr32_acc_cache = NULL_RTX;
- +/* type of branch to use */
- +enum avr32_cmp_type avr32_branch_type;
- +
- +
- +/*
- + Returns nonzero if it is allowed to store a value of mode mode in hard
- + register number regno.
- +*/
- +int
- +avr32_hard_regno_mode_ok (int regnr, enum machine_mode mode)
- +{
- + switch (mode)
- + {
- + case DImode: /* long long */
- + case DFmode: /* double */
- + case SCmode: /* __complex__ float */
- + case CSImode: /* __complex__ int */
- + if (regnr < 4)
- + { /* long long int not supported in r12, sp, lr or pc. */
- + return 0;
- + }
- + else
- + {
- + /* long long int has to be placed in even registers. */
- + if (regnr % 2)
- + return 0;
- + else
- + return 1;
- + }
- + case CDImode: /* __complex__ long long */
- + case DCmode: /* __complex__ double */
- + case TImode: /* 16 bytes */
- + if (regnr < 7)
- + return 0;
- + else if (regnr % 2)
- + return 0;
- + else
- + return 1;
- + default:
- + return 1;
- + }
- +}
- +
- +
- +int
- +avr32_rnd_operands (rtx add, rtx shift)
- +{
- + if (GET_CODE (shift) == CONST_INT &&
- + GET_CODE (add) == CONST_INT && INTVAL (shift) > 0)
- + {
- + if ((1 << (INTVAL (shift) - 1)) == INTVAL (add))
- + return TRUE;
- + }
- +
- + return FALSE;
- +}
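- +
- +/* Worked example (illustrative only): with shift = 4 and add = 8 the test
- +   1 << (4 - 1) == 8 holds, i.e. the add operand is exactly half the
- +   rounding step 1 << shift, so the pair is accepted. */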
- +
- +
- +int
- +avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c, const char *str)
- +{
- + switch (c)
- + {
- + case 'K':
- + case 'I':
- + {
- + HOST_WIDE_INT min_value = 0, max_value = 0;
- + char size_str[3];
- + int const_size;
- +
- + size_str[0] = str[2];
- + size_str[1] = str[3];
- + size_str[2] = '\0';
- + const_size = atoi (size_str);
- +
- + if (TOUPPER (str[1]) == 'U')
- + {
- + min_value = 0;
- + max_value = (1 << const_size) - 1;
- + }
- + else if (TOUPPER (str[1]) == 'S')
- + {
- + min_value = -(1 << (const_size - 1));
- + max_value = (1 << (const_size - 1)) - 1;
- + }
- +
- + if (c == 'I')
- + {
- + value = -value;
- + }
- +
- + if (value >= min_value && value <= max_value)
- + {
- + return 1;
- + }
- + break;
- + }
- + case 'M':
- + return avr32_mask_upper_bits_operand (GEN_INT (value), VOIDmode);
- + case 'J':
- + return avr32_hi16_immediate_operand (GEN_INT (value), VOIDmode);
- + case 'O':
- + return one_bit_set_operand (GEN_INT (value), VOIDmode);
- + case 'N':
- + return one_bit_cleared_operand (GEN_INT (value), VOIDmode);
- + case 'L':
- + /* The lower 16-bits are set. */
- + return ((value & 0xffff) == 0xffff) ;
- + }
- +
- + return 0;
- +}
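- +
- +/* Worked example (illustrative only): for the constraint string "Ks21" the
- +   size characters "21" give const_size = 21 and the signed range
- +   [-(1 << 20), (1 << 20) - 1], i.e. [-1048576, 1048575]; for 'I'
- +   constraints the value is negated before the same range check. */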
- +
- +
- +/* Compute mask of registers which need saving upon function entry. */
- +static unsigned long
- +avr32_compute_save_reg_mask (int push)
- +{
- + unsigned long func_type;
- + unsigned int save_reg_mask = 0;
- + unsigned int reg;
- +
- + func_type = avr32_current_func_type ();
- +
- + if (IS_INTERRUPT (func_type))
- + {
- + unsigned int max_reg = 12;
- +
- + /* Get the banking scheme for the interrupt */
- + switch (func_type)
- + {
- + case AVR32_FT_ISR_FULL:
- + max_reg = 0;
- + break;
- + case AVR32_FT_ISR_HALF:
- + max_reg = 7;
- + break;
- + case AVR32_FT_ISR_NONE:
- + max_reg = 12;
- + break;
- + }
- +
- + /* Interrupt functions must not corrupt any registers, even call
- + clobbered ones. If this is a leaf function we can just examine the
- + registers used by the RTL, but otherwise we have to assume that
- + whatever function is called might clobber anything, and so we have
- + to save all the call-clobbered registers as well. */
- +
- + /* Need not push the registers r8-r12 for AVR32A architectures, as this
- + is automatically done in hardware. We also do not have any shadow
- + registers. */
- + if (TARGET_UARCH_AVR32A)
- + {
- + max_reg = 7;
- + func_type = AVR32_FT_ISR_NONE;
- + }
- +
- + /* All registers which are used and are not shadowed must be saved. */
- + for (reg = 0; reg <= max_reg; reg++)
- + if (df_regs_ever_live_p (INTERNAL_REGNUM (reg))
- + || (!current_function_is_leaf
- + && call_used_regs[INTERNAL_REGNUM (reg)]))
- + save_reg_mask |= (1 << reg);
- +
- + /* Check LR */
- + if ((df_regs_ever_live_p (LR_REGNUM)
- + || !current_function_is_leaf || frame_pointer_needed)
- + /* Only non-shadowed register models */
- + && (func_type == AVR32_FT_ISR_NONE))
- + save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
- +
- + /* Make sure that the GOT register is pushed. */
- + if (max_reg >= ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM)
- + && crtl->uses_pic_offset_table)
- + save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
- +
- + }
- + else
- + {
- + int use_pushm = optimize_size;
- +
- + /* In the normal case we only need to save those registers which are
- + call saved and which are used by this function. */
- + for (reg = 0; reg <= 7; reg++)
- + if (df_regs_ever_live_p (INTERNAL_REGNUM (reg))
- + && !call_used_regs[INTERNAL_REGNUM (reg)])
- + save_reg_mask |= (1 << reg);
- +
- + /* Make sure that the GOT register is pushed. */
- + if (crtl->uses_pic_offset_table)
- + save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
- +
- +
- + /* If we optimize for size and do not have anonymous arguments: use
- + pushm/popm always. */
- + if (use_pushm)
- + {
- + if ((save_reg_mask & (1 << 0))
- + || (save_reg_mask & (1 << 1))
- + || (save_reg_mask & (1 << 2)) || (save_reg_mask & (1 << 3)))
- + save_reg_mask |= 0xf;
- +
- + if ((save_reg_mask & (1 << 4))
- + || (save_reg_mask & (1 << 5))
- + || (save_reg_mask & (1 << 6)) || (save_reg_mask & (1 << 7)))
- + save_reg_mask |= 0xf0;
- +
- + if ((save_reg_mask & (1 << 8)) || (save_reg_mask & (1 << 9)))
- + save_reg_mask |= 0x300;
- + }
- +
- +
- + /* Check LR */
- + if ((df_regs_ever_live_p (LR_REGNUM)
- + || !current_function_is_leaf
- + || (optimize_size
- + && save_reg_mask
- + && !crtl->calls_eh_return)
- + || frame_pointer_needed)
- + && !IS_FLASHVAULT (func_type))
- + {
- + if (push
- + /* Never pop LR into PC for functions which
- + calls __builtin_eh_return, since we need to
- + fix the SP after the restoring of the registers
- + and before returning. */
- + || crtl->calls_eh_return)
- + {
- + /* Push/Pop LR */
- + save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
- + }
- + else
- + {
- + /* Pop PC */
- + save_reg_mask |= (1 << ASM_REGNUM (PC_REGNUM));
- + }
- + }
- + }
- +
- +
- + /* Save registers so the exception handler can modify them. */
- + if (crtl->calls_eh_return)
- + {
- + unsigned int i;
- +
- + for (i = 0;; i++)
- + {
- + reg = EH_RETURN_DATA_REGNO (i);
- + if (reg == INVALID_REGNUM)
- + break;
- + save_reg_mask |= 1 << ASM_REGNUM (reg);
- + }
- + }
- +
- + return save_reg_mask;
- +}
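- +
- +/* Worked example (illustrative only): when optimizing for size the mask is
- +   widened to whole pushm groups; e.g. if only r0 and r6 need saving, the
- +   mask 0x41 is widened to 0xff (r0-r3 and r4-r7), trading a few extra
- +   stores for the shorter pushm encoding. */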
- +
- +
- +/* Compute total size in bytes of all saved registers. */
- +static int
- +avr32_get_reg_mask_size (int reg_mask)
- +{
- + int reg, size;
- + size = 0;
- +
- + for (reg = 0; reg <= 15; reg++)
- + if (reg_mask & (1 << reg))
- + size += 4;
- +
- + return size;
- +}
- +
- +
- +/* Return the first of the registers which are saved onto the stack upon
- + function entry, or -1 if no register is saved. */
- +static int
- +avr32_get_saved_reg (int save_reg_mask)
- +{
- + unsigned int reg;
- +
- + /* Find the first register which is saved in the saved_reg_mask */
- + for (reg = 0; reg <= 15; reg++)
- + if (save_reg_mask & (1 << reg))
- + return reg;
- +
- + return -1;
- +}
- +
- +
- +/* Return 1 if it is possible to return using a single instruction. */
- +int
- +avr32_use_return_insn (int iscond)
- +{
- + unsigned int func_type = avr32_current_func_type ();
- + unsigned long saved_int_regs;
- +
- + /* Never use a return instruction before reload has run. */
- + if (!reload_completed)
- + return 0;
- +
- + /* Must adjust the stack for vararg functions. */
- + if (crtl->args.info.uses_anonymous_args)
- + return 0;
- +
- + /* If there is a stack adjustment. */
- + if (get_frame_size ())
- + return 0;
- +
- + saved_int_regs = avr32_compute_save_reg_mask (TRUE);
- +
- + /* Conditional returns can not be performed in one instruction if we need
- + to restore registers from the stack */
- + if (iscond && saved_int_regs)
- + return 0;
- +
- + /* Conditional return can not be used for interrupt handlers. */
- + if (iscond && IS_INTERRUPT (func_type))
- + return 0;
- +
- + /* For interrupt handlers which need to pop registers */
- + if (saved_int_regs && IS_INTERRUPT (func_type))
- + return 0;
- +
- +
- + /* If there are saved registers but the LR isn't saved, then we need two
- + instructions for the return. */
- + if (saved_int_regs && !(saved_int_regs & (1 << ASM_REGNUM (LR_REGNUM))))
- + return 0;
- +
- +
- + return 1;
- +}
- +
- +
- +/* Generate some function prologue info in the assembly file. */
- +void
- +avr32_target_asm_function_prologue (FILE * f, HOST_WIDE_INT frame_size)
- +{
- + unsigned long func_type = avr32_current_func_type ();
- +
- + if (IS_NAKED (func_type))
- + fprintf (f,
- + "\t# Function is naked: Prologue and epilogue provided by programmer\n");
- +
- + if (IS_FLASHVAULT (func_type))
- + {
- + fprintf(f,
- + "\t.ident \"flashvault\"\n\t# Function is defined with flashvault attribute.\n");
- + }
- +
- + if (IS_FLASHVAULT_IMPL (func_type))
- + {
- + fprintf(f,
- + "\t.ident \"flashvault\"\n\t# Function is defined with flashvault_impl attribute.\n");
- +
- + /* Save information on flashvault function declaration. */
- + tree fv_attribute = lookup_attribute ("flashvault_impl", DECL_ATTRIBUTES(current_function_decl));
- + if (fv_attribute != NULL_TREE)
- + {
- + tree vector_tree = TREE_VALUE(fv_attribute);
- + if (vector_tree != NULL_TREE)
- + {
- + unsigned int vector_num;
- + const char * name;
- +
- + vector_num = (unsigned int) TREE_INT_CST_LOW (TREE_VALUE (vector_tree));
- +
- + name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
- +
- + flashvault_decl_list_add (vector_num, name);
- + }
- + }
- + }
- +
- + if (IS_INTERRUPT (func_type))
- + {
- + switch (func_type)
- + {
- + case AVR32_FT_ISR_FULL:
- + fprintf (f,
- + "\t# Interrupt Function: Fully shadowed register file\n");
- + break;
- + case AVR32_FT_ISR_HALF:
- + fprintf (f,
- + "\t# Interrupt Function: Half shadowed register file\n");
- + break;
- + default:
- + case AVR32_FT_ISR_NONE:
- + fprintf (f, "\t# Interrupt Function: No shadowed register file\n");
- + break;
- + }
- + }
- +
- +
- + fprintf (f, "\t# args = %i, frame = %li, pretend = %i\n",
- + crtl->args.size, frame_size,
- + crtl->args.pretend_args_size);
- +
- + fprintf (f, "\t# frame_needed = %i, leaf_function = %i\n",
- + frame_pointer_needed, current_function_is_leaf);
- +
- + fprintf (f, "\t# uses_anonymous_args = %i\n",
- + crtl->args.info.uses_anonymous_args);
- +
- + if (crtl->calls_eh_return)
- + fprintf (f, "\t# Calls __builtin_eh_return.\n");
- +
- +}
- +
- +
- +/* Generate and emit an insn that we will recognize as a pushm or stm.
- + Unfortunately, since this insn does not reflect very well the actual
- + semantics of the operation, we need to annotate the insn for the benefit
- + of DWARF2 frame unwind information. */
- +
- +int avr32_convert_to_reglist16 (int reglist8_vect);
- +
- +static rtx
- +emit_multi_reg_push (int reglist, int usePUSHM)
- +{
- + rtx insn;
- + rtx dwarf;
- + rtx tmp;
- + rtx reg;
- + int i;
- + int nr_regs;
- + int index = 0;
- +
- + if (usePUSHM)
- + {
- + insn = emit_insn (gen_pushm (gen_rtx_CONST_INT (SImode, reglist)));
- + reglist = avr32_convert_to_reglist16 (reglist);
- + }
- + else
- + {
- + insn = emit_insn (gen_stm (stack_pointer_rtx,
- + gen_rtx_CONST_INT (SImode, reglist),
- + gen_rtx_CONST_INT (SImode, 1)));
- + }
- +
- + nr_regs = avr32_get_reg_mask_size (reglist) / 4;
- + dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
- +
- + for (i = 15; i >= 0; i--)
- + {
- + if (reglist & (1 << i))
- + {
- + reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (i));
- + tmp = gen_rtx_SET (VOIDmode,
- + gen_rtx_MEM (SImode,
- + plus_constant (stack_pointer_rtx,
- + 4 * index)), reg);
- + RTX_FRAME_RELATED_P (tmp) = 1;
- + XVECEXP (dwarf, 0, 1 + index++) = tmp;
- + }
- + }
- +
- + tmp = gen_rtx_SET (SImode,
- + stack_pointer_rtx,
- + gen_rtx_PLUS (SImode,
- + stack_pointer_rtx,
- + GEN_INT (-4 * nr_regs)));
- + RTX_FRAME_RELATED_P (tmp) = 1;
- + XVECEXP (dwarf, 0, 0) = tmp;
- + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
- + REG_NOTES (insn));
- + return insn;
- +}
- +
- +rtx
- +avr32_gen_load_multiple (rtx * regs, int count, rtx from,
- + int write_back, int in_struct_p, int scalar_p)
- +{
- +
- + rtx result;
- + int i = 0, j;
- +
- + result =
- + gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + (write_back ? 1 : 0)));
- +
- + if (write_back)
- + {
- + XVECEXP (result, 0, 0)
- + = gen_rtx_SET (GET_MODE (from), from,
- + plus_constant (from, count * 4));
- + i = 1;
- + count++;
- + }
- +
- +
- + for (j = 0; i < count; i++, j++)
- + {
- + rtx unspec;
- + rtx mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4));
- + MEM_IN_STRUCT_P (mem) = in_struct_p;
- + MEM_SCALAR_P (mem) = scalar_p;
- + unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, mem), UNSPEC_LDM);
- + XVECEXP (result, 0, i) = gen_rtx_SET (VOIDmode, regs[j], unspec);
- + }
- +
- + return result;
- +}
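- +
- +/* Shape of the RTL generated above (illustrative only), for count = 2 with
- +   write_back set:
- +
- +     (parallel
- +       [(set from (plus from (const_int 8)))
- +        (set regs[0] (unspec [(mem from)] UNSPEC_LDM))
- +        (set regs[1] (unspec [(mem (plus from (const_int 4)))] UNSPEC_LDM))])
- +*/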
- +
- +
- +rtx
- +avr32_gen_store_multiple (rtx * regs, int count, rtx to,
- + int in_struct_p, int scalar_p)
- +{
- + rtx result;
- + int i = 0, j;
- +
- + result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
- +
- + for (j = 0; i < count; i++, j++)
- + {
- + rtx mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4));
- + MEM_IN_STRUCT_P (mem) = in_struct_p;
- + MEM_SCALAR_P (mem) = scalar_p;
- + XVECEXP (result, 0, i)
- + = gen_rtx_SET (VOIDmode, mem,
- + gen_rtx_UNSPEC (VOIDmode,
- + gen_rtvec (1, regs[j]),
- + UNSPEC_STORE_MULTIPLE));
- + }
- +
- + return result;
- +}
- +
- +
- +/* Move a block of memory if it is word aligned or we support unaligned
- + word memory accesses. The size must be at most 64 bytes. */
- +int
- +avr32_gen_movmemsi (rtx * operands)
- +{
- + HOST_WIDE_INT bytes_to_go;
- + rtx src, dst;
- + rtx st_src, st_dst;
- + int src_offset = 0, dst_offset = 0;
- + int block_size;
- + int dst_in_struct_p, src_in_struct_p;
- + int dst_scalar_p, src_scalar_p;
- + int unaligned;
- +
- + if (GET_CODE (operands[2]) != CONST_INT
- + || GET_CODE (operands[3]) != CONST_INT
- + || INTVAL (operands[2]) > 64
- + || ((INTVAL (operands[3]) & 3) && !TARGET_UNALIGNED_WORD))
- + return 0;
- +
- + unaligned = (INTVAL (operands[3]) & 3) != 0;
- +
- + block_size = 4;
- +
- + st_dst = XEXP (operands[0], 0);
- + st_src = XEXP (operands[1], 0);
- +
- + dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
- + dst_scalar_p = MEM_SCALAR_P (operands[0]);
- + src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
- + src_scalar_p = MEM_SCALAR_P (operands[1]);
- +
- + dst = copy_to_mode_reg (SImode, st_dst);
- + src = copy_to_mode_reg (SImode, st_src);
- +
- + bytes_to_go = INTVAL (operands[2]);
- +
- + while (bytes_to_go)
- + {
- + enum machine_mode move_mode;
- + /* (Seems to be a problem with reloads for the movti pattern so this is
- + disabled until that problem is resolved)
- + UPDATE: Problem seems to be solved now.... */
- + if (bytes_to_go >= GET_MODE_SIZE (TImode) && !unaligned
- + /* Do not emit ldm/stm for UC3 as ld.d/st.d is more optimal. */
- + && !TARGET_ARCH_UC)
- + move_mode = TImode;
- + else if ((bytes_to_go >= GET_MODE_SIZE (DImode)) && !unaligned)
- + move_mode = DImode;
- + else if (bytes_to_go >= GET_MODE_SIZE (SImode))
- + move_mode = SImode;
- + else
- + move_mode = QImode;
- +
- + {
- + rtx src_mem;
- + rtx dst_mem = gen_rtx_MEM (move_mode,
- + gen_rtx_PLUS (SImode, dst,
- + GEN_INT (dst_offset)));
- + dst_offset += GET_MODE_SIZE (move_mode);
- + if ( 0 /* This causes an error in GCC. There seems to be
- + something wrong in the gcse pass which causes REG_EQUIV notes
- + to be wrong, so this is disabled for now. */
- + && move_mode == TImode
- + && INTVAL (operands[2]) > GET_MODE_SIZE (TImode) )
- + {
- + src_mem = gen_rtx_MEM (move_mode,
- + gen_rtx_POST_INC (SImode, src));
- + }
- + else
- + {
- + src_mem = gen_rtx_MEM (move_mode,
- + gen_rtx_PLUS (SImode, src,
- + GEN_INT (src_offset)));
- + src_offset += GET_MODE_SIZE (move_mode);
- + }
- +
- + bytes_to_go -= GET_MODE_SIZE (move_mode);
- +
- + MEM_IN_STRUCT_P (dst_mem) = dst_in_struct_p;
- + MEM_SCALAR_P (dst_mem) = dst_scalar_p;
- +
- + MEM_IN_STRUCT_P (src_mem) = src_in_struct_p;
- + MEM_SCALAR_P (src_mem) = src_scalar_p;
- + emit_move_insn (dst_mem, src_mem);
- +
- + }
- + }
- +
- + return 1;
- +}
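- +
- +/* Worked example (illustrative only): a word-aligned 14-byte copy is split
- +   greedily by the loop above into one 8-byte (DImode), one 4-byte (SImode)
- +   and two 1-byte (QImode) moves. */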
- +
- +
- +/* Expand the prologue instruction. */
- +void
- +avr32_expand_prologue (void)
- +{
- + rtx insn, dwarf;
- + unsigned long saved_reg_mask;
- + int reglist8 = 0;
- +
- + /* Naked functions do not have a prologue. */
- + if (IS_NAKED (avr32_current_func_type ()))
- + return;
- +
- + saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
- +
- + if (saved_reg_mask)
- + {
- + /* Must push used registers. */
- +
- + /* Should we use PUSHM or STM? */
- + int usePUSHM = TRUE;
- + reglist8 = 0;
- + if (((saved_reg_mask & (1 << 0)) ||
- + (saved_reg_mask & (1 << 1)) ||
- + (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
- + {
- + /* One of R0-R3 should at least be pushed. */
- + if (((saved_reg_mask & (1 << 0)) &&
- + (saved_reg_mask & (1 << 1)) &&
- + (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
- + {
- + /* All should be pushed. */
- + reglist8 |= 0x01;
- + }
- + else
- + {
- + usePUSHM = FALSE;
- + }
- + }
- +
- + if (((saved_reg_mask & (1 << 4)) ||
- + (saved_reg_mask & (1 << 5)) ||
- + (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
- + {
- + /* One of R4-R7 should at least be pushed */
- + if (((saved_reg_mask & (1 << 4)) &&
- + (saved_reg_mask & (1 << 5)) &&
- + (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
- + {
- + if (usePUSHM)
- + /* All should be pushed */
- + reglist8 |= 0x02;
- + }
- + else
- + {
- + usePUSHM = FALSE;
- + }
- + }
- +
- + if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
- + {
- + /* One of R8-R9 should at least be pushed. */
- + if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
- + {
- + if (usePUSHM)
- + /* All should be pushed. */
- + reglist8 |= 0x04;
- + }
- + else
- + {
- + usePUSHM = FALSE;
- + }
- + }
- +
- + if (saved_reg_mask & (1 << 10))
- + reglist8 |= 0x08;
- +
- + if (saved_reg_mask & (1 << 11))
- + reglist8 |= 0x10;
- +
- + if (saved_reg_mask & (1 << 12))
- + reglist8 |= 0x20;
- +
- + if ((saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
- + && !IS_FLASHVAULT (avr32_current_func_type ()))
- + {
- + /* Push LR */
- + reglist8 |= 0x40;
- + }
- +
- + if (usePUSHM)
- + {
- + insn = emit_multi_reg_push (reglist8, TRUE);
- + }
- + else
- + {
- + insn = emit_multi_reg_push (saved_reg_mask, FALSE);
- + }
- + RTX_FRAME_RELATED_P (insn) = 1;
- +
- + /* Prevent this instruction from being scheduled after any other
- + instructions. */
- + emit_insn (gen_blockage ());
- + }
- +
- + /* Set frame pointer */
- + if (frame_pointer_needed)
- + {
- + insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
- + RTX_FRAME_RELATED_P (insn) = 1;
- + }
- +
- + if (get_frame_size () > 0)
- + {
- + if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks21"))
- + {
- + insn = emit_insn (gen_rtx_SET (SImode,
- + stack_pointer_rtx,
- + gen_rtx_PLUS (SImode,
- + stack_pointer_rtx,
- + gen_rtx_CONST_INT
- + (SImode,
- + -get_frame_size
- + ()))));
- + RTX_FRAME_RELATED_P (insn) = 1;
- + }
- + else
- + {
- + /* The immediate is larger than Ks21. We must either check if we can
- + use one of the pushed registers as temporary storage or we must
- + create a temporary register by pushing a register to the stack. */
- + rtx temp_reg, const_pool_entry, insn;
- + if (saved_reg_mask)
- + {
- + temp_reg =
- + gen_rtx_REG (SImode,
- + INTERNAL_REGNUM (avr32_get_saved_reg
- + (saved_reg_mask)));
- + }
- + else
- + {
- + temp_reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (7));
- + emit_move_insn (gen_rtx_MEM
- + (SImode,
- + gen_rtx_PRE_DEC (SImode, stack_pointer_rtx)),
- + temp_reg);
- + }
- +
- + const_pool_entry =
- + force_const_mem (SImode,
- + gen_rtx_CONST_INT (SImode, get_frame_size ()));
- + emit_move_insn (temp_reg, const_pool_entry);
- +
- + insn = emit_insn (gen_rtx_SET (SImode,
- + stack_pointer_rtx,
- + gen_rtx_MINUS (SImode,
- + stack_pointer_rtx,
- + temp_reg)));
- +
- + dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- + gen_rtx_PLUS (SImode, stack_pointer_rtx,
- + GEN_INT (-get_frame_size ())));
- + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
- + dwarf, REG_NOTES (insn));
- + RTX_FRAME_RELATED_P (insn) = 1;
- +
- + if (!saved_reg_mask)
- + {
- + insn =
- + emit_move_insn (temp_reg,
- + gen_rtx_MEM (SImode,
- + gen_rtx_POST_INC (SImode,
- + gen_rtx_REG
- + (SImode,
- + 13))));
- + }
- +
- + /* Mark the temp register as dead */
- + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, temp_reg,
- + REG_NOTES (insn));
- +
- +
- + }
- +
- + /* Prevent the stack adjustment from being scheduled after any
- + instructions using the frame pointer. */
- + emit_insn (gen_blockage ());
- + }
- +
- + /* Load GOT */
- + if (flag_pic)
- + {
- + avr32_load_pic_register ();
- +
- + /* gcc does not know that load or call instructions might use the pic
- + register so it might schedule these instructions before the loading
- + of the pic register. To avoid this, emit a barrier for now. TODO!
- + Find a better way to let gcc know which instructions might use
- + the pic register. */
- + emit_insn (gen_blockage ());
- + }
- + return;
- +}
- +
- +
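- +/* Set the return address of the current frame to SOURCE. If the return
- + address is not kept in LR it is written to the stack slot where LR was
- + saved; SCRATCH may be used as a temporary when computing that address. */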
- +void
- +avr32_set_return_address (rtx source, rtx scratch)
- +{
- + rtx addr;
- + unsigned long saved_regs;
- +
- + saved_regs = avr32_compute_save_reg_mask (TRUE);
- +
- + if (!(saved_regs & (1 << ASM_REGNUM (LR_REGNUM))))
- + emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
- + else
- + {
- + if (frame_pointer_needed)
- + addr = gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM);
- + else
- + if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks16"))
- + {
- + addr = plus_constant (stack_pointer_rtx, get_frame_size ());
- + }
- + else
- + {
- + emit_insn (gen_movsi (scratch, GEN_INT (get_frame_size ())));
- + addr = scratch;
- + }
- + emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
- + }
- +}
- +
- +
- +/* Return the length of INSN. LENGTH is the initial length computed by
- + attributes in the machine-description file. */
- +int
- +avr32_adjust_insn_length (rtx insn ATTRIBUTE_UNUSED,
- + int length ATTRIBUTE_UNUSED)
- +{
- + return length;
- +}
- +
- +
- +void
- +avr32_output_return_instruction (int single_ret_inst ATTRIBUTE_UNUSED,
- + int iscond ATTRIBUTE_UNUSED,
- + rtx cond ATTRIBUTE_UNUSED, rtx r12_imm)
- +{
- +
- + unsigned long saved_reg_mask;
- + int insert_ret = TRUE;
- + int reglist8 = 0;
- + int stack_adjustment = get_frame_size ();
- + unsigned int func_type = avr32_current_func_type ();
- + FILE *f = asm_out_file;
- +
- + /* Naked functions do not have an epilogue. */
- + if (IS_NAKED (func_type))
- + return;
- +
- + saved_reg_mask = avr32_compute_save_reg_mask (FALSE);
- +
- + /* Reset frame pointer */
- + if (stack_adjustment > 0)
- + {
- + if (avr32_const_ok_for_constraint_p (stack_adjustment, 'I', "Is21"))
- + {
- + fprintf (f, "\tsub\tsp, %i # Reset Frame Pointer\n",
- + -stack_adjustment);
- + }
- + else
- + {
- + /* TODO! Is it safe to use r8 as scratch?? */
- + fprintf (f, "\tmov\tr8, lo(%i) # Reset Frame Pointer\n",
- + -stack_adjustment);
- + fprintf (f, "\torh\tr8, hi(%i) # Reset Frame Pointer\n",
- + -stack_adjustment);
- + fprintf (f, "\tadd\tsp, r8 # Reset Frame Pointer\n");
- + }
- + }
- +
- + if (saved_reg_mask)
- + {
- + /* Must pop used registers */
- +
- + /* Should we use POPM or LDM? */
- + int usePOPM = TRUE;
- + if (((saved_reg_mask & (1 << 0)) ||
- + (saved_reg_mask & (1 << 1)) ||
- + (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
- + {
- + /* One of R0-R3 should at least be popped */
- + if (((saved_reg_mask & (1 << 0)) &&
- + (saved_reg_mask & (1 << 1)) &&
- + (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
- + {
- + /* All should be popped */
- + reglist8 |= 0x01;
- + }
- + else
- + {
- + usePOPM = FALSE;
- + }
- + }
- +
- + if (((saved_reg_mask & (1 << 4)) ||
- + (saved_reg_mask & (1 << 5)) ||
- + (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
- + {
- + /* One of R4-R7 should at least be popped */
- + if (((saved_reg_mask & (1 << 4)) &&
- + (saved_reg_mask & (1 << 5)) &&
- + (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
- + {
- + if (usePOPM)
- + /* All should be popped */
- + reglist8 |= 0x02;
- + }
- + else
- + {
- + usePOPM = FALSE;
- + }
- + }
- +
- + if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
- + {
- + /* One of R8-R9 should at least be popped */
- + if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
- + {
- + if (usePOPM)
- + /* All should be popped */
- + reglist8 |= 0x04;
- + }
- + else
- + {
- + usePOPM = FALSE;
- + }
- + }
- +
- + if (saved_reg_mask & (1 << 10))
- + reglist8 |= 0x08;
- +
- + if (saved_reg_mask & (1 << 11))
- + reglist8 |= 0x10;
- +
- + if (saved_reg_mask & (1 << 12))
- + reglist8 |= 0x20;
- +
- + if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
- + /* Pop LR */
- + reglist8 |= 0x40;
- +
- + if ((saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
- + && !IS_FLASHVAULT_IMPL (func_type))
- + /* Pop LR into PC. */
- + reglist8 |= 0x80;
- +
- + if (usePOPM)
- + {
- + char reglist[64]; /* 64 bytes should be enough... */
- + avr32_make_reglist8 (reglist8, (char *) reglist);
- +
- + if (reglist8 & 0x80)
- + /* This instruction is also a return */
- + insert_ret = FALSE;
- +
- + if (r12_imm && !insert_ret)
- + fprintf (f, "\tpopm\t%s, r12=%li\n", reglist, INTVAL (r12_imm));
- + else
- + fprintf (f, "\tpopm\t%s\n", reglist);
- +
- + }
- + else
- + {
- + char reglist[64]; /* 64 bytes should be enough... */
- + avr32_make_reglist16 (saved_reg_mask, (char *) reglist);
- + if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
- + /* This instruction is also a return */
- + insert_ret = FALSE;
- +
- + if (r12_imm && !insert_ret)
- + fprintf (f, "\tldm\tsp++, %s, r12=%li\n", reglist,
- + INTVAL (r12_imm));
- + else
- + fprintf (f, "\tldm\tsp++, %s\n", reglist);
- +
- + }
- +
- + }
- +
- + /* Stack adjustment for exception handler. */
- + if (crtl->calls_eh_return)
- + fprintf (f, "\tadd\tsp, r%d\n", ASM_REGNUM (EH_RETURN_STACKADJ_REGNO));
- +
- +
- + if (IS_INTERRUPT (func_type))
- + {
- + fprintf (f, "\trete\n");
- + }
- + else if (IS_FLASHVAULT (func_type))
- + {
- + /* Normal return from Secure System call, increment SS_RAR before
- + returning. Use R8 as scratch. */
- + fprintf (f,
- + "\t# Normal return from sscall.\n"
- + "\t# Increment SS_RAR before returning.\n"
- + "\t# Use R8 as scratch.\n"
- + "\tmfsr\tr8, 440\n"
- + "\tsub\tr8, -2\n"
- + "\tmtsr\t440, r8\n"
- + "\tretss\n");
- + }
- + else if (insert_ret)
- + {
- + if (r12_imm)
- + fprintf (f, "\tretal\t%li\n", INTVAL (r12_imm));
- + else
- + fprintf (f, "\tretal\tr12\n");
- + }
- +}
- +
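- +/* Build a comma separated string of register names in REGLIST16_STRING
- + from the 16-bit register mask REGLIST16_VECT. */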
- +void
- +avr32_make_reglist16 (int reglist16_vect, char *reglist16_string)
- +{
- + int i;
- + bool first_reg = true;
- + /* Make sure reglist16_string is empty. */
- + reglist16_string[0] = '\0';
- +
- + for (i = 0; i < 16; ++i)
- + {
- + if (reglist16_vect & (1 << i))
- + {
- + if (first_reg)
- + first_reg = false;
- + else
- + strcat (reglist16_string, ", ");
- + strcat (reglist16_string, reg_names[INTERNAL_REGNUM (i)]);
- + }
- + }
- +}
- +
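- +/* Convert an 8-bit pushm/popm register list encoding into the
- + corresponding 16-bit register mask. */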
- +int
- +avr32_convert_to_reglist16 (int reglist8_vect)
- +{
- + int reglist16_vect = 0;
- + if (reglist8_vect & 0x1)
- + reglist16_vect |= 0xF;
- + if (reglist8_vect & 0x2)
- + reglist16_vect |= 0xF0;
- + if (reglist8_vect & 0x4)
- + reglist16_vect |= 0x300;
- + if (reglist8_vect & 0x8)
- + reglist16_vect |= 0x400;
- + if (reglist8_vect & 0x10)
- + reglist16_vect |= 0x800;
- + if (reglist8_vect & 0x20)
- + reglist16_vect |= 0x1000;
- + if (reglist8_vect & 0x40)
- + reglist16_vect |= 0x4000;
- + if (reglist8_vect & 0x80)
- + reglist16_vect |= 0x8000;
- +
- + return reglist16_vect;
- +}
- +
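- +/* Build the register list string used by pushm/popm in REGLIST8_STRING
- + from the 8-bit encoding REGLIST8_VECT. */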
- +void
- +avr32_make_reglist8 (int reglist8_vect, char *reglist8_string)
- +{
- + /* Make sure reglist8_string is empty. */
- + reglist8_string[0] = '\0';
- +
- + if (reglist8_vect & 0x1)
- + strcpy (reglist8_string, "r0-r3");
- + if (reglist8_vect & 0x2)
- + strlen (reglist8_string) ? strcat (reglist8_string, ", r4-r7") :
- + strcpy (reglist8_string, "r4-r7");
- + if (reglist8_vect & 0x4)
- + strlen (reglist8_string) ? strcat (reglist8_string, ", r8-r9") :
- + strcpy (reglist8_string, "r8-r9");
- + if (reglist8_vect & 0x8)
- + strlen (reglist8_string) ? strcat (reglist8_string, ", r10") :
- + strcpy (reglist8_string, "r10");
- + if (reglist8_vect & 0x10)
- + strlen (reglist8_string) ? strcat (reglist8_string, ", r11") :
- + strcpy (reglist8_string, "r11");
- + if (reglist8_vect & 0x20)
- + strlen (reglist8_string) ? strcat (reglist8_string, ", r12") :
- + strcpy (reglist8_string, "r12");
- + if (reglist8_vect & 0x40)
- + strlen (reglist8_string) ? strcat (reglist8_string, ", lr") :
- + strcpy (reglist8_string, "lr");
- + if (reglist8_vect & 0x80)
- + strlen (reglist8_string) ? strcat (reglist8_string, ", pc") :
- + strcpy (reglist8_string, "pc");
- +}
- +
- +
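- +/* Return the register number holding the Nth EH return data value
- + (r8-r11), or INVALID_REGNUM if N is out of range. */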
- +int
- +avr32_eh_return_data_regno (int n)
- +{
- + if (n >= 0 && n <= 3)
- + return 8 + n;
- + else
- + return INVALID_REGNUM;
- +}
- +
- +
- +/* Compute the distance from register FROM to register TO.
- + These can be the arg pointer, the frame pointer or
- + the stack pointer.
- + Typical stack layout looks like this:
- +
- + old stack pointer -> | |
- + ----
- + | | \
- + | | saved arguments for
- + | | vararg functions
- + arg_pointer -> | | /
- + --
- + | | \
- + | | call saved
- + | | registers
- + | | /
- + frame ptr -> --
- + | | \
- + | | local
- + | | variables
- + stack ptr --> | | /
- + --
- + | | \
- + | | outgoing
- + | | arguments
- + | | /
- + --
- +
- + For a given function some or all of these stack components
- + may not be needed, giving rise to the possibility of
- + eliminating some of the registers.
- +
- + The values returned by this function must reflect the behaviour
- + of avr32_expand_prologue() and avr32_compute_save_reg_mask().
- +
- + The sign of the number returned reflects the direction of stack
- + growth, so the values are positive for all eliminations except
- + from the soft frame pointer to the hard frame pointer. */
- +int
- +avr32_initial_elimination_offset (int from, int to)
- +{
- + int i;
- + int call_saved_regs = 0;
- + unsigned long saved_reg_mask;
- + unsigned int local_vars = get_frame_size ();
- +
- + saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
- +
- + for (i = 0; i < 16; ++i)
- + {
- + if (saved_reg_mask & (1 << i))
- + call_saved_regs += 4;
- + }
- +
- + switch (from)
- + {
- + case ARG_POINTER_REGNUM:
- + switch (to)
- + {
- + case STACK_POINTER_REGNUM:
- + return call_saved_regs + local_vars;
- + case FRAME_POINTER_REGNUM:
- + return call_saved_regs;
- + default:
- + abort ();
- + }
- + case FRAME_POINTER_REGNUM:
- + switch (to)
- + {
- + case STACK_POINTER_REGNUM:
- + return local_vars;
- + default:
- + abort ();
- + }
- + default:
- + abort ();
- + }
- +}
- +
- +
- +/*
- + Returns an rtx used when passing the next argument to a function.
- + avr32_init_cumulative_args() and avr32_function_arg_advance() set which
- + register to use.
- +*/
- +rtx
- +avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode,
- + tree type, int named)
- +{
- + int index = -1;
- + //unsigned long func_type = avr32_current_func_type ();
- + //int last_reg_index = (IS_FLASHVAULT(func_type) || IS_FLASHVAULT_IMPL(func_type) || cum->flashvault_func ? LAST_CUM_REG_INDEX - 1 : LAST_CUM_REG_INDEX);
- + int last_reg_index = (cum->flashvault_func ? LAST_CUM_REG_INDEX - 1 : LAST_CUM_REG_INDEX);
- +
- + HOST_WIDE_INT arg_size, arg_rsize;
- + if (type)
- + {
- + arg_size = int_size_in_bytes (type);
- + }
- + else
- + {
- + arg_size = GET_MODE_SIZE (mode);
- + }
- + arg_rsize = PUSH_ROUNDING (arg_size);
- +
- + /*
- + The last time this macro is called, it is called with mode == VOIDmode,
- + and its result is passed to the call or call_value pattern as operands 2
- + and 3 respectively. */
- + if (mode == VOIDmode)
- + {
- + return gen_rtx_CONST_INT (SImode, 22); /* ToDo: fixme. */
- + }
- +
- + if ((*targetm.calls.must_pass_in_stack) (mode, type) || !named)
- + {
- + return NULL_RTX;
- + }
- +
- + if (arg_rsize == 8)
- + {
- + /* use r11:r10 or r9:r8. */
- + if (!(GET_USED_INDEX (cum, 1) || GET_USED_INDEX (cum, 2)))
- + index = 1;
- + else if ((last_reg_index == 4) &&
- + !(GET_USED_INDEX (cum, 3) || GET_USED_INDEX (cum, 4)))
- + index = 3;
- + else
- + index = -1;
- + }
- + else if (arg_rsize == 4)
- + { /* Use first available register */
- + index = 0;
- + while (index <= last_reg_index && GET_USED_INDEX (cum, index))
- + index++;
- + if (index > last_reg_index)
- + index = -1;
- + }
- +
- + SET_REG_INDEX (cum, index);
- +
- + if (GET_REG_INDEX (cum) >= 0)
- + return gen_rtx_REG (mode, avr32_function_arg_reglist[GET_REG_INDEX (cum)]);
- +
- + return NULL_RTX;
- +}
- +
- +
- +/* Set the register used for passing the first argument to a function. */
- +void
- +avr32_init_cumulative_args (CUMULATIVE_ARGS * cum,
- + tree fntype ATTRIBUTE_UNUSED,
- + rtx libname ATTRIBUTE_UNUSED,
- + tree fndecl)
- +{
- + /* Set all registers as unused. */
- + SET_INDEXES_UNUSED (cum);
- +
- + /* Reset uses_anonymous_args */
- + cum->uses_anonymous_args = 0;
- +
- + /* Reset size of stack pushed arguments */
- + cum->stack_pushed_args_size = 0;
- +
- + cum->flashvault_func = (fndecl && (has_attribute_p (fndecl,"flashvault") || has_attribute_p (fndecl,"flashvault_impl")));
- +}
- +
- +
- +/*
- + Set register used for passing the next argument to a function. Only the
- + Scratch Registers are used.
- +
- + number name
- + 15 r15 PC
- + 14 r14 LR
- + 13 r13 _SP_________
- + FIRST_CUM_REG 12 r12 _||_
- + 10 r11 ||
- + 11 r10 _||_ Scratch Registers
- + 8 r9 ||
- + LAST_SCRATCH_REG 9 r8 _\/_________
- + 6 r7 /\
- + 7 r6 ||
- + 4 r5 ||
- + 5 r4 ||
- + 2 r3 ||
- + 3 r2 ||
- + 0 r1 ||
- + 1 r0 _||_________
- +
- +*/
- +void
- +avr32_function_arg_advance (CUMULATIVE_ARGS * cum, enum machine_mode mode,
- + tree type, int named ATTRIBUTE_UNUSED)
- +{
- + HOST_WIDE_INT arg_size, arg_rsize;
- +
- + if (type)
- + {
- + arg_size = int_size_in_bytes (type);
- + }
- + else
- + {
- + arg_size = GET_MODE_SIZE (mode);
- + }
- + arg_rsize = PUSH_ROUNDING (arg_size);
- +
- + /* If the argument had to be passed in stack, no register is used. */
- + if ((*targetm.calls.must_pass_in_stack) (mode, type))
- + {
- + cum->stack_pushed_args_size += PUSH_ROUNDING (int_size_in_bytes (type));
- + return;
- + }
- +
- + /* Mark the used registers as "used". */
- + if (GET_REG_INDEX (cum) >= 0)
- + {
- + SET_USED_INDEX (cum, GET_REG_INDEX (cum));
- + if (arg_rsize == 8)
- + {
- + SET_USED_INDEX (cum, (GET_REG_INDEX (cum) + 1));
- + }
- + }
- + else
- + {
- + /* Had to use stack */
- + cum->stack_pushed_args_size += arg_rsize;
- + }
- +}
- +
- +
- +/*
- + Defines which direction to go to find the next register to use if the
- + argument is larger than one register or for arguments shorter than an
- + int which is not promoted, such as the last part of structures with
- + size not a multiple of 4. */
- +enum direction
- +avr32_function_arg_padding (enum machine_mode mode ATTRIBUTE_UNUSED,
- + tree type)
- +{
- + /* Pad upward for all aggregates except byte and halfword sized aggregates
- + which can be passed in registers. */
- + if (type
- + && AGGREGATE_TYPE_P (type)
- + && (int_size_in_bytes (type) != 1)
- + && !((int_size_in_bytes (type) == 2)
- + && TYPE_ALIGN_UNIT (type) >= 2)
- + && (int_size_in_bytes (type) & 0x3))
- + {
- + return upward;
- + }
- +
- + return downward;
- +}
- +
- +
- +/* Return a rtx used for the return value from a function call. */
- +rtx
- +avr32_function_value (tree type, tree func, bool outgoing ATTRIBUTE_UNUSED)
- +{
- + if (avr32_return_in_memory (type, func))
- + return NULL_RTX;
- +
- + if (int_size_in_bytes (type) <= 4)
- + {
- + enum machine_mode mode = TYPE_MODE (type);
- + int unsignedp = 0;
- + PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
- + return gen_rtx_REG (mode, RET_REGISTER);
- + }
- + else if (int_size_in_bytes (type) <= 8)
- + return gen_rtx_REG (TYPE_MODE (type), INTERNAL_REGNUM (11));
- +
- + return NULL_RTX;
- +}
- +
- +
- +/* Return a rtx used for the return value from a library function call. */
- +rtx
- +avr32_libcall_value (enum machine_mode mode)
- +{
- +
- + if (GET_MODE_SIZE (mode) <= 4)
- + return gen_rtx_REG (mode, RET_REGISTER);
- + else if (GET_MODE_SIZE (mode) <= 8)
- + return gen_rtx_REG (mode, INTERNAL_REGNUM (11));
- + else
- + return NULL_RTX;
- +}
- +
- +
- +/* Return TRUE if X references a SYMBOL_REF. */
- +int
- +symbol_mentioned_p (rtx x)
- +{
- + const char *fmt;
- + int i;
- +
- + if (GET_CODE (x) == SYMBOL_REF)
- + return 1;
- +
- + fmt = GET_RTX_FORMAT (GET_CODE (x));
- +
- + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
- + {
- + if (fmt[i] == 'E')
- + {
- + int j;
- +
- + for (j = XVECLEN (x, i) - 1; j >= 0; j--)
- + if (symbol_mentioned_p (XVECEXP (x, i, j)))
- + return 1;
- + }
- + else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
- + return 1;
- + }
- +
- + return 0;
- +}
- +
- +
- +/* Return TRUE if X references a LABEL_REF. */
- +int
- +label_mentioned_p (rtx x)
- +{
- + const char *fmt;
- + int i;
- +
- + if (GET_CODE (x) == LABEL_REF)
- + return 1;
- +
- + fmt = GET_RTX_FORMAT (GET_CODE (x));
- + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
- + {
- + if (fmt[i] == 'E')
- + {
- + int j;
- +
- + for (j = XVECLEN (x, i) - 1; j >= 0; j--)
- + if (label_mentioned_p (XVECEXP (x, i, j)))
- + return 1;
- + }
- + else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
- + return 1;
- + }
- +
- + return 0;
- +}
- +
- +
- +/* Return TRUE if X contains a MEM expression. */
- +int
- +mem_mentioned_p (rtx x)
- +{
- + const char *fmt;
- + int i;
- +
- + if (MEM_P (x))
- + return 1;
- +
- + fmt = GET_RTX_FORMAT (GET_CODE (x));
- + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
- + {
- + if (fmt[i] == 'E')
- + {
- + int j;
- +
- + for (j = XVECLEN (x, i) - 1; j >= 0; j--)
- + if (mem_mentioned_p (XVECEXP (x, i, j)))
- + return 1;
- + }
- + else if (fmt[i] == 'e' && mem_mentioned_p (XEXP (x, i)))
- + return 1;
- + }
- +
- + return 0;
- +}
- +
- +
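- +/* Return TRUE if X is a legitimate operand when generating PIC code. */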
- +int
- +avr32_legitimate_pic_operand_p (rtx x)
- +{
- +
- + /* We can't have const, this must be broken down to a symbol. */
- + if (GET_CODE (x) == CONST)
- + return FALSE;
- +
- + /* Can't access symbols or labels via the constant pool either */
- + if ((GET_CODE (x) == SYMBOL_REF
- + && CONSTANT_POOL_ADDRESS_P (x)
- + && (symbol_mentioned_p (get_pool_constant (x))
- + || label_mentioned_p (get_pool_constant (x)))))
- + return FALSE;
- +
- + return TRUE;
- +}
- +
- +
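- +/* Legitimize the address ORIG for use in PIC code. Symbol and label
- + references are loaded into a register, using REG as an intermediate
- + when it is nonzero, and CONST expressions are split into base plus
- + offset. Returns the legitimized address. */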
- +rtx
- +legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
- + rtx reg)
- +{
- +
- + if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
- + {
- + int subregs = 0;
- +
- + if (reg == 0)
- + {
- + if (!can_create_pseudo_p ())
- + abort ();
- + else
- + reg = gen_reg_rtx (Pmode);
- +
- + subregs = 1;
- + }
- +
- + emit_move_insn (reg, orig);
- +
- + /* Only set current function as using pic offset table if flag_pic is
- + set. This is because this function is also used if
- + TARGET_HAS_ASM_ADDR_PSEUDOS is set. */
- + if (flag_pic)
- + crtl->uses_pic_offset_table = 1;
- +
- + /* Put a REG_EQUAL note on this insn, so that it can be optimized by
- + loop. */
- + return reg;
- + }
- + else if (GET_CODE (orig) == CONST)
- + {
- + rtx base, offset;
- +
- + if (flag_pic
- + && GET_CODE (XEXP (orig, 0)) == PLUS
- + && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
- + return orig;
- +
- + if (reg == 0)
- + {
- + if (!can_create_pseudo_p ())
- + abort ();
- + else
- + reg = gen_reg_rtx (Pmode);
- + }
- +
- + if (GET_CODE (XEXP (orig, 0)) == PLUS)
- + {
- + base =
- + legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
- + offset =
- + legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
- + base == reg ? 0 : reg);
- + }
- + else
- + abort ();
- +
- + if (GET_CODE (offset) == CONST_INT)
- + {
- + /* The base register doesn't really matter, we only want to test
- + the index for the appropriate mode. */
- + if (!avr32_const_ok_for_constraint_p (INTVAL (offset), 'I', "Is21"))
- + {
- + if (can_create_pseudo_p ())
- + offset = force_reg (Pmode, offset);
- + else
- + abort ();
- + }
- +
- + if (GET_CODE (offset) == CONST_INT)
- + return plus_constant (base, INTVAL (offset));
- + }
- +
- + return gen_rtx_PLUS (Pmode, base, offset);
- + }
- +
- + return orig;
- +}
- +
- +
- +/* Generate code to load the PIC register. */
- +void
- +avr32_load_pic_register (void)
- +{
- + rtx l1, pic_tmp;
- + rtx global_offset_table;
- +
- + if ((crtl->uses_pic_offset_table == 0) || TARGET_NO_INIT_GOT)
- + return;
- +
- + if (!flag_pic)
- + abort ();
- +
- + l1 = gen_label_rtx ();
- +
- + global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
- + pic_tmp =
- + gen_rtx_CONST (Pmode,
- + gen_rtx_MINUS (SImode, gen_rtx_LABEL_REF (Pmode, l1),
- + global_offset_table));
- + emit_insn (gen_pic_load_addr
- + (pic_offset_table_rtx, force_const_mem (SImode, pic_tmp)));
- + emit_insn (gen_pic_compute_got_from_pc (pic_offset_table_rtx, l1));
- +
- + /* Need to emit this whether or not we obey regdecls, since setjmp/longjmp
- + can cause life info to screw up. */
- + emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
- +}
- +
- +
- +/* This hook should return true if values of type type are returned at the most
- + significant end of a register (in other words, if they are padded at the
- + least significant end). You can assume that type is returned in a register;
- + the caller is required to check this. Note that the register provided by
- + FUNCTION_VALUE must be able to hold the complete return value. For example,
- + if a 1-, 2- or 3-byte structure is returned at the most significant end of a
- + 4-byte register, FUNCTION_VALUE should provide an SImode rtx. */
- +bool
- +avr32_return_in_msb (tree type ATTRIBUTE_UNUSED)
- +{
- + /* if ( AGGREGATE_TYPE_P (type) ) if ((int_size_in_bytes(type) == 1) ||
- + ((int_size_in_bytes(type) == 2) && TYPE_ALIGN_UNIT(type) >= 2)) return
- + false; else return true; */
- +
- + return false;
- +}
- +
- +
- +/*
- + Returns one if a certain function value is going to be returned in memory
- + and zero if it is going to be returned in a register.
- +
- + BLKmode and all other modes that are larger than 64 bits are returned in
- + memory.
- +*/
- +bool
- +avr32_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
- +{
- + if (TYPE_MODE (type) == VOIDmode)
- + return false;
- +
- + if (int_size_in_bytes (type) > (2 * UNITS_PER_WORD)
- + || int_size_in_bytes (type) == -1)
- + {
- + return true;
- + }
- +
- + /* If we have an aggregate then use the same mechanism as when checking if
- + it should be passed on the stack. */
- + if (type
- + && AGGREGATE_TYPE_P (type)
- + && (*targetm.calls.must_pass_in_stack) (TYPE_MODE (type), type))
- + return true;
- +
- + return false;
- +}
- +
- +
- +/* Output the constant part of the trampoline.
- + lddpc r0, pc[0x8:e] ; load static chain register
- + lddpc pc, pc[0x8:e] ; jump to subroutine
- + .long 0 ; Address to static chain,
- + ; filled in by avr32_initialize_trampoline()
- + .long 0 ; Address to subroutine,
- + ; filled in by avr32_initialize_trampoline()
- +*/
- +void
- +avr32_trampoline_template (FILE * file)
- +{
- + fprintf (file, "\tlddpc r0, pc[8]\n");
- + fprintf (file, "\tlddpc pc, pc[8]\n");
- + /* Make room for the address of the static chain. */
- + fprintf (file, "\t.long\t0\n");
- + /* Make room for the address of the subroutine. */
- + fprintf (file, "\t.long\t0\n");
- +}
- +
- +
- +/* Initialize the variable parts of a trampoline. */
- +void
- +avr32_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
- +{
- + /* Store the address to the static chain. */
- + emit_move_insn (gen_rtx_MEM
- + (SImode, plus_constant (addr, TRAMPOLINE_SIZE - 4)),
- + static_chain);
- +
- + /* Store the address to the function. */
- + emit_move_insn (gen_rtx_MEM (SImode, plus_constant (addr, TRAMPOLINE_SIZE)),
- + fnaddr);
- +
- + emit_insn (gen_cache (gen_rtx_REG (SImode, 13),
- + gen_rtx_CONST_INT (SImode,
- + AVR32_CACHE_INVALIDATE_ICACHE)));
- +}
- +
- +
- +/* Return nonzero if X is valid as an addressing register. */
- +int
- +avr32_address_register_rtx_p (rtx x, int strict_p)
- +{
- + int regno;
- +
- + if (!register_operand(x, GET_MODE(x)))
- + return 0;
- +
- + /* If strict we require the register to be a hard register. */
- + if (strict_p
- + && !REG_P(x))
- + return 0;
- +
- + regno = REGNO (x);
- +
- + if (strict_p)
- + return REGNO_OK_FOR_BASE_P (regno);
- +
- + return (regno <= LAST_REGNUM || regno >= FIRST_PSEUDO_REGISTER);
- +}
- +
- +
- +/* Return nonzero if INDEX is valid for an address index operand. */
- +int
- +avr32_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
- +{
- + enum rtx_code code = GET_CODE (index);
- +
- + if (GET_MODE_SIZE (mode) > 8)
- + return 0;
- +
- + /* Standard coprocessor addressing modes. */
- + if (code == CONST_INT)
- + {
- + return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ks16");
- + }
- +
- + if (avr32_address_register_rtx_p (index, strict_p))
- + return 1;
- +
- + if (code == MULT)
- + {
- + rtx xiop0 = XEXP (index, 0);
- + rtx xiop1 = XEXP (index, 1);
- + return ((avr32_address_register_rtx_p (xiop0, strict_p)
- + && power_of_two_operand (xiop1, SImode)
- + && (INTVAL (xiop1) <= 8))
- + || (avr32_address_register_rtx_p (xiop1, strict_p)
- + && power_of_two_operand (xiop0, SImode)
- + && (INTVAL (xiop0) <= 8)));
- + }
- + else if (code == ASHIFT)
- + {
- + rtx op = XEXP (index, 1);
- +
- + return (avr32_address_register_rtx_p (XEXP (index, 0), strict_p)
- + && GET_CODE (op) == CONST_INT
- + && INTVAL (op) > 0 && INTVAL (op) <= 3);
- + }
- +
- + return 0;
- +}
- +
- +
- +/*
- + Used in the GO_IF_LEGITIMATE_ADDRESS macro. Returns a nonzero value if
- + the RTX x is a legitimate memory address, and zero if it is not.
- +*/
- +
- +
- +/* Forward declaration */
- +int is_minipool_label (rtx label);
- +
- +int
- +avr32_legitimate_address (enum machine_mode mode, rtx x, int strict)
- +{
- +
- + switch (GET_CODE (x))
- + {
- + case REG:
- + return avr32_address_register_rtx_p (x, strict);
- + case CONST_INT:
- + return ((mode==SImode) && TARGET_RMW_ADDRESSABLE_DATA
- + && CONST_OK_FOR_CONSTRAINT_P(INTVAL(x), 'K', "Ks17"));
- + case CONST:
- + {
- + rtx label = avr32_find_symbol (x);
- + if (label
- + &&
- + (/*
- + If we enable (const (plus (symbol_ref ...))) type constant
- + pool entries we must add support for it in the predicates and
- + in the minipool generation in avr32_reorg().
- + (CONSTANT_POOL_ADDRESS_P (label)
- + && !(flag_pic
- + && (symbol_mentioned_p (get_pool_constant (label))
- + || label_mentioned_p (get_pool_constant (label)))))
- + ||*/
- + ((GET_CODE (label) == LABEL_REF)
- + && GET_CODE (XEXP (label, 0)) == CODE_LABEL
- + && is_minipool_label (XEXP (label, 0)))
- + /*|| ((GET_CODE (label) == SYMBOL_REF)
- + && mode == SImode
- + && SYMBOL_REF_RMW_ADDR(label))*/))
- + {
- + return TRUE;
- + }
- + }
- + break;
- + case LABEL_REF:
- + if (GET_CODE (XEXP (x, 0)) == CODE_LABEL
- + && is_minipool_label (XEXP (x, 0)))
- + {
- + return TRUE;
- + }
- + break;
- + case SYMBOL_REF:
- + {
- + if (CONSTANT_POOL_ADDRESS_P (x)
- + && !(flag_pic
- + && (symbol_mentioned_p (get_pool_constant (x))
- + || label_mentioned_p (get_pool_constant (x)))))
- + return TRUE;
- + else if (SYMBOL_REF_RCALL_FUNCTION_P (x)
- + || (mode == SImode
- + && SYMBOL_REF_RMW_ADDR (x)))
- + return TRUE;
- + break;
- + }
- + case PRE_DEC: /* (pre_dec (...)) */
- + case POST_INC: /* (post_inc (...)) */
- + return avr32_address_register_rtx_p (XEXP (x, 0), strict);
- + case PLUS: /* (plus (...) (...)) */
- + {
- + rtx xop0 = XEXP (x, 0);
- + rtx xop1 = XEXP (x, 1);
- +
- + return ((avr32_address_register_rtx_p (xop0, strict)
- + && avr32_legitimate_index_p (mode, xop1, strict))
- + || (avr32_address_register_rtx_p (xop1, strict)
- + && avr32_legitimate_index_p (mode, xop0, strict)));
- + }
- + default:
- + break;
- + }
- +
- + return FALSE;
- +}
- +
- +
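- +/* Return nonzero if the constant C can be loaded into a register with
- + a single move instruction. */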
- +int
- +avr32_const_ok_for_move (HOST_WIDE_INT c)
- +{
- + if ( TARGET_V2_INSNS )
- + return ( avr32_const_ok_for_constraint_p (c, 'K', "Ks21")
- + /* movh instruction */
- + || avr32_hi16_immediate_operand (GEN_INT(c), VOIDmode) );
- + else
- + return avr32_const_ok_for_constraint_p (c, 'K', "Ks21");
- +}
- +
- +
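- +/* Return TRUE if the CONST_DOUBLE VALUE can be loaded as an immediate,
- + i.e. its words satisfy the Ks21 constraint. */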
- +int
- +avr32_const_double_immediate (rtx value)
- +{
- + HOST_WIDE_INT hi, lo;
- +
- + if (GET_CODE (value) != CONST_DOUBLE)
- + return FALSE;
- +
- + if (SCALAR_FLOAT_MODE_P (GET_MODE (value)))
- + {
- + HOST_WIDE_INT target_float[2];
- + hi = lo = 0;
- + real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (value),
- + GET_MODE (value));
- + lo = target_float[0];
- + hi = target_float[1];
- + }
- + else
- + {
- + hi = CONST_DOUBLE_HIGH (value);
- + lo = CONST_DOUBLE_LOW (value);
- + }
- +
- + if (avr32_const_ok_for_constraint_p (lo, 'K', "Ks21")
- + && (GET_MODE (value) == SFmode
- + || avr32_const_ok_for_constraint_p (hi, 'K', "Ks21")))
- + {
- + return TRUE;
- + }
- +
- + return FALSE;
- +}
- +
- +
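- +/* Return nonzero if X is a constant that can be used directly as an
- + operand and does not have to be forced into the constant pool. */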
- +int
- +avr32_legitimate_constant_p (rtx x)
- +{
- + switch (GET_CODE (x))
- + {
- + case CONST_INT:
- + /* Check if we should put large immediate into constant pool
- + or load them directly with mov/orh.*/
- + if (!avr32_imm_in_const_pool)
- + return 1;
- +
- + return avr32_const_ok_for_move (INTVAL (x));
- + case CONST_DOUBLE:
- + /* Check if we should put large immediate into constant pool
- + or load them directly with mov/orh.*/
- + if (!avr32_imm_in_const_pool)
- + return 1;
- +
- + if (GET_MODE (x) == SFmode
- + || GET_MODE (x) == DFmode || GET_MODE (x) == DImode)
- + return avr32_const_double_immediate (x);
- + else
- + return 0;
- + case LABEL_REF:
- + case SYMBOL_REF:
- + return avr32_find_symbol (x) && (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS);
- + case CONST:
- + case HIGH:
- + case CONST_VECTOR:
- + return 0;
- + default:
- + printf ("%s():\n", __FUNCTION__);
- + debug_rtx (x);
- + return 1;
- + }
- +}
- +
- +
- +/* Strip any special encoding from labels */
- +const char *
- +avr32_strip_name_encoding (const char *name)
- +{
- + const char *stripped = name;
- +
- + while (1)
- + {
- + switch (stripped[0])
- + {
- + case '#':
- + stripped = strchr (name + 1, '#') + 1;
- + break;
- + case '*':
- + stripped = &stripped[1];
- + break;
- + default:
- + return stripped;
- + }
- + }
- +}
- +
- +
- +
- +/* Do anything needed before RTL is emitted for each function. */
- +static struct machine_function *
- +avr32_init_machine_status (void)
- +{
- + struct machine_function *machine;
- + machine =
- + (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
- +
- +#if AVR32_FT_UNKNOWN != 0
- + machine->func_type = AVR32_FT_UNKNOWN;
- +#endif
- +
- + machine->minipool_label_head = 0;
- + machine->minipool_label_tail = 0;
- + machine->ifcvt_after_reload = 0;
- + return machine;
- +}
- +
- +
- +void
- +avr32_init_expanders (void)
- +{
- + /* Arrange to initialize and mark the machine per-function status. */
- + init_machine_status = avr32_init_machine_status;
- +}
- +
- +
- +/* Return an RTX indicating where the return address to the
- + calling function can be found. */
- +rtx
- +avr32_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
- +{
- + if (count != 0)
- + return NULL_RTX;
- +
- + return get_hard_reg_initial_val (Pmode, LR_REGNUM);
- +}
- +
- +
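- +/* Encode section information for DECL. Variables that are RMW
- + addressable get the RMW address flag set on their SYMBOL_REF. */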
- +void
- +avr32_encode_section_info (tree decl, rtx rtl, int first)
- +{
- + default_encode_section_info(decl, rtl, first);
- +
- + if ( TREE_CODE (decl) == VAR_DECL
- + && (GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
- + && (lookup_attribute ("rmw_addressable", DECL_ATTRIBUTES (decl))
- + || TARGET_RMW_ADDRESSABLE_DATA) ){
- + if ( !TARGET_RMW || flag_pic )
- + return;
- + // {
- + // warning ("Using RMW addressable data with an arch that does not support RMW instructions.");
- + // return;
- + // }
- + //
- + //if ( flag_pic )
- + // {
- + // warning ("Using RMW addressable data with together with -fpic switch. Can not use RMW instruction when compiling with -fpic.");
- + // return;
- + // }
- + SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT);
- + }
- +}
- +
- +
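- +/* Output the assembler label NAME, stripping any special name encoding
- + first. */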
- +void
- +avr32_asm_output_label (FILE * stream, const char *name)
- +{
- + name = avr32_strip_name_encoding (name);
- +
- + /* Print the label. */
- + assemble_name (stream, name);
- + fprintf (stream, ":\n");
- +}
- +
- +
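- +/* Emit a .weak directive for the symbol NAME. */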
- +void
- +avr32_asm_weaken_label (FILE * stream, const char *name)
- +{
- + fprintf (stream, "\t.weak ");
- + assemble_name (stream, name);
- + fprintf (stream, "\n");
- +}
- +
- +
- +/*
- + Checks if a labelref is equal to a reserved word in the assembler. If it is,
- + insert a '_' before the label name.
- +*/
- +void
- +avr32_asm_output_labelref (FILE * stream, const char *name)
- +{
- + int verbatim = FALSE;
- + const char *stripped = name;
- + int strip_finished = FALSE;
- +
- + while (!strip_finished)
- + {
- + switch (stripped[0])
- + {
- + case '#':
- + stripped = strchr (name + 1, '#') + 1;
- + break;
- + case '*':
- + stripped = &stripped[1];
- + verbatim = TRUE;
- + break;
- + default:
- + strip_finished = TRUE;
- + break;
- + }
- + }
- +
- + if (verbatim)
- + fputs (stripped, stream);
- + else
- + asm_fprintf (stream, "%U%s", stripped);
- +}
- +
- +
- +/*
- + Check if the comparison in compare_exp is redundant
- + for the condition given in next_cond given that the
- + needed flags are already set by an earlier instruction.
- + Uses cc_prev_status to check this.
- +
- + Returns NULL_RTX if the compare is not redundant
- + or the new condition to use in the conditional
- + instruction if the compare is redundant.
- +*/
- +static rtx
- +is_compare_redundant (rtx compare_exp, rtx next_cond)
- +{
- + int z_flag_valid = FALSE;
- + int n_flag_valid = FALSE;
- + rtx new_cond;
- +
- + if (GET_CODE (compare_exp) != COMPARE
- + && GET_CODE (compare_exp) != AND)
- + return NULL_RTX;
- +
- +
- + if (rtx_equal_p (cc_prev_status.mdep.value, compare_exp))
- + {
- + /* cc0 already contains the correct comparison -> delete cmp insn */
- + return next_cond;
- + }
- +
- + if (GET_MODE (compare_exp) != SImode)
- + return NULL_RTX;
- +
- + switch (cc_prev_status.mdep.flags)
- + {
- + case CC_SET_VNCZ:
- + case CC_SET_NCZ:
- + n_flag_valid = TRUE;
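- + /* Fall through. */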
- + case CC_SET_CZ:
- + case CC_SET_Z:
- + z_flag_valid = TRUE;
- + }
- +
- + if (cc_prev_status.mdep.value
- + && GET_CODE (compare_exp) == COMPARE
- + && REG_P (XEXP (compare_exp, 0))
- + && REGNO (XEXP (compare_exp, 0)) == REGNO (cc_prev_status.mdep.value)
- + && GET_CODE (XEXP (compare_exp, 1)) == CONST_INT
- + && next_cond != NULL_RTX)
- + {
- + if (INTVAL (XEXP (compare_exp, 1)) == 0
- + && z_flag_valid
- + && (GET_CODE (next_cond) == EQ || GET_CODE (next_cond) == NE))
- + /* We can skip the comparison since the Z flag already reflects ops[0] */
- + return next_cond;
- + else if (n_flag_valid
- + && ((INTVAL (XEXP (compare_exp, 1)) == 0
- + && (GET_CODE (next_cond) == GE
- + || GET_CODE (next_cond) == LT))
- + || (INTVAL (XEXP (compare_exp, 1)) == -1
- + && (GET_CODE (next_cond) == GT
- + || GET_CODE (next_cond) == LE))))
- + {
- + /* We can skip the comparison since the N flag already reflects ops[0],
- + which means that we can use the mi/pl conditions to check if
- + ops[0] is GE or LT 0. */
- + if ((GET_CODE (next_cond) == GE) || (GET_CODE (next_cond) == GT))
- + new_cond =
- + gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx),
- + UNSPEC_COND_PL);
- + else
- + new_cond =
- + gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx),
- + UNSPEC_COND_MI);
- + return new_cond;
- + }
- + }
- + return NULL_RTX;
- +}
- +
- +
- +/* Updates cc_status. */
- +void
- +avr32_notice_update_cc (rtx exp, rtx insn)
- +{
- + enum attr_cc attr_cc = get_attr_cc (insn);
- +
- + if ( attr_cc == CC_SET_Z_IF_NOT_V2 )
- + {
- + if (TARGET_V2_INSNS)
- + attr_cc = CC_NONE;
- + else
- + attr_cc = CC_SET_Z;
- + }
- +
- + switch (attr_cc)
- + {
- + case CC_CALL_SET:
- + CC_STATUS_INIT;
- + /* Check if the function call returns a value in r12 */
- + if (REG_P (recog_data.operand[0])
- + && REGNO (recog_data.operand[0]) == RETVAL_REGNUM)
- + {
- + cc_status.flags = 0;
- + cc_status.mdep.value =
- + gen_rtx_COMPARE (SImode, recog_data.operand[0], const0_rtx);
- + cc_status.mdep.flags = CC_SET_VNCZ;
- + cc_status.mdep.cond_exec_cmp_clobbered = 0;
- +
- + }
- + break;
- + case CC_COMPARE:
- + {
- + /* Check whether the compare will be optimized away; if so, nothing
- + should be done */
- + rtx compare_exp = SET_SRC (exp);
- + /* Check if we have a tst expression. If so convert it to a
- + compare with 0. */
- + if ( REG_P (SET_SRC (exp)) )
- + compare_exp = gen_rtx_COMPARE (GET_MODE (SET_SRC (exp)),
- + SET_SRC (exp),
- + const0_rtx);
- +
- + if (!next_insn_emits_cmp (insn)
- + && (is_compare_redundant (compare_exp, get_next_insn_cond (insn)) == NULL_RTX))
- + {
- +
- + /* Reset the nonstandard flag */
- + CC_STATUS_INIT;
- + cc_status.flags = 0;
- + cc_status.mdep.value = compare_exp;
- + cc_status.mdep.flags = CC_SET_VNCZ;
- + cc_status.mdep.cond_exec_cmp_clobbered = 0;
- + }
- + }
- + break;
- + case CC_CMP_COND_INSN:
- + {
- + /* Conditional insn that emit the compare itself. */
- + rtx cmp;
- + rtx cmp_op0, cmp_op1;
- + rtx cond;
- + rtx dest;
- + rtx next_insn = next_nonnote_insn (insn);
- +
- + if ( GET_CODE (exp) == COND_EXEC )
- + {
- + cmp_op0 = XEXP (COND_EXEC_TEST (exp), 0);
- + cmp_op1 = XEXP (COND_EXEC_TEST (exp), 1);
- + cond = COND_EXEC_TEST (exp);
- + dest = SET_DEST (COND_EXEC_CODE (exp));
- + }
- + else
- + {
- + /* If-then-else conditional. The compare operands are in operands
- + 4 and 5. */
- + cmp_op0 = recog_data.operand[4];
- + cmp_op1 = recog_data.operand[5];
- + cond = recog_data.operand[1];
- + dest = SET_DEST (exp);
- + }
- +
- + if ( GET_CODE (cmp_op0) == AND )
- + cmp = cmp_op0;
- + else
- + cmp = gen_rtx_COMPARE (GET_MODE (cmp_op0),
- + cmp_op0,
- + cmp_op1);
- +
- + /* Check if the conditional insn updates a register present
- + in the comparison; if so we must reset the cc_status. */
- + if (REG_P (dest)
- + && (reg_mentioned_p (dest, cmp_op0)
- + || reg_mentioned_p (dest, cmp_op1))
- + && GET_CODE (exp) != COND_EXEC )
- + {
- + CC_STATUS_INIT;
- + }
- + else if (is_compare_redundant (cmp, cond) == NULL_RTX)
- + {
- + /* Reset the nonstandard flag */
- + CC_STATUS_INIT;
- + if ( GET_CODE (cmp_op0) == AND )
- + {
- + cc_status.flags = CC_INVERTED;
- + cc_status.mdep.flags = CC_SET_Z;
- + }
- + else
- + {
- + cc_status.flags = 0;
- + cc_status.mdep.flags = CC_SET_VNCZ;
- + }
- + cc_status.mdep.value = cmp;
- + cc_status.mdep.cond_exec_cmp_clobbered = 0;
- + }
- +
- +
- + /* Check if we have a COND_EXEC insn which updates one
- + of the registers in the compare status. */
- + if (REG_P (dest)
- + && (reg_mentioned_p (dest, cmp_op0)
- + || reg_mentioned_p (dest, cmp_op1))
- + && GET_CODE (exp) == COND_EXEC )
- + cc_status.mdep.cond_exec_cmp_clobbered = 1;
- +
- + if ( cc_status.mdep.cond_exec_cmp_clobbered
- + && GET_CODE (exp) == COND_EXEC
- + && next_insn != NULL
- + && INSN_P (next_insn)
- + && !(GET_CODE (PATTERN (next_insn)) == COND_EXEC
- + && rtx_equal_p (XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 0), cmp_op0)
- + && rtx_equal_p (XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 1), cmp_op1)
- + && (GET_CODE (COND_EXEC_TEST (PATTERN (next_insn))) == GET_CODE (cond)
- + || GET_CODE (COND_EXEC_TEST (PATTERN (next_insn))) == reverse_condition (GET_CODE (cond)))) )
- + {
- + /* We have a sequence of conditional insns where the compare status has been clobbered
- + since the compare no longer reflects the content of the values to compare. */
- + CC_STATUS_INIT;
- + cc_status.mdep.cond_exec_cmp_clobbered = 1;
- + }
- +
- + }
- + break;
- + case CC_BLD:
- + /* Bit load is kind of like an inverted testsi, because the Z flag is
- + inverted */
- + CC_STATUS_INIT;
- + cc_status.flags = CC_INVERTED;
- + cc_status.mdep.value = SET_SRC (exp);
- + cc_status.mdep.flags = CC_SET_Z;
- + cc_status.mdep.cond_exec_cmp_clobbered = 0;
- + break;
- + case CC_NONE:
- + /* Insn does not affect CC at all. Check if the instruction updates
- + some of the registers currently reflected in cc0 */
- +
- + if ((GET_CODE (exp) == SET)
- + && (cc_status.value1 || cc_status.value2 || cc_status.mdep.value)
- + && (reg_mentioned_p (SET_DEST (exp), cc_status.value1)
- + || reg_mentioned_p (SET_DEST (exp), cc_status.value2)
- + || reg_mentioned_p (SET_DEST (exp), cc_status.mdep.value)))
- + {
- + CC_STATUS_INIT;
- + }
- +
- + /* If this is a parallel we must step through each of the parallel
- + expressions */
- + if (GET_CODE (exp) == PARALLEL)
- + {
- + int i;
- + for (i = 0; i < XVECLEN (exp, 0); ++i)
- + {
- + rtx vec_exp = XVECEXP (exp, 0, i);
- + if ((GET_CODE (vec_exp) == SET)
- + && (cc_status.value1 || cc_status.value2
- + || cc_status.mdep.value)
- + && (reg_mentioned_p (SET_DEST (vec_exp), cc_status.value1)
- + || reg_mentioned_p (SET_DEST (vec_exp),
- + cc_status.value2)
- + || reg_mentioned_p (SET_DEST (vec_exp),
- + cc_status.mdep.value)))
- + {
- + CC_STATUS_INIT;
- + }
- + }
- + }
- +
- + /* Check if we have memory operations with post_inc or pre_dec on the
- + register currently reflected in cc0 */
- + if (GET_CODE (exp) == SET
- + && GET_CODE (SET_SRC (exp)) == MEM
- + && (GET_CODE (XEXP (SET_SRC (exp), 0)) == POST_INC
- + || GET_CODE (XEXP (SET_SRC (exp), 0)) == PRE_DEC)
- + &&
- + (reg_mentioned_p
- + (XEXP (XEXP (SET_SRC (exp), 0), 0), cc_status.value1)
- + || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
- + cc_status.value2)
- + || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
- + cc_status.mdep.value)))
- + CC_STATUS_INIT;
- +
- + if (GET_CODE (exp) == SET
- + && GET_CODE (SET_DEST (exp)) == MEM
- + && (GET_CODE (XEXP (SET_DEST (exp), 0)) == POST_INC
- + || GET_CODE (XEXP (SET_DEST (exp), 0)) == PRE_DEC)
- + &&
- + (reg_mentioned_p
- + (XEXP (XEXP (SET_DEST (exp), 0), 0), cc_status.value1)
- + || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
- + cc_status.value2)
- + || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
- + cc_status.mdep.value)))
- + CC_STATUS_INIT;
- + break;
- +
- + case CC_SET_VNCZ:
- + CC_STATUS_INIT;
- + cc_status.mdep.value = recog_data.operand[0];
- + cc_status.mdep.flags = CC_SET_VNCZ;
- + cc_status.mdep.cond_exec_cmp_clobbered = 0;
- + break;
- +
- + case CC_SET_NCZ:
- + CC_STATUS_INIT;
- + cc_status.mdep.value = recog_data.operand[0];
- + cc_status.mdep.flags = CC_SET_NCZ;
- + cc_status.mdep.cond_exec_cmp_clobbered = 0;
- + break;
- +
- + case CC_SET_CZ:
- + CC_STATUS_INIT;
- + cc_status.mdep.value = recog_data.operand[0];
- + cc_status.mdep.flags = CC_SET_CZ;
- + cc_status.mdep.cond_exec_cmp_clobbered = 0;
- + break;
- +
- + case CC_SET_Z:
- + CC_STATUS_INIT;
- + cc_status.mdep.value = recog_data.operand[0];
- + cc_status.mdep.flags = CC_SET_Z;
- + cc_status.mdep.cond_exec_cmp_clobbered = 0;
- + break;
- +
- + case CC_CLOBBER:
- + CC_STATUS_INIT;
- + break;
- +
- + default:
- + CC_STATUS_INIT;
- + }
- +}
- +
- +
- +/*
- + Outputs to stdio stream stream the assembler syntax for an instruction
- + operand x. x is an RTL expression.
- +*/
- +void
- +avr32_print_operand (FILE * stream, rtx x, int code)
- +{
- + int error = 0;
- +
- + if ( code == '?' )
- + {
- + /* Predicable instruction, print condition code */
- +
- + /* If the insn should not be conditional then do nothing. */
- + if ( current_insn_predicate == NULL_RTX )
- + return;
- +
- + /* Set x to the predicate to force printing
- + the condition later on. */
- + x = current_insn_predicate;
- +
- + /* Reverse the condition if using the bld insn. */
- + if ( GET_CODE (XEXP(current_insn_predicate,0)) == AND )
- + x = reversed_condition (current_insn_predicate);
- + }
- + else if ( code == '!' )
- + {
- + /* Output compare for conditional insn if needed. */
- + rtx new_cond;
- + gcc_assert ( current_insn_predicate != NULL_RTX );
- + new_cond = avr32_output_cmp(current_insn_predicate,
- + GET_MODE(XEXP(current_insn_predicate,0)),
- + XEXP(current_insn_predicate,0),
- + XEXP(current_insn_predicate,1));
- +
- + /* Check if the new condition is a special avr32 condition
- + specified using UNSPECs. If so we must handle it differently. */
- + if ( GET_CODE (new_cond) == UNSPEC )
- + {
- + current_insn_predicate =
- + gen_rtx_UNSPEC (CCmode,
- + gen_rtvec (2,
- + XEXP(current_insn_predicate,0),
- + XEXP(current_insn_predicate,1)),
- + XINT (new_cond, 1));
- + }
- + else
- + {
- + PUT_CODE(current_insn_predicate, GET_CODE(new_cond));
- + }
- + return;
- + }
- +
- + switch (GET_CODE (x))
- + {
- + case UNSPEC:
- + switch (XINT (x, 1))
- + {
- + case UNSPEC_COND_PL:
- + if (code == 'i')
- + fputs ("mi", stream);
- + else
- + fputs ("pl", stream);
- + break;
- + case UNSPEC_COND_MI:
- + if (code == 'i')
- + fputs ("pl", stream);
- + else
- + fputs ("mi", stream);
- + break;
- + default:
- + error = 1;
- + }
- + break;
- + case EQ:
- + if (code == 'i')
- + fputs ("ne", stream);
- + else
- + fputs ("eq", stream);
- + break;
- + case NE:
- + if (code == 'i')
- + fputs ("eq", stream);
- + else
- + fputs ("ne", stream);
- + break;
- + case GT:
- + if (code == 'i')
- + fputs ("le", stream);
- + else
- + fputs ("gt", stream);
- + break;
- + case GTU:
- + if (code == 'i')
- + fputs ("ls", stream);
- + else
- + fputs ("hi", stream);
- + break;
- + case LT:
- + if (code == 'i')
- + fputs ("ge", stream);
- + else
- + fputs ("lt", stream);
- + break;
- + case LTU:
- + if (code == 'i')
- + fputs ("hs", stream);
- + else
- + fputs ("lo", stream);
- + break;
- + case GE:
- + if (code == 'i')
- + fputs ("lt", stream);
- + else
- + fputs ("ge", stream);
- + break;
- + case GEU:
- + if (code == 'i')
- + fputs ("lo", stream);
- + else
- + fputs ("hs", stream);
- + break;
- + case LE:
- + if (code == 'i')
- + fputs ("gt", stream);
- + else
- + fputs ("le", stream);
- + break;
- + case LEU:
- + if (code == 'i')
- + fputs ("hi", stream);
- + else
- + fputs ("ls", stream);
- + break;
- + case CONST_INT:
- + {
- + HOST_WIDE_INT value = INTVAL (x);
- +
- + switch (code)
- + {
- + case 'm':
- + if ( HOST_BITS_PER_WIDE_INT > BITS_PER_WORD )
- + {
- + /* A const_int can be used to represent DImode constants. */
- + value >>= BITS_PER_WORD;
- + }
- + /* We might get a const_int immediate for setting a DI register;
- + we must then return the correctly sign-extended DI. The most
- + significant word is just a sign extension. */
- + else if (value < 0)
- + value = -1;
- + else
- + value = 0;
- + break;
- + case 'i':
- + value++;
- + break;
- + case 'p':
- + {
- + /* Set to bit position of first bit set in immediate */
- + int i, bitpos = 32;
- + for (i = 0; i < 32; i++)
- + if (value & (1 << i))
- + {
- + bitpos = i;
- + break;
- + }
- + value = bitpos;
- + }
- + break;
- + case 'z':
- + {
- + /* Set to bit position of first bit cleared in immediate */
- + int i, bitpos = 32;
- + for (i = 0; i < 32; i++)
- + if (!(value & (1 << i)))
- + {
- + bitpos = i;
- + break;
- + }
- + value = bitpos;
- + }
- + break;
- + case 'r':
- + {
- + /* Reglist 8 */
- + char op[50];
- + op[0] = '\0';
- +
- + if (value & 0x01)
- + strcpy (op, "r0-r3");
- + if (value & 0x02)
- + strlen (op) ? strcat (op, ", r4-r7") : strcpy (op,"r4-r7");
- + if (value & 0x04)
- + strlen (op) ? strcat (op, ", r8-r9") : strcpy (op,"r8-r9");
- + if (value & 0x08)
- + strlen (op) ? strcat (op, ", r10") : strcpy (op,"r10");
- + if (value & 0x10)
- + strlen (op) ? strcat (op, ", r11") : strcpy (op,"r11");
- + if (value & 0x20)
- + strlen (op) ? strcat (op, ", r12") : strcpy (op,"r12");
- + if (value & 0x40)
- + strlen (op) ? strcat (op, ", lr") : strcpy (op, "lr");
- + if (value & 0x80)
- + strlen (op) ? strcat (op, ", pc") : strcpy (op, "pc");
- +
- + fputs (op, stream);
- + return;
- + }
- + case 's':
- + {
- + /* Reglist 16 */
- + char reglist16_string[100];
- + int i;
- + bool first_reg = true;
- + reglist16_string[0] = '\0';
- +
- + for (i = 0; i < 16; ++i)
- + {
- + if (value & (1 << i))
- + {
- + if (first_reg)
- + first_reg = false;
- + else
- + strcat (reglist16_string, ", ");
- + strcat(reglist16_string,reg_names[INTERNAL_REGNUM(i)]);
- + }
- + }
- + fputs (reglist16_string, stream);
- + return;
- + }
- + case 'h':
- + /* Print halfword part of word */
- + fputs (value ? "b" : "t", stream);
- + return;
- + }
- +
- + /* Print Value */
- + fprintf (stream, "%d", value);
- + break;
- + }
- + case CONST_DOUBLE:
- + {
- + HOST_WIDE_INT hi, lo;
- + if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
- + {
- + HOST_WIDE_INT target_float[2];
- + hi = lo = 0;
- + real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (x),
- + GET_MODE (x));
- + /* For doubles the most significant part starts at index 0. */
- + if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
- + {
- + hi = target_float[0];
- + lo = target_float[1];
- + }
- + else
- + {
- + lo = target_float[0];
- + }
- + }
- + else
- + {
- + hi = CONST_DOUBLE_HIGH (x);
- + lo = CONST_DOUBLE_LOW (x);
- + }
- +
- + if (code == 'm')
- + fprintf (stream, "%ld", hi);
- + else
- + fprintf (stream, "%ld", lo);
- +
- + break;
- + }
- + case CONST:
- + output_addr_const (stream, XEXP (XEXP (x, 0), 0));
- + fprintf (stream, "+%ld", INTVAL (XEXP (XEXP (x, 0), 1)));
- + break;
- + case REG:
- + /* Swap register name if the register is DImode or DFmode. */
- + if (GET_MODE (x) == DImode || GET_MODE (x) == DFmode)
- + {
- + /* Double register must have an even numbered address */
- + gcc_assert (!(REGNO (x) % 2));
- + if (code == 'm')
- + fputs (reg_names[true_regnum (x)], stream);
- + else
- + fputs (reg_names[true_regnum (x) + 1], stream);
- + }
- + else if (GET_MODE (x) == TImode)
- + {
- + switch (code)
- + {
- + case 'T':
- + fputs (reg_names[true_regnum (x)], stream);
- + break;
- + case 'U':
- + fputs (reg_names[true_regnum (x) + 1], stream);
- + break;
- + case 'L':
- + fputs (reg_names[true_regnum (x) + 2], stream);
- + break;
- + case 'B':
- + fputs (reg_names[true_regnum (x) + 3], stream);
- + break;
- + default:
- + fprintf (stream, "%s, %s, %s, %s",
- + reg_names[true_regnum (x) + 3],
- + reg_names[true_regnum (x) + 2],
- + reg_names[true_regnum (x) + 1],
- + reg_names[true_regnum (x)]);
- + break;
- + }
- + }
- + else
- + {
- + fputs (reg_names[true_regnum (x)], stream);
- + }
- + break;
- + case CODE_LABEL:
- + case LABEL_REF:
- + case SYMBOL_REF:
- + output_addr_const (stream, x);
- + break;
- + case MEM:
- + switch (GET_CODE (XEXP (x, 0)))
- + {
- + case LABEL_REF:
- + case SYMBOL_REF:
- + output_addr_const (stream, XEXP (x, 0));
- + break;
- + case MEM:
- + switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
- + {
- + case SYMBOL_REF:
- + output_addr_const (stream, XEXP (XEXP (x, 0), 0));
- + break;
- + default:
- + error = 1;
- + break;
- + }
- + break;
- + case REG:
- + avr32_print_operand (stream, XEXP (x, 0), 0);
- + if (code != 'p')
- + fputs ("[0]", stream);
- + break;
- + case PRE_DEC:
- + fputs ("--", stream);
- + avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
- + break;
- + case POST_INC:
- + avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
- + fputs ("++", stream);
- + break;
- + case PLUS:
- + {
- + rtx op0 = XEXP (XEXP (x, 0), 0);
- + rtx op1 = XEXP (XEXP (x, 0), 1);
- + rtx base = NULL_RTX, offset = NULL_RTX;
- +
- + if (avr32_address_register_rtx_p (op0, 1))
- + {
- + base = op0;
- + offset = op1;
- + }
- + else if (avr32_address_register_rtx_p (op1, 1))
- + {
- + /* Operands are switched. */
- + base = op1;
- + offset = op0;
- + }
- +
- + gcc_assert (base && offset
- + && avr32_address_register_rtx_p (base, 1)
- + && avr32_legitimate_index_p (GET_MODE (x), offset,
- + 1));
- +
- + avr32_print_operand (stream, base, 0);
- + fputs ("[", stream);
- + avr32_print_operand (stream, offset, 0);
- + fputs ("]", stream);
- + break;
- + }
- + case CONST:
- + output_addr_const (stream, XEXP (XEXP (XEXP (x, 0), 0), 0));
- + fprintf (stream, " + %ld",
- + INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)));
- + break;
- + case CONST_INT:
- + avr32_print_operand (stream, XEXP (x, 0), 0);
- + break;
- + default:
- + error = 1;
- + }
- + break;
- + case MULT:
- + {
- + int value = INTVAL (XEXP (x, 1));
- +
- + /* Convert immediate in multiplication into a shift immediate */
- + switch (value)
- + {
- + case 2:
- + value = 1;
- + break;
- + case 4:
- + value = 2;
- + break;
- + case 8:
- + value = 3;
- + break;
- + default:
- + value = 0;
- + }
- + fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
- + value);
- + break;
- + }
- + case ASHIFT:
- + if (GET_CODE (XEXP (x, 1)) == CONST_INT)
- + fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
- + (int) INTVAL (XEXP (x, 1)));
- + else if (REG_P (XEXP (x, 1)))
- + fprintf (stream, "%s << %s", reg_names[true_regnum (XEXP (x, 0))],
- + reg_names[true_regnum (XEXP (x, 1))]);
- + else
- + {
- + error = 1;
- + }
- + break;
- + case LSHIFTRT:
- + if (GET_CODE (XEXP (x, 1)) == CONST_INT)
- + fprintf (stream, "%s >> %i", reg_names[true_regnum (XEXP (x, 0))],
- + (int) INTVAL (XEXP (x, 1)));
- + else if (REG_P (XEXP (x, 1)))
- + fprintf (stream, "%s >> %s", reg_names[true_regnum (XEXP (x, 0))],
- + reg_names[true_regnum (XEXP (x, 1))]);
- + else
- + {
- + error = 1;
- + }
- + fprintf (stream, ">>");
- + break;
- + case PARALLEL:
- + {
- + /* Load store multiple */
- + int i;
- + int count = XVECLEN (x, 0);
- + int reglist16 = 0;
- + char reglist16_string[100];
- +
- + for (i = 0; i < count; ++i)
- + {
- + rtx vec_elm = XVECEXP (x, 0, i);
- + if (GET_CODE (vec_elm) != SET)
- + {
- + debug_rtx (vec_elm);
- + internal_error ("Unknown element in parallel expression!");
- + }
- + if (GET_CODE (XEXP (vec_elm, 0)) == REG)
- + {
- + /* Load multiple */
- + reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 0)));
- + }
- + else
- + {
- + /* Store multiple */
- + reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 1)));
- + }
- + }
- +
- + avr32_make_reglist16 (reglist16, reglist16_string);
- + fputs (reglist16_string, stream);
- +
- + break;
- + }
- +
- + case PLUS:
- + {
- + rtx op0 = XEXP (x, 0);
- + rtx op1 = XEXP (x, 1);
- + rtx base = NULL_RTX, offset = NULL_RTX;
- +
- + if (avr32_address_register_rtx_p (op0, 1))
- + {
- + base = op0;
- + offset = op1;
- + }
- + else if (avr32_address_register_rtx_p (op1, 1))
- + {
- + /* Operands are switched. */
- + base = op1;
- + offset = op0;
- + }
- +
- + gcc_assert (base && offset
- + && avr32_address_register_rtx_p (base, 1)
- + && avr32_legitimate_index_p (GET_MODE (x), offset, 1));
- +
- + avr32_print_operand (stream, base, 0);
- + fputs ("[", stream);
- + avr32_print_operand (stream, offset, 0);
- + fputs ("]", stream);
- + break;
- + }
- +
- + default:
- + error = 1;
- + }
- +
- + if (error)
- + {
- + debug_rtx (x);
- + internal_error ("Illegal expression for avr32_print_operand");
- + }
- +}
- +
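- +/* Return the value of any REG_EQUIV note attached to INSN, or NULL_RTX
- + if there is none. */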
- +rtx
- +avr32_get_note_reg_equiv (rtx insn)
- +{
- + rtx note;
- +
- + note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
- +
- + if (note != NULL_RTX)
- + return XEXP (note, 0);
- + else
- + return NULL_RTX;
- +}
- +
- +
- +/*
- + Outputs to stdio stream stream the assembler syntax for an instruction
- + operand that is a memory reference whose address is x. x is an RTL
- + expression.
- +
- + ToDo: fixme.
- +*/
- +void
- +avr32_print_operand_address (FILE * stream, rtx x)
- +{
- + fprintf (stream, "(%d) /* address */", REGNO (x));
- +}
- +
- +
- +/* Return true if _GLOBAL_OFFSET_TABLE_ symbol is mentioned. */
- +bool
- +avr32_got_mentioned_p (rtx addr)
- +{
- + if (GET_CODE (addr) == MEM)
- + addr = XEXP (addr, 0);
- + while (GET_CODE (addr) == CONST)
- + addr = XEXP (addr, 0);
- + if (GET_CODE (addr) == SYMBOL_REF)
- + {
- + return streq (XSTR (addr, 0), "_GLOBAL_OFFSET_TABLE_");
- + }
- + if (GET_CODE (addr) == PLUS || GET_CODE (addr) == MINUS)
- + {
- + bool l1, l2;
- +
- + l1 = avr32_got_mentioned_p (XEXP (addr, 0));
- + l2 = avr32_got_mentioned_p (XEXP (addr, 1));
- + return l1 || l2;
- + }
- + return false;
- +}
- +
- +
- +/* Find the symbol in an address expression. */
- +rtx
- +avr32_find_symbol (rtx addr)
- +{
- + if (GET_CODE (addr) == MEM)
- + addr = XEXP (addr, 0);
- +
- + while (GET_CODE (addr) == CONST)
- + addr = XEXP (addr, 0);
- +
- + if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
- + return addr;
- + if (GET_CODE (addr) == PLUS)
- + {
- + rtx l1, l2;
- +
- + l1 = avr32_find_symbol (XEXP (addr, 0));
- + l2 = avr32_find_symbol (XEXP (addr, 1));
- + if (l1 != NULL_RTX && l2 == NULL_RTX)
- + return l1;
- + else if (l1 == NULL_RTX && l2 != NULL_RTX)
- + return l2;
- + }
- +
- + return NULL_RTX;
- +}
- +
- +
- +/* Routines for manipulation of the constant pool. */
- +
- +/* AVR32 instructions cannot load a large constant directly into a
- + register; they have to come from a pc relative load. The constant
- + must therefore be placed in the addressable range of the pc
- + relative load. Depending on the precise pc relative load
- + instruction the range is somewhere between 256 bytes and 4k. This
- + means that we often have to dump a constant inside a function, and
- + generate code to branch around it.
- +
- + It is important to minimize this, since the branches will slow
- + things down and make the code larger.
- +
- + Normally we can hide the table after an existing unconditional
- + branch so that there is no interruption of the flow, but in the
- + worst case the code looks like this:
- +
- + lddpc rn, L1
- + ...
- + rjmp L2
- + align
- + L1: .long value
- + L2:
- + ...
- +
- + lddpc rn, L3
- + ...
- + rjmp L4
- + align
- + L3: .long value
- + L4:
- + ...
- +
- + We fix this by performing a scan after scheduling, which notices
- + which instructions need to have their operands fetched from the
- + constant table and builds the table.
- +
- + The algorithm starts by building a table of all the constants that
- + need fixing up and all the natural barriers in the function (places
- + where a constant table can be dropped without breaking the flow).
- + For each fixup we note how far the pc-relative replacement will be
- + able to reach and the offset of the instruction into the function.
- +
- + Having built the table we then group the fixes together to form
- + tables that are as large as possible (subject to addressing
- + constraints) and emit each table of constants after the last
- + barrier that is within range of all the instructions in the group.
- + If a group does not contain a barrier, then we forcibly create one
- + by inserting a jump instruction into the flow. Once the table has
- + been inserted, the insns are then modified to reference the
- + relevant entry in the pool.
- +
- + Possible enhancements to the algorithm (not implemented) are:
- +
- + 1) For some processors and object formats, there may be benefit in
- + aligning the pools to the start of cache lines; this alignment
- + would need to be taken into account when calculating addressability
- + of a pool. */
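- +
- +/* As a rough illustration of the ranges involved (the exact ranges used
- + by this port are set in push_minipool_fix below): a short lddpc can
- + only reach ((1 << 7) - 1) << 2 = 508 bytes forward of the insn, while
- + the full ld.w form reaches roughly 32 kB in either direction, so how
- + often a pool must be dumped depends on which load form each fix uses. */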
- +
- +/* These typedefs are located at the start of this file, so that
- + they can be used in the prototypes there. This comment is to
- + remind readers of that fact so that the following structures
- + can be understood more easily.
- +
- + typedef struct minipool_node Mnode;
- + typedef struct minipool_fixup Mfix; */
- +
- +struct minipool_node
- +{
- + /* Doubly linked chain of entries. */
- + Mnode *next;
- + Mnode *prev;
- + /* The maximum offset into the code at which this entry can be placed. While
- + pushing fixes for forward references, all entries are sorted in order of
- + increasing max_address. */
- + HOST_WIDE_INT max_address;
- + /* Similarly for an entry inserted for a backwards ref. */
- + HOST_WIDE_INT min_address;
- + /* The number of fixes referencing this entry. This can become zero if we
- + "unpush" an entry. In this case we ignore the entry when we come to
- + emit the code. */
- + int refcount;
- + /* The offset from the start of the minipool. */
- + HOST_WIDE_INT offset;
- + /* The value in the table. */
- + rtx value;
- + /* The mode of value. */
- + enum machine_mode mode;
- + /* The size of the value. */
- + int fix_size;
- +};
- +
- +
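- +/* A minipool fixup records an insn whose operand must be rewritten to
- + load its value from the constant pool, together with the range of
- + addresses (relative to the insn) that the pc-relative load can reach. */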
- +struct minipool_fixup
- +{
- + Mfix *next;
- + rtx insn;
- + HOST_WIDE_INT address;
- + rtx *loc;
- + enum machine_mode mode;
- + int fix_size;
- + rtx value;
- + Mnode *minipool;
- + HOST_WIDE_INT forwards;
- + HOST_WIDE_INT backwards;
- +};
- +
- +
- +/* Fixes less than a word need padding out to a word boundary. */
- +#define MINIPOOL_FIX_SIZE(mode, value) \
- + (IS_FORCE_MINIPOOL(value) ? 0 : \
- + (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4))
- +
- +#define IS_FORCE_MINIPOOL(x) \
- + (GET_CODE(x) == UNSPEC && \
- + XINT(x, 1) == UNSPEC_FORCE_MINIPOOL)
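- +
- +/* For example, MINIPOOL_FIX_SIZE (HImode, x) is 4 for an ordinary value,
- + since sub-word constants are padded up to a word, while a forced-minipool
- + marker (see IS_FORCE_MINIPOOL) occupies no space at all. */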
- +
- +static Mnode *minipool_vector_head;
- +static Mnode *minipool_vector_tail;
- +
- +/* The linked list of all minipool fixes required for this function. */
- +Mfix *minipool_fix_head;
- +Mfix *minipool_fix_tail;
- +/* The fix entry for the current minipool, once it has been placed. */
- +Mfix *minipool_barrier;
- +
- +
- +/* Determines if INSN is the start of a jump table. Returns the end
- + of the TABLE or NULL_RTX. */
- +static rtx
- +is_jump_table (rtx insn)
- +{
- + rtx table;
- +
- + if (GET_CODE (insn) == JUMP_INSN
- + && JUMP_LABEL (insn) != NULL
- + && ((table = next_real_insn (JUMP_LABEL (insn)))
- + == next_real_insn (insn))
- + && table != NULL
- + && GET_CODE (table) == JUMP_INSN
- + && (GET_CODE (PATTERN (table)) == ADDR_VEC
- + || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
- + return table;
- +
- + return NULL_RTX;
- +}
- +
- +
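- +/* Return the number of bytes that the jump table INSN occupies in the
- + text section, or 0 if jump tables are placed in a read-only data
- + section instead. */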
- +static HOST_WIDE_INT
- +get_jump_table_size (rtx insn)
- +{
- + /* ADDR_VECs only take room if read-only data goes into the text section. */
- + if (JUMP_TABLES_IN_TEXT_SECTION
- +#if !defined(READONLY_DATA_SECTION_ASM_OP)
- + || 1
- +#endif
- + )
- + {
- + rtx body = PATTERN (insn);
- + int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
- +
- + return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
- + }
- +
- + return 0;
- +}
- +
- +
- +/* Move a minipool fix MP from its current location to before MAX_MP.
- + If MAX_MP is NULL, then MP doesn't need moving, but the addressing
- + constraints may need updating. */
- +static Mnode *
- +move_minipool_fix_forward_ref (Mnode * mp, Mnode * max_mp,
- + HOST_WIDE_INT max_address)
- +{
- + /* This should never be true and the code below assumes these are
- + different. */
- + if (mp == max_mp)
- + abort ();
- +
- + if (max_mp == NULL)
- + {
- + if (max_address < mp->max_address)
- + mp->max_address = max_address;
- + }
- + else
- + {
- + if (max_address > max_mp->max_address - mp->fix_size)
- + mp->max_address = max_mp->max_address - mp->fix_size;
- + else
- + mp->max_address = max_address;
- +
- + /* Unlink MP from its current position. Since max_mp is non-null,
- + mp->prev must be non-null. */
- + mp->prev->next = mp->next;
- + if (mp->next != NULL)
- + mp->next->prev = mp->prev;
- + else
- + minipool_vector_tail = mp->prev;
- +
- + /* Re-insert it before MAX_MP. */
- + mp->next = max_mp;
- + mp->prev = max_mp->prev;
- + max_mp->prev = mp;
- +
- + if (mp->prev != NULL)
- + mp->prev->next = mp;
- + else
- + minipool_vector_head = mp;
- + }
- +
- + /* Save the new entry. */
- + max_mp = mp;
- +
- + /* Scan over the preceding entries and adjust their addresses as required.
- + */
- + while (mp->prev != NULL
- + && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
- + {
- + mp->prev->max_address = mp->max_address - mp->prev->fix_size;
- + mp = mp->prev;
- + }
- +
- + return max_mp;
- +}
- +
- +
- +/* Add a constant to the minipool for a forward reference. Returns the
- + node added or NULL if the constant will not fit in this pool. */
- +static Mnode *
- +add_minipool_forward_ref (Mfix * fix)
- +{
- + /* If set, max_mp is the first pool_entry that has a lower constraint than
- + the one we are trying to add. */
- + Mnode *max_mp = NULL;
- + HOST_WIDE_INT max_address = fix->address + fix->forwards;
- + Mnode *mp;
- +
- + /* If this fix's address is greater than the address of the first entry,
- + then we can't put the fix in this pool. We subtract the size of the
- + current fix to ensure that if the table is fully packed we still have
- + enough room to insert this value by shuffling the other fixes forwards. */
- + if (minipool_vector_head &&
- + fix->address >= minipool_vector_head->max_address - fix->fix_size)
- + return NULL;
- +
- + /* Scan the pool to see if a constant with the same value has already been
- + added. While we are doing this, also note the location where we must
- + insert the constant if it doesn't already exist. */
- + for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
- + {
- + if (GET_CODE (fix->value) == GET_CODE (mp->value)
- + && fix->mode == mp->mode
- + && (GET_CODE (fix->value) != CODE_LABEL
- + || (CODE_LABEL_NUMBER (fix->value)
- + == CODE_LABEL_NUMBER (mp->value)))
- + && rtx_equal_p (fix->value, mp->value))
- + {
- + /* More than one fix references this entry. */
- + mp->refcount++;
- + return move_minipool_fix_forward_ref (mp, max_mp, max_address);
- + }
- +
- + /* Note the insertion point if necessary. */
- + if (max_mp == NULL && mp->max_address > max_address)
- + max_mp = mp;
- +
- + }
- +
- + /* The value is not currently in the minipool, so we need to create a new
- + entry for it. If MAX_MP is NULL, the entry will be put on the end of
- + the list since the placement is less constrained than any existing
- + entry. Otherwise, we insert the new fix before MAX_MP and, if
- + necessary, adjust the constraints on the other entries. */
- + mp = xmalloc (sizeof (*mp));
- + mp->fix_size = fix->fix_size;
- + mp->mode = fix->mode;
- + mp->value = fix->value;
- + mp->refcount = 1;
- + /* Not yet required for a backwards ref. */
- + mp->min_address = -65536;
- +
- + if (max_mp == NULL)
- + {
- + mp->max_address = max_address;
- + mp->next = NULL;
- + mp->prev = minipool_vector_tail;
- +
- + if (mp->prev == NULL)
- + {
- + minipool_vector_head = mp;
- + minipool_vector_label = gen_label_rtx ();
- + }
- + else
- + mp->prev->next = mp;
- +
- + minipool_vector_tail = mp;
- + }
- + else
- + {
- + if (max_address > max_mp->max_address - mp->fix_size)
- + mp->max_address = max_mp->max_address - mp->fix_size;
- + else
- + mp->max_address = max_address;
- +
- + mp->next = max_mp;
- + mp->prev = max_mp->prev;
- + max_mp->prev = mp;
- + if (mp->prev != NULL)
- + mp->prev->next = mp;
- + else
- + minipool_vector_head = mp;
- + }
- +
- + /* Save the new entry. */
- + max_mp = mp;
- +
- + /* Scan over the preceding entries and adjust their addresses as required.
- + */
- + while (mp->prev != NULL
- + && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
- + {
- + mp->prev->max_address = mp->max_address - mp->prev->fix_size;
- + mp = mp->prev;
- + }
- +
- + return max_mp;
- +}
- +
- +
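- +/* Counterpart of move_minipool_fix_forward_ref for a backwards reference:
- + move MP to just after MIN_MP (if MIN_MP is non-NULL), tighten its
- + MIN_ADDRESS constraint, and recompute the offsets and constraints of
- + the surrounding entries. */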
- +static Mnode *
- +move_minipool_fix_backward_ref (Mnode * mp, Mnode * min_mp,
- + HOST_WIDE_INT min_address)
- +{
- + HOST_WIDE_INT offset;
- +
- + /* This should never be true, and the code below assumes these are
- + different. */
- + if (mp == min_mp)
- + abort ();
- +
- + if (min_mp == NULL)
- + {
- + if (min_address > mp->min_address)
- + mp->min_address = min_address;
- + }
- + else
- + {
- + /* We will adjust this below if it is too loose. */
- + mp->min_address = min_address;
- +
- + /* Unlink MP from its current position. Since min_mp is non-null,
- + mp->next must be non-null. */
- + mp->next->prev = mp->prev;
- + if (mp->prev != NULL)
- + mp->prev->next = mp->next;
- + else
- + minipool_vector_head = mp->next;
- +
- + /* Reinsert it after MIN_MP. */
- + mp->prev = min_mp;
- + mp->next = min_mp->next;
- + min_mp->next = mp;
- + if (mp->next != NULL)
- + mp->next->prev = mp;
- + else
- + minipool_vector_tail = mp;
- + }
- +
- + min_mp = mp;
- +
- + offset = 0;
- + for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
- + {
- + mp->offset = offset;
- + if (mp->refcount > 0)
- + offset += mp->fix_size;
- +
- + if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
- + mp->next->min_address = mp->min_address + mp->fix_size;
- + }
- +
- + return min_mp;
- +}
- +
- +
- +/* Add a constant to the minipool for a backward reference. Returns the
- + node added or NULL if the constant will not fit in this pool.
- +
- + Note that the code for insertion for a backwards reference can be
- + somewhat confusing because the calculated offsets for each fix do
- + not take into account the size of the pool (which is still under
- + construction). */
- +static Mnode *
- +add_minipool_backward_ref (Mfix * fix)
- +{
- + /* If set, min_mp is the last pool_entry that has a lower constraint than
- + the one we are trying to add. */
- + Mnode *min_mp = NULL;
- + /* This can be negative, since it is only a constraint. */
- + HOST_WIDE_INT min_address = fix->address - fix->backwards;
- + Mnode *mp;
- +
- + /* If we can't reach the current pool from this insn, or if we can't insert
- + this entry at the end of the pool without pushing other fixes out of
- + range, then we don't try. This ensures that we can't fail later on. */
- + if (min_address >= minipool_barrier->address
- + || (minipool_vector_tail->min_address + fix->fix_size
- + >= minipool_barrier->address))
- + return NULL;
- +
- + /* Scan the pool to see if a constant with the same value has already been
- + added. While we are doing this, also note the location where we must
- + insert the constant if it doesn't already exist. */
- + for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
- + {
- + if (GET_CODE (fix->value) == GET_CODE (mp->value)
- + && fix->mode == mp->mode
- + && (GET_CODE (fix->value) != CODE_LABEL
- + || (CODE_LABEL_NUMBER (fix->value)
- + == CODE_LABEL_NUMBER (mp->value)))
- + && rtx_equal_p (fix->value, mp->value)
- + /* Check that there is enough slack to move this entry to the end
- + of the table (this is conservative). */
- + && (mp->max_address
- + > (minipool_barrier->address
- + + minipool_vector_tail->offset
- + + minipool_vector_tail->fix_size)))
- + {
- + mp->refcount++;
- + return move_minipool_fix_backward_ref (mp, min_mp, min_address);
- + }
- +
- + if (min_mp != NULL)
- + mp->min_address += fix->fix_size;
- + else
- + {
- + /* Note the insertion point if necessary. */
- + if (mp->min_address < min_address)
- + {
- + min_mp = mp;
- + }
- + else if (mp->max_address
- + < minipool_barrier->address + mp->offset + fix->fix_size)
- + {
- + /* Inserting before this entry would push the fix beyond its
- + maximum address (which can happen if we have re-located a
- + forwards fix); force the new fix to come after it. */
- + min_mp = mp;
- + min_address = mp->min_address + fix->fix_size;
- + }
- + }
- + }
- +
- + /* We need to create a new entry. */
- + mp = xmalloc (sizeof (*mp));
- + mp->fix_size = fix->fix_size;
- + mp->mode = fix->mode;
- + mp->value = fix->value;
- + mp->refcount = 1;
- + mp->max_address = minipool_barrier->address + 65536;
- +
- + mp->min_address = min_address;
- +
- + if (min_mp == NULL)
- + {
- + mp->prev = NULL;
- + mp->next = minipool_vector_head;
- +
- + if (mp->next == NULL)
- + {
- + minipool_vector_tail = mp;
- + minipool_vector_label = gen_label_rtx ();
- + }
- + else
- + mp->next->prev = mp;
- +
- + minipool_vector_head = mp;
- + }
- + else
- + {
- + mp->next = min_mp->next;
- + mp->prev = min_mp;
- + min_mp->next = mp;
- +
- + if (mp->next != NULL)
- + mp->next->prev = mp;
- + else
- + minipool_vector_tail = mp;
- + }
- +
- + /* Save the new entry. */
- + min_mp = mp;
- +
- + if (mp->prev)
- + mp = mp->prev;
- + else
- + mp->offset = 0;
- +
- + /* Scan over the following entries and adjust their offsets. */
- + while (mp->next != NULL)
- + {
- + if (mp->next->min_address < mp->min_address + mp->fix_size)
- + mp->next->min_address = mp->min_address + mp->fix_size;
- +
- + if (mp->refcount)
- + mp->next->offset = mp->offset + mp->fix_size;
- + else
- + mp->next->offset = mp->offset;
- +
- + mp = mp->next;
- + }
- +
- + return min_mp;
- +}
- +
- +
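- +/* Record BARRIER as the location of the current minipool and assign each
- + pool entry its byte offset from the start of the pool. */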
- +static void
- +assign_minipool_offsets (Mfix * barrier)
- +{
- + HOST_WIDE_INT offset = 0;
- + Mnode *mp;
- +
- + minipool_barrier = barrier;
- +
- + for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
- + {
- + mp->offset = offset;
- +
- + if (mp->refcount > 0)
- + offset += mp->fix_size;
- + }
- +}
- +
- +
- +/* Print a symbolic form of X to the debug file, F. */
- +static void
- +avr32_print_value (FILE * f, rtx x)
- +{
- + switch (GET_CODE (x))
- + {
- + case CONST_INT:
- + fprintf (f, "0x%x", (int) INTVAL (x));
- + return;
- +
- + case CONST_DOUBLE:
- + fprintf (f, "<0x%lx,0x%lx>", (long) XWINT (x, 2), (long) XWINT (x, 3));
- + return;
- +
- + case CONST_VECTOR:
- + {
- + int i;
- +
- + fprintf (f, "<");
- + for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
- + {
- + fprintf (f, "0x%x", (int) INTVAL (CONST_VECTOR_ELT (x, i)));
- + if (i < (CONST_VECTOR_NUNITS (x) - 1))
- + fputc (',', f);
- + }
- + fprintf (f, ">");
- + }
- + return;
- +
- + case CONST_STRING:
- + fprintf (f, "\"%s\"", XSTR (x, 0));
- + return;
- +
- + case SYMBOL_REF:
- + fprintf (f, "`%s'", XSTR (x, 0));
- + return;
- +
- + case LABEL_REF:
- + fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
- + return;
- +
- + case CONST:
- + avr32_print_value (f, XEXP (x, 0));
- + return;
- +
- + case PLUS:
- + avr32_print_value (f, XEXP (x, 0));
- + fprintf (f, "+");
- + avr32_print_value (f, XEXP (x, 1));
- + return;
- +
- + case PC:
- + fprintf (f, "pc");
- + return;
- +
- + default:
- + fprintf (f, "????");
- + return;
- + }
- +}
- +
- +
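- +/* Return TRUE if LABEL is one of the labels that have been emitted for a
- + minipool in the current function. */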
- +int
- +is_minipool_label (rtx label)
- +{
- + minipool_labels *cur_mp_label = cfun->machine->minipool_label_head;
- +
- + if (GET_CODE (label) != CODE_LABEL)
- + return FALSE;
- +
- + while (cur_mp_label)
- + {
- + if (CODE_LABEL_NUMBER (label)
- + == CODE_LABEL_NUMBER (cur_mp_label->label))
- + return TRUE;
- + cur_mp_label = cur_mp_label->next;
- + }
- + return FALSE;
- +}
- +
- +
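- +/* Append LABEL to the current function's list of minipool labels. */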
- +static void
- +new_minipool_label (rtx label)
- +{
- + if (!cfun->machine->minipool_label_head)
- + {
- + cfun->machine->minipool_label_head =
- + ggc_alloc (sizeof (minipool_labels));
- + cfun->machine->minipool_label_tail = cfun->machine->minipool_label_head;
- + cfun->machine->minipool_label_head->label = label;
- + cfun->machine->minipool_label_head->next = 0;
- + cfun->machine->minipool_label_head->prev = 0;
- + }
- + else
- + {
- + cfun->machine->minipool_label_tail->next =
- + ggc_alloc (sizeof (minipool_labels));
- + cfun->machine->minipool_label_tail->next->label = label;
- + cfun->machine->minipool_label_tail->next->next = 0;
- + cfun->machine->minipool_label_tail->next->prev =
- + cfun->machine->minipool_label_tail;
- + cfun->machine->minipool_label_tail =
- + cfun->machine->minipool_label_tail->next;
- + }
- +}
- +
- +
- +/* Output the literal table */
- +static void
- +dump_minipool (rtx scan)
- +{
- + Mnode *mp;
- + Mnode *nmp;
- +
- + if (dump_file)
- + fprintf (dump_file,
- + ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
- + INSN_UID (scan), (unsigned long) minipool_barrier->address, 4);
- +
- + scan = emit_insn_after (gen_consttable_start (), scan);
- + scan = emit_insn_after (gen_align_4 (), scan);
- + scan = emit_label_after (minipool_vector_label, scan);
- + new_minipool_label (minipool_vector_label);
- +
- + for (mp = minipool_vector_head; mp != NULL; mp = nmp)
- + {
- + if (mp->refcount > 0)
- + {
- + if (dump_file)
- + {
- + fprintf (dump_file,
- + ";; Offset %u, min %ld, max %ld ",
- + (unsigned) mp->offset, (unsigned long) mp->min_address,
- + (unsigned long) mp->max_address);
- + avr32_print_value (dump_file, mp->value);
- + fputc ('\n', dump_file);
- + }
- +
- + switch (mp->fix_size)
- + {
- +#ifdef HAVE_consttable_4
- + case 4:
- + scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
- + break;
- +
- +#endif
- +#ifdef HAVE_consttable_8
- + case 8:
- + scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
- + break;
- +
- +#endif
- +#ifdef HAVE_consttable_16
- + case 16:
- + scan = emit_insn_after (gen_consttable_16 (mp->value), scan);
- + break;
- +
- +#endif
- + case 0:
- + /* This can happen for force-minipool entries which are just
- + there to force the minipool to be generated. */
- + break;
- + default:
- + abort ();
- + break;
- + }
- + }
- +
- + nmp = mp->next;
- + free (mp);
- + }
- +
- + minipool_vector_head = minipool_vector_tail = NULL;
- + scan = emit_insn_after (gen_consttable_end (), scan);
- + scan = emit_barrier_after (scan);
- +}
- +
- +
- +/* Return the cost of forcibly inserting a barrier after INSN. */
- +static int
- +avr32_barrier_cost (rtx insn)
- +{
- + /* Basing the location of the pool on the loop depth is preferable, but at
- + the moment, the basic block information seems to be corrupted by this
- + stage of the compilation. */
- + int base_cost = 50;
- + rtx next = next_nonnote_insn (insn);
- +
- + if (next != NULL && GET_CODE (next) == CODE_LABEL)
- + base_cost -= 20;
- +
- + switch (GET_CODE (insn))
- + {
- + case CODE_LABEL:
- + /* It will always be better to place the table before the label, rather
- + than after it. */
- + return 50;
- +
- + case INSN:
- + case CALL_INSN:
- + return base_cost;
- +
- + case JUMP_INSN:
- + return base_cost - 10;
- +
- + default:
- + return base_cost + 10;
- + }
- +}
- +
- +
- +/* Find the best place in the insn stream in the range
- + (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
- + Create the barrier by inserting a jump and add a new fix entry for
- + it. */
- +static Mfix *
- +create_fix_barrier (Mfix * fix, HOST_WIDE_INT max_address)
- +{
- + HOST_WIDE_INT count = 0;
- + rtx barrier;
- + rtx from = fix->insn;
- + rtx selected = from;
- + int selected_cost;
- + HOST_WIDE_INT selected_address;
- + Mfix *new_fix;
- + HOST_WIDE_INT max_count = max_address - fix->address;
- + rtx label = gen_label_rtx ();
- +
- + selected_cost = avr32_barrier_cost (from);
- + selected_address = fix->address;
- +
- + while (from && count < max_count)
- + {
- + rtx tmp;
- + int new_cost;
- +
- + /* This code shouldn't have been called if there was a natural barrier
- + within range. */
- + if (GET_CODE (from) == BARRIER)
- + abort ();
- +
- + /* Count the length of this insn. */
- + count += get_attr_length (from);
- +
- + /* If there is a jump table, add its length. */
- + tmp = is_jump_table (from);
- + if (tmp != NULL)
- + {
- + count += get_jump_table_size (tmp);
- +
- + /* Jump tables aren't in a basic block, so base the cost on the
- + dispatch insn. If we select this location, we will still put
- + the pool after the table. */
- + new_cost = avr32_barrier_cost (from);
- +
- + if (count < max_count && new_cost <= selected_cost)
- + {
- + selected = tmp;
- + selected_cost = new_cost;
- + selected_address = fix->address + count;
- + }
- +
- + /* Continue after the dispatch table. */
- + from = NEXT_INSN (tmp);
- + continue;
- + }
- +
- + new_cost = avr32_barrier_cost (from);
- +
- + if (count < max_count && new_cost <= selected_cost)
- + {
- + selected = from;
- + selected_cost = new_cost;
- + selected_address = fix->address + count;
- + }
- +
- + from = NEXT_INSN (from);
- + }
- +
- + /* Create a new JUMP_INSN that branches around a barrier. */
- + from = emit_jump_insn_after (gen_jump (label), selected);
- + JUMP_LABEL (from) = label;
- + barrier = emit_barrier_after (from);
- + emit_label_after (label, barrier);
- +
- + /* Create a minipool barrier entry for the new barrier. */
- + new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*new_fix));
- + new_fix->insn = barrier;
- + new_fix->address = selected_address;
- + new_fix->next = fix->next;
- + fix->next = new_fix;
- +
- + return new_fix;
- +}
- +
- +
- +/* Record that there is a natural barrier in the insn stream at
- + ADDRESS. */
- +static void
- +push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
- +{
- + Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
- +
- + fix->insn = insn;
- + fix->address = address;
- +
- + fix->next = NULL;
- + if (minipool_fix_head != NULL)
- + minipool_fix_tail->next = fix;
- + else
- + minipool_fix_head = fix;
- +
- + minipool_fix_tail = fix;
- +}
- +
- +
- +/* Record INSN, which will need fixing up to load a value from the
- + minipool. ADDRESS is the offset of the insn from the start of the
- + function; LOC is a pointer to the part of the insn which requires
- + fixing; VALUE is the constant that must be loaded, which is of type
- + MODE. */
- +static void
- +push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx * loc,
- + enum machine_mode mode, rtx value)
- +{
- + Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
- + rtx body = PATTERN (insn);
- +
- + fix->insn = insn;
- + fix->address = address;
- + fix->loc = loc;
- + fix->mode = mode;
- + fix->fix_size = MINIPOOL_FIX_SIZE (mode, value);
- + fix->value = value;
- +
- + if (GET_CODE (body) == PARALLEL)
- + {
- + /* Mcall : Ks16 << 2 */
- + fix->forwards = ((1 << 15) - 1) << 2;
- + fix->backwards = (1 << 15) << 2;
- + }
- + else if (GET_CODE (body) == SET
- + && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 4)
- + {
- + if (optimize_size)
- + {
- + /* Lddpc : Ku7 << 2 */
- + fix->forwards = ((1 << 7) - 1) << 2;
- + fix->backwards = 0;
- + }
- + else
- + {
- + /* Ld.w : Ks16 */
- + fix->forwards = ((1 << 15) - 4);
- + fix->backwards = (1 << 15);
- + }
- + }
- + else if (GET_CODE (body) == SET
- + && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 8)
- + {
- + /* Ld.d : Ks16 */
- + fix->forwards = ((1 << 15) - 4);
- + fix->backwards = (1 << 15);
- + }
- + else if (GET_CODE (body) == UNSPEC_VOLATILE
- + && XINT (body, 1) == VUNSPEC_MVRC)
- + {
- + /* Coprocessor load */
- + /* Ldc : Ku8 << 2 */
- + fix->forwards = ((1 << 8) - 1) << 2;
- + fix->backwards = 0;
- + }
- + else
- + {
- + /* Assume worst case which is lddpc insn. */
- + fix->forwards = ((1 << 7) - 1) << 2;
- + fix->backwards = 0;
- + }
- +
- + fix->minipool = NULL;
- +
- + /* If an insn doesn't have a range defined for it, then it isn't expecting
- + to be reworked by this code. Better to abort now than to generate duff
- + assembly code. */
- + if (fix->forwards == 0 && fix->backwards == 0)
- + abort ();
- +
- + if (dump_file)
- + {
- + fprintf (dump_file,
- + ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
- + GET_MODE_NAME (mode),
- + INSN_UID (insn), (unsigned long) address,
- + -1 * (long) fix->backwards, (long) fix->forwards);
- + avr32_print_value (dump_file, fix->value);
- + fprintf (dump_file, "\n");
- + }
- +
- + /* Add it to the chain of fixes. */
- + fix->next = NULL;
- +
- + if (minipool_fix_head != NULL)
- + minipool_fix_tail->next = fix;
- + else
- + minipool_fix_head = fix;
- +
- + minipool_fix_tail = fix;
- +}
- +
- +
- +/* Scan INSN and note any of its operands that need fixing.
- + If DO_PUSHES is false we do not actually push any of the fixups
- + needed. The function returns TRUE if any fixups were needed/pushed.
- + This is used by avr32_memory_load_p() which needs to know about loads
- + of constants that will be converted into minipool loads. */
- +static bool
- +note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
- +{
- + bool result = false;
- + int opno;
- +
- + extract_insn (insn);
- +
- + if (!constrain_operands (1))
- + fatal_insn_not_found (insn);
- +
- + if (recog_data.n_alternatives == 0)
- + return false;
- +
- + /* Fill in recog_op_alt with information about the constraints of this
- + insn. */
- + preprocess_constraints ();
- +
- + for (opno = 0; opno < recog_data.n_operands; opno++)
- + {
- + rtx op;
- +
- + /* Things we need to fix can only occur in inputs. */
- + if (recog_data.operand_type[opno] != OP_IN)
- + continue;
- +
- + op = recog_data.operand[opno];
- +
- + if (avr32_const_pool_ref_operand (op, GET_MODE (op)))
- + {
- + if (do_pushes)
- + {
- + rtx cop = avoid_constant_pool_reference (op);
- +
- + /* Casting the address of something to a mode narrower than a
- + word can cause avoid_constant_pool_reference() to return the
- + pool reference itself. That's no good to us here. Let's
- + just hope that we can use the constant pool value directly.
- + */
- + if (op == cop)
- + cop = get_pool_constant (XEXP (op, 0));
- +
- + push_minipool_fix (insn, address,
- + recog_data.operand_loc[opno],
- + recog_data.operand_mode[opno], cop);
- + }
- +
- + result = true;
- + }
- + else if (TARGET_HAS_ASM_ADDR_PSEUDOS
- + && avr32_address_operand (op, GET_MODE (op)))
- + {
- + /* Handle pseudo instructions using a direct address. These pseudo
- + instructions might need entries in the constant pool and we must
- + therefore create a constant pool for them, in case the
- + assembler/linker needs to insert entries. */
- + if (do_pushes)
- + {
- + /* Push a dummy constant pool entry so that the .cpool
- + directive is inserted at the appropriate place in the
- + code even if there are no real constant pool entries. This
- + is used by the assembler and linker to know where to put
- + generated constant pool entries. */
- + push_minipool_fix (insn, address,
- + recog_data.operand_loc[opno],
- + recog_data.operand_mode[opno],
- + gen_rtx_UNSPEC (VOIDmode,
- + gen_rtvec (1, const0_rtx),
- + UNSPEC_FORCE_MINIPOOL));
- + result = true;
- + }
- + }
- + }
- + return result;
- +}
- +
- +
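- +/* Return true if INSN is a simple register-to-register zero- or
- + sign-extension, i.e. a cast operation. */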
- +static int
- +avr32_insn_is_cast (rtx insn)
- +{
- +
- + if (NONJUMP_INSN_P (insn)
- + && GET_CODE (PATTERN (insn)) == SET
- + && (GET_CODE (SET_SRC (PATTERN (insn))) == ZERO_EXTEND
- + || GET_CODE (SET_SRC (PATTERN (insn))) == SIGN_EXTEND)
- + && REG_P (XEXP (SET_SRC (PATTERN (insn)), 0))
- + && REG_P (SET_DEST (PATTERN (insn))))
- + return true;
- + return false;
- +}
- +
- +
- +/* Replace all occurrences of reg FROM with reg TO in X. */
- +rtx
- +avr32_replace_reg (rtx x, rtx from, rtx to)
- +{
- + int i, j;
- + const char *fmt;
- +
- + gcc_assert ( REG_P (from) && REG_P (to) );
- +
- + /* Allow this function to make replacements in EXPR_LISTs. */
- + if (x == 0)
- + return 0;
- +
- + if (rtx_equal_p (x, from))
- + return to;
- +
- + if (GET_CODE (x) == SUBREG)
- + {
- + rtx new = avr32_replace_reg (SUBREG_REG (x), from, to);
- +
- + if (GET_CODE (new) == CONST_INT)
- + {
- + x = simplify_subreg (GET_MODE (x), new,
- + GET_MODE (SUBREG_REG (x)),
- + SUBREG_BYTE (x));
- + gcc_assert (x);
- + }
- + else
- + SUBREG_REG (x) = new;
- +
- + return x;
- + }
- + else if (GET_CODE (x) == ZERO_EXTEND)
- + {
- + rtx new = avr32_replace_reg (XEXP (x, 0), from, to);
- +
- + if (GET_CODE (new) == CONST_INT)
- + {
- + x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
- + new, GET_MODE (XEXP (x, 0)));
- + gcc_assert (x);
- + }
- + else
- + XEXP (x, 0) = new;
- +
- + return x;
- + }
- +
- + fmt = GET_RTX_FORMAT (GET_CODE (x));
- + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
- + {
- + if (fmt[i] == 'e')
- + XEXP (x, i) = avr32_replace_reg (XEXP (x, i), from, to);
- + else if (fmt[i] == 'E')
- + for (j = XVECLEN (x, i) - 1; j >= 0; j--)
- + XVECEXP (x, i, j) = avr32_replace_reg (XVECEXP (x, i, j), from, to);
- + }
- +
- + return x;
- +}
- +
- +
- +/* FIXME: The level of nesting in this function is way too deep. It needs to be
- + torn apart. */
- +static void
- +avr32_reorg_optimization (void)
- +{
- + rtx first = get_first_nonnote_insn ();
- + rtx insn;
- +
- + if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
- + {
- +
- + /* Scan through all insns looking for cast operations. */
- + if (dump_file)
- + {
- + fprintf (dump_file, ";; Deleting redundant cast operations:\n");
- + }
- + for (insn = first; insn; insn = NEXT_INSN (insn))
- + {
- + rtx reg, src_reg, scan;
- + enum machine_mode mode;
- + int unused_cast;
- + rtx label_ref;
- +
- + if (avr32_insn_is_cast (insn)
- + && (GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == QImode
- + || GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == HImode))
- + {
- + mode = GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0));
- + reg = SET_DEST (PATTERN (insn));
- + src_reg = XEXP (SET_SRC (PATTERN (insn)), 0);
- + }
- + else
- + {
- + continue;
- + }
- +
- + unused_cast = false;
- + label_ref = NULL_RTX;
- + for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
- + {
- + /* Check if we have reached the destination of a simple
- + conditional jump which we have already scanned past. If so,
- + we can safely continue scanning. */
- + if (LABEL_P (scan) && label_ref != NULL_RTX)
- + {
- + if (CODE_LABEL_NUMBER (scan) ==
- + CODE_LABEL_NUMBER (XEXP (label_ref, 0)))
- + label_ref = NULL_RTX;
- + else
- + break;
- + }
- +
- + if (!INSN_P (scan))
- + continue;
- +
- + /* For conditional jumps we can keep on scanning if we meet the
- + destination label later on, before any new jump insns occur. */
- + if (GET_CODE (scan) == JUMP_INSN)
- + {
- + if (any_condjump_p (scan) && label_ref == NULL_RTX)
- + label_ref = condjump_label (scan);
- + else
- + break;
- + }
- +
- + /* Check if we have a call and the register is used as an argument. */
- + if (CALL_P (scan)
- + && find_reg_fusage (scan, USE, reg) )
- + break;
- +
- + if (!reg_mentioned_p (reg, PATTERN (scan)))
- + continue;
- +
- + /* Check if casted register is used in this insn */
- + if ((regno_use_in (REGNO (reg), PATTERN (scan)) != NULL_RTX)
- + && (GET_MODE (regno_use_in (REGNO (reg), PATTERN (scan))) ==
- + GET_MODE (reg)))
- + {
- + /* If the register is not used in the source of the set or in a
- + memory expression in the destination, then it is only used as
- + a destination and is really dead. */
- + if (single_set (scan)
- + && GET_CODE (PATTERN (scan)) == SET
- + && REG_P (SET_DEST (PATTERN (scan)))
- + && !regno_use_in (REGNO (reg), SET_SRC (PATTERN (scan)))
- + && label_ref == NULL_RTX)
- + {
- + unused_cast = true;
- + }
- + break;
- + }
- +
- + /* Check if register is dead or set in this insn */
- + if (dead_or_set_p (scan, reg))
- + {
- + unused_cast = true;
- + break;
- + }
- + }
- +
- + /* Check if we have unresolved conditional jumps */
- + if (label_ref != NULL_RTX)
- + continue;
- +
- + if (unused_cast)
- + {
- + if (REGNO (reg) == REGNO (XEXP (SET_SRC (PATTERN (insn)), 0)))
- + {
- + /* One operand cast, safe to delete */
- + if (dump_file)
- + {
- + fprintf (dump_file,
- + ";; INSN %i removed, casted register %i value not used.\n",
- + INSN_UID (insn), REGNO (reg));
- + }
- + SET_INSN_DELETED (insn);
- + /* Force the instruction to be recognized again */
- + INSN_CODE (insn) = -1;
- + }
- + else
- + {
- + /* Two-operand cast, which could be replaced by a move: if the
- + source register is dead after the cast insn, the insn which
- + sets the source register can instead set the destination
- + register of the cast directly, as long as no insn in between
- + uses the register. */
- + rtx link = NULL_RTX;
- + rtx set;
- + rtx src_reg = XEXP (SET_SRC (PATTERN (insn)), 0);
- + unused_cast = false;
- +
- + if (!find_reg_note (insn, REG_DEAD, src_reg))
- + continue;
- +
- + /* Search for the insn which sets the source register */
- + for (scan = PREV_INSN (insn);
- + scan && GET_CODE (scan) != CODE_LABEL;
- + scan = PREV_INSN (scan))
- + {
- + if (! INSN_P (scan))
- + continue;
- +
- + set = single_set (scan);
- + /* Fix for bug #11763: the following if condition has been
- + modified, and an else part has been added to set link to
- + NULL_RTX. Previously:
- + if (set && rtx_equal_p (src_reg, SET_DEST (set))) */
- + if (set && (REGNO(src_reg) == REGNO(SET_DEST(set))))
- + {
- + if (rtx_equal_p (src_reg, SET_DEST (set)))
- + {
- + link = scan;
- + break;
- + }
- + else
- + {
- + link = NULL_RTX;
- + break;
- + }
- + }
- + }
- +
- +
- + /* Found no link, or the link is a call insn where we cannot
- + change the destination register */
- + if (link == NULL_RTX || CALL_P (link))
- + continue;
- +
- + /* Scan through all insns between link and insn */
- + for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
- + {
- + /* Don't try to trace forward past a CODE_LABEL if we
- + haven't seen INSN yet. Ordinarily, we will only
- + find the setting insn in LOG_LINKS if it is in the
- + same basic block. However, cross-jumping can insert
- + code labels in between the load and the call, and
- + can result in situations where a single call insn
- + may have two targets depending on where we came
- + from. */
- +
- + if (GET_CODE (scan) == CODE_LABEL)
- + break;
- +
- + if (!INSN_P (scan))
- + continue;
- +
- + /* Don't try to trace forward past a JUMP. To optimize
- + safely, we would have to check that all the
- + instructions at the jump destination did not use REG.
- + */
- +
- + if (GET_CODE (scan) == JUMP_INSN)
- + {
- + break;
- + }
- +
- + if (!reg_mentioned_p (src_reg, PATTERN (scan)))
- + continue;
- +
- + /* We have reached the cast insn */
- + if (scan == insn)
- + {
- + /* We can remove the cast and replace the destination
- + register of the link insn with the destination
- + of the cast */
- + if (dump_file)
- + {
- + fprintf (dump_file,
- + ";; INSN %i removed, casted value unused. "
- + "Destination of removed cast operation: register %i, folded into INSN %i.\n",
- + INSN_UID (insn), REGNO (reg),
- + INSN_UID (link));
- + }
- + /* Update link insn */
- + SET_DEST (PATTERN (link)) =
- + gen_rtx_REG (mode, REGNO (reg));
- + /* Force the instruction to be recognized again */
- + INSN_CODE (link) = -1;
- +
- + /* Delete insn */
- + SET_INSN_DELETED (insn);
- + /* Force the instruction to be recognized again */
- + INSN_CODE (insn) = -1;
- + break;
- + }
- + }
- + }
- + }
- + }
- + }
- +
- + /* This optimization is disabled since it has a bug. */
- + /* In the case where the instruction that the shifted add gets folded
- + * into is a branch destination, this breaks, e.g.:
- + *
- + * add r8, r10, r8 << 2
- + * 1:
- + * ld.w r11, r8[0]
- + * ...
- + * mov r8, sp
- + * rjmp 1b
- + *
- + * gets folded to:
- + *
- + * 1:
- + * ld.w r11, r10[r8 << 2]
- + * ...
- + * mov r8, sp
- + * rjmp 1b
- + *
- + * which is clearly wrong..
- + */
- + if (0 && TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
- + {
- +
- + /* Scan through all insns looking for shifted add operations */
- + if (dump_file)
- + {
- + fprintf (dump_file,
- + ";; Deleting redundant shifted add operations:\n");
- + }
- + for (insn = first; insn; insn = NEXT_INSN (insn))
- + {
- + rtx reg, mem_expr, scan, op0, op1;
- + int add_only_used_as_pointer;
- +
- + if (INSN_P (insn)
- + && GET_CODE (PATTERN (insn)) == SET
- + && GET_CODE (SET_SRC (PATTERN (insn))) == PLUS
- + && (GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == MULT
- + || GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == ASHIFT)
- + && GET_CODE (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1)) ==
- + CONST_INT && REG_P (SET_DEST (PATTERN (insn)))
- + && REG_P (XEXP (SET_SRC (PATTERN (insn)), 1))
- + && REG_P (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0)))
- + {
- + reg = SET_DEST (PATTERN (insn));
- + mem_expr = SET_SRC (PATTERN (insn));
- + op0 = XEXP (XEXP (mem_expr, 0), 0);
- + op1 = XEXP (mem_expr, 1);
- + }
- + else
- + {
- + continue;
- + }
- +
- + /* Scan forward to check that the result of the shifted add
- + operation is only used as an address in memory operations and
- + that the operands of the shifted add are not clobbered. */
- + add_only_used_as_pointer = false;
- + for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
- + {
- + if (!INSN_P (scan))
- + continue;
- +
- + /* Don't try to trace forward past a JUMP or CALL. To optimize
- + safely, we would have to check that all the instructions at
- + the jump destination did not use REG. */
- +
- + if (GET_CODE (scan) == JUMP_INSN)
- + {
- + break;
- + }
- +
- + /* If used in a call insn then we cannot optimize it away */
- + if (CALL_P (scan) && find_regno_fusage (scan, USE, REGNO (reg)))
- + break;
- +
- + /* If any of the operands of the shifted add are clobbered we
- + cannot optimize the shifted add away. */
- + if ((reg_set_p (op0, scan) && (REGNO (op0) != REGNO (reg)))
- + || (reg_set_p (op1, scan) && (REGNO (op1) != REGNO (reg))))
- + break;
- +
- + if (!reg_mentioned_p (reg, PATTERN (scan)))
- + continue;
- +
- + /* If it is used anywhere other than as a pointer or as the
- + destination register, we fail. */
- + if (!(single_set (scan)
- + && GET_CODE (PATTERN (scan)) == SET
- + && ((MEM_P (SET_DEST (PATTERN (scan)))
- + && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
- + && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) == REGNO (reg))
- + || (MEM_P (SET_SRC (PATTERN (scan)))
- + && REG_P (XEXP (SET_SRC (PATTERN (scan)), 0))
- + && REGNO (XEXP
- + (SET_SRC (PATTERN (scan)), 0)) == REGNO (reg))))
- + && !(GET_CODE (PATTERN (scan)) == SET
- + && REG_P (SET_DEST (PATTERN (scan)))
- + && !regno_use_in (REGNO (reg),
- + SET_SRC (PATTERN (scan)))))
- + break;
- +
- + /* We cannot replace the pointer in TImode insns as these have a
- + different addressing mode from the other memory insns. */
- + if ( GET_MODE (SET_DEST (PATTERN (scan))) == TImode )
- + break;
- +
- + /* Check if register is dead or set in this insn */
- + if (dead_or_set_p (scan, reg))
- + {
- + add_only_used_as_pointer = true;
- + break;
- + }
- + }
- +
- + if (add_only_used_as_pointer)
- + {
- + /* Let's delete the add insn and replace all memory references
- + which use the pointer with the full expression. */
- + if (dump_file)
- + {
- + fprintf (dump_file,
- + ";; Deleting INSN %i since address expression can be folded into all "
- + "memory references using this expression\n",
- + INSN_UID (insn));
- + }
- + SET_INSN_DELETED (insn);
- + /* Force the instruction to be recognized again */
- + INSN_CODE (insn) = -1;
- +
- + for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
- + {
- + if (!INSN_P (scan))
- + continue;
- +
- + if (!reg_mentioned_p (reg, PATTERN (scan)))
- + continue;
- +
- + /* If it is used anywhere other than as a pointer or as the
- + destination register, we fail. */
- + if ((single_set (scan)
- + && GET_CODE (PATTERN (scan)) == SET
- + && ((MEM_P (SET_DEST (PATTERN (scan)))
- + && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
- + && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) ==
- + REGNO (reg)) || (MEM_P (SET_SRC (PATTERN (scan)))
- + &&
- + REG_P (XEXP
- + (SET_SRC (PATTERN (scan)),
- + 0))
- + &&
- + REGNO (XEXP
- + (SET_SRC (PATTERN (scan)),
- + 0)) == REGNO (reg)))))
- + {
- + if (dump_file)
- + {
- + fprintf (dump_file,
- + ";; Register %i replaced by indexed address in INSN %i\n",
- + REGNO (reg), INSN_UID (scan));
- + }
- + if (MEM_P (SET_DEST (PATTERN (scan))))
- + XEXP (SET_DEST (PATTERN (scan)), 0) = mem_expr;
- + else
- + XEXP (SET_SRC (PATTERN (scan)), 0) = mem_expr;
- + }
- +
- + /* Check if register is dead or set in this insn */
- + if (dead_or_set_p (scan, reg))
- + {
- + break;
- + }
- +
- + }
- + }
- + }
- + }
- +
- +
- + if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
- + {
- +
- + /* Scan through all insns looking for conditional register to
- + register move operations */
- + if (dump_file)
- + {
- + fprintf (dump_file,
- + ";; Folding redundant conditional move operations:\n");
- + }
- + for (insn = first; insn; insn = next_nonnote_insn (insn))
- + {
- + rtx src_reg, dst_reg, scan, test;
- +
- + if (INSN_P (insn)
- + && GET_CODE (PATTERN (insn)) == COND_EXEC
- + && GET_CODE (COND_EXEC_CODE (PATTERN (insn))) == SET
- + && REG_P (SET_SRC (COND_EXEC_CODE (PATTERN (insn))))
- + && REG_P (SET_DEST (COND_EXEC_CODE (PATTERN (insn))))
- + && find_reg_note (insn, REG_DEAD, SET_SRC (COND_EXEC_CODE (PATTERN (insn)))))
- + {
- + src_reg = SET_SRC (COND_EXEC_CODE (PATTERN (insn)));
- + dst_reg = SET_DEST (COND_EXEC_CODE (PATTERN (insn)));
- + test = COND_EXEC_TEST (PATTERN (insn));
- + }
- + else
- + {
- + continue;
- + }
- +
- + /* Scan backward through the rest of insns in this if-then or if-else
- + block and check if we can fold the move into another of the conditional
- + insns in the same block. */
- + scan = prev_nonnote_insn (insn);
- + while (INSN_P (scan)
- + && GET_CODE (PATTERN (scan)) == COND_EXEC
- + && rtx_equal_p (COND_EXEC_TEST (PATTERN (scan)), test))
- + {
- + rtx pattern = COND_EXEC_CODE (PATTERN (scan));
- + if ( GET_CODE (pattern) == PARALLEL )
- + pattern = XVECEXP (pattern, 0, 0);
- +
- + if ( reg_set_p (src_reg, pattern) )
- + {
- + /* Fold in the destination register for the cond. move
- + into this insn. */
- + SET_DEST (pattern) = dst_reg;
- + if (dump_file)
- + {
- + fprintf (dump_file,
- + ";; Deleting INSN %i since this operation can be folded into INSN %i\n",
- + INSN_UID (insn), INSN_UID (scan));
- + }
- +
- + /* Scan and check if any of the insns in between use src_reg; if
- + so, replace it with dst_reg. */
- + while ( (scan = next_nonnote_insn (scan)) != insn ){
- + avr32_replace_reg (scan, src_reg, dst_reg);
- + }
- + /* Delete the insn. */
- + SET_INSN_DELETED (insn);
- +
- + /* Force the instruction to be recognized again */
- + INSN_CODE (insn) = -1;
- + break;
- + }
- +
- + /* If the destination register is used but not set in this insn
- + we cannot fold. */
- + if ( reg_mentioned_p (dst_reg, pattern) )
- + break;
- +
- + scan = prev_nonnote_insn (scan);
- + }
- + }
- + }
- +
- +}
- +
- +
- +/* Exported to toplev.c.
- +
- + Do a final pass over the function, just before delayed branch
- + scheduling. */
- +static void
- +avr32_reorg (void)
- +{
- + rtx insn;
- + HOST_WIDE_INT address = 0;
- + Mfix *fix;
- +
- + minipool_fix_head = minipool_fix_tail = NULL;
- +
- + /* The first insn must always be a note, or the code below won't scan it
- + properly. */
- + insn = get_insns ();
- + if (GET_CODE (insn) != NOTE)
- + abort ();
- +
- + /* Scan all the insns and record the operands that will need fixing. */
- + for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
- + {
- + if (GET_CODE (insn) == BARRIER)
- + push_minipool_barrier (insn, address);
- + else if (INSN_P (insn))
- + {
- + rtx table;
- +
- + note_invalid_constants (insn, address, true);
- + address += get_attr_length (insn);
- +
- + /* If the insn is a vector jump, add the size of the table and skip
- + the table. */
- + if ((table = is_jump_table (insn)) != NULL)
- + {
- + address += get_jump_table_size (table);
- + insn = table;
- + }
- + }
- + }
- +
- + fix = minipool_fix_head;
- +
- + /* Now scan the fixups and perform the required changes. */
- + while (fix)
- + {
- + Mfix *ftmp;
- + Mfix *fdel;
- + Mfix *last_added_fix;
- + Mfix *last_barrier = NULL;
- + Mfix *this_fix;
- +
- + /* Skip any further barriers before the next fix. */
- + while (fix && GET_CODE (fix->insn) == BARRIER)
- + fix = fix->next;
- +
- + /* No more fixes. */
- + if (fix == NULL)
- + break;
- +
- + last_added_fix = NULL;
- +
- + for (ftmp = fix; ftmp; ftmp = ftmp->next)
- + {
- + if (GET_CODE (ftmp->insn) == BARRIER)
- + {
- + if (ftmp->address >= minipool_vector_head->max_address)
- + break;
- +
- + last_barrier = ftmp;
- + }
- + else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
- + break;
- +
- + last_added_fix = ftmp; /* Keep track of the last fix added.
- + */
- + }
- +
- + /* If we found a barrier, drop back to that; any fixes that we could
- + have reached but come after the barrier will now go in the next
- + mini-pool. */
- + if (last_barrier != NULL)
- + {
- + /* Reduce the refcount for those fixes that won't go into this pool
- + after all. */
- + for (fdel = last_barrier->next;
- + fdel && fdel != ftmp; fdel = fdel->next)
- + {
- + fdel->minipool->refcount--;
- + fdel->minipool = NULL;
- + }
- +
- + ftmp = last_barrier;
- + }
- + else
- + {
- + /* ftmp is the first fix that we can't fit into this pool and there are no
- + natural barriers that we could use. Insert a new barrier in the
- + code somewhere between the previous fix and this one, and
- + arrange to jump around it. */
- + HOST_WIDE_INT max_address;
- +
- + /* The last item on the list of fixes must be a barrier, so we can
- + never run off the end of the list of fixes without last_barrier
- + being set. */
- + if (ftmp == NULL)
- + abort ();
- +
- + max_address = minipool_vector_head->max_address;
- + /* Check that there isn't another fix that is in range that we
- + couldn't fit into this pool because the pool was already too
- + large: we need to put the pool before such an instruction. */
- + if (ftmp->address < max_address)
- + max_address = ftmp->address;
- +
- + last_barrier = create_fix_barrier (last_added_fix, max_address);
- + }
- +
- + assign_minipool_offsets (last_barrier);
- +
- + while (ftmp)
- + {
- + if (GET_CODE (ftmp->insn) != BARRIER
- + && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
- + == NULL))
- + break;
- +
- + ftmp = ftmp->next;
- + }
- +
- + /* Scan over the fixes we have identified for this pool, fixing them up
- + and adding the constants to the pool itself. */
- + for (this_fix = fix; this_fix && ftmp != this_fix;
- + this_fix = this_fix->next)
- + if (GET_CODE (this_fix->insn) != BARRIER
- + /* Do nothing for entries present just to force the insertion of
- + a minipool. */
- + && !IS_FORCE_MINIPOOL (this_fix->value))
- + {
- + rtx addr = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
- + minipool_vector_label),
- + this_fix->minipool->offset);
- + *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
- + }
- +
- + dump_minipool (last_barrier->insn);
- + fix = ftmp;
- + }
- +
- + /* Free the minipool memory. */
- + obstack_free (&minipool_obstack, minipool_startobj);
- +
- + avr32_reorg_optimization ();
- +}
- +
- +
- +/* Hook for doing some final scanning of instructions. Does nothing yet...*/
- +void
- +avr32_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
- + rtx * opvec ATTRIBUTE_UNUSED,
- + int noperands ATTRIBUTE_UNUSED)
- +{
- + return;
- +}
- +
- +
- +/* Function for changing the condition on the next instruction,
- + should be used when emitting compare instructions and
- + the condition of the next instruction needs to change.
- +*/
- +int
- +set_next_insn_cond (rtx cur_insn, rtx new_cond)
- +{
- + rtx next_insn = next_nonnote_insn (cur_insn);
- + if ((next_insn != NULL_RTX)
- + && (INSN_P (next_insn)))
- + {
- + if ((GET_CODE (PATTERN (next_insn)) == SET)
- + && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE))
- + {
- + /* Branch instructions */
- + XEXP (SET_SRC (PATTERN (next_insn)), 0) = new_cond;
- + /* Force the instruction to be recognized again */
- + INSN_CODE (next_insn) = -1;
- + return TRUE;
- + }
- + else if ((GET_CODE (PATTERN (next_insn)) == SET)
- + && avr32_comparison_operator (SET_SRC (PATTERN (next_insn)),
- + GET_MODE (SET_SRC (PATTERN (next_insn)))))
- + {
- + /* scc with no compare */
- + SET_SRC (PATTERN (next_insn)) = new_cond;
- + /* Force the instruction to be recognized again */
- + INSN_CODE (next_insn) = -1;
- + return TRUE;
- + }
- + else if (GET_CODE (PATTERN (next_insn)) == COND_EXEC)
- + {
- + if ( GET_CODE (new_cond) == UNSPEC )
- + {
- + COND_EXEC_TEST (PATTERN (next_insn)) =
- + gen_rtx_UNSPEC (CCmode,
- + gen_rtvec (2,
- + XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 0),
- + XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 1)),
- + XINT (new_cond, 1));
- + }
- + else
- + {
- + PUT_CODE(COND_EXEC_TEST (PATTERN (next_insn)), GET_CODE(new_cond));
- + }
- + }
- + }
- +
- + return FALSE;
- +}
- +
- +
- +/* Function for obtaining the condition for the next instruction after cur_insn.
- +*/
- +rtx
- +get_next_insn_cond (rtx cur_insn)
- +{
- + rtx next_insn = next_nonnote_insn (cur_insn);
- + rtx cond = NULL_RTX;
- + if (next_insn != NULL_RTX
- + && INSN_P (next_insn))
- + {
- + if ((GET_CODE (PATTERN (next_insn)) == SET)
- + && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE))
- + {
- + /* Branch and cond if then else instructions */
- + cond = XEXP (SET_SRC (PATTERN (next_insn)), 0);
- + }
- + else if ((GET_CODE (PATTERN (next_insn)) == SET)
- + && avr32_comparison_operator (SET_SRC (PATTERN (next_insn)),
- + GET_MODE (SET_SRC (PATTERN (next_insn)))))
- + {
- + /* scc with no compare */
- + cond = SET_SRC (PATTERN (next_insn));
- + }
- + else if (GET_CODE (PATTERN (next_insn)) == COND_EXEC)
- + {
- + cond = COND_EXEC_TEST (PATTERN (next_insn));
- + }
- + }
- + return cond;
- +}
- +
- +
- +/* Check if the next insn is a conditional insn that will emit a compare
- + for itself.
- +*/
- +rtx
- +next_insn_emits_cmp (rtx cur_insn)
- +{
- + rtx next_insn = next_nonnote_insn (cur_insn);
- + rtx cond = NULL_RTX;
- + if (next_insn != NULL_RTX
- + && INSN_P (next_insn))
- + {
- + if ( ((GET_CODE (PATTERN (next_insn)) == SET)
- + && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE)
- + && (XEXP (XEXP (SET_SRC (PATTERN (next_insn)), 0),0) != cc0_rtx))
- + || GET_CODE (PATTERN (next_insn)) == COND_EXEC )
- + return TRUE;
- + }
- + return FALSE;
- +}
- +
- +
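- +/* Output the compare needed for a conditional instruction using condition
- + COND on operands OP0 and OP1 in MODE, unless an earlier compare already
- + makes it redundant. If OP0 is an AND expression, a bld instruction is
- + emitted instead of a full compare. Returns the condition the following
- + instruction should use. */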
- +rtx
- +avr32_output_cmp (rtx cond, enum machine_mode mode, rtx op0, rtx op1)
- +{
- +
- + rtx new_cond = NULL_RTX;
- + rtx ops[2];
- + rtx compare_pattern;
- + ops[0] = op0;
- + ops[1] = op1;
- +
- + if ( GET_CODE (op0) == AND )
- + compare_pattern = op0;
- + else
- + compare_pattern = gen_rtx_COMPARE (mode, op0, op1);
- +
- + new_cond = is_compare_redundant (compare_pattern, cond);
- +
- + if (new_cond != NULL_RTX)
- + return new_cond;
- +
- + /* Check if we are inserting a bit-load instead of a compare. */
- + if ( GET_CODE (op0) == AND )
- + {
- + ops[0] = XEXP (op0, 0);
- + ops[1] = XEXP (op0, 1);
- + output_asm_insn ("bld\t%0, %p1", ops);
- + return cond;
- + }
- +
- + /* Insert compare */
- + switch (mode)
- + {
- + case QImode:
- + output_asm_insn ("cp.b\t%0, %1", ops);
- + break;
- + case HImode:
- + output_asm_insn ("cp.h\t%0, %1", ops);
- + break;
- + case SImode:
- + output_asm_insn ("cp.w\t%0, %1", ops);
- + break;
- + case DImode:
- + if (GET_CODE (op1) != REG)
- + output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0", ops);
- + else
- + output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0, %m1", ops);
- + break;
- + default:
- + internal_error ("Unknown comparison mode");
- + break;
- + }
- +
- + return cond;
- +}
- +
- +
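- +/* Predicate: return 1 if OP is a PARALLEL describing a valid load-multiple
- + operation, optionally with a write-back update of the base register as
- + its first element. */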
- +int
- +avr32_load_multiple_operation (rtx op,
- + enum machine_mode mode ATTRIBUTE_UNUSED)
- +{
- + int count = XVECLEN (op, 0);
- + unsigned int dest_regno;
- + rtx src_addr;
- + rtx elt;
- + int i = 1, base = 0;
- +
- + if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
- + return 0;
- +
- + /* Check to see if this might be a write-back. */
- + if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
- + {
- + i++;
- + base = 1;
- +
- + /* Now check it more carefully. */
- + if (GET_CODE (SET_DEST (elt)) != REG
- + || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
- + || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
- + || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
- + return 0;
- + }
- +
- + /* Perform a quick check so we don't blow up below. */
- + if (count <= 1
- + || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
- + || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
- + || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC)
- + return 0;
- +
- + dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
- + src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
- +
- + for (; i < count; i++)
- + {
- + elt = XVECEXP (op, 0, i);
- +
- + if (GET_CODE (elt) != SET
- + || GET_CODE (SET_DEST (elt)) != REG
- + || GET_MODE (SET_DEST (elt)) != SImode
- + || GET_CODE (SET_SRC (elt)) != UNSPEC)
- + return 0;
- + }
- +
- + return 1;
- +}
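- +
- +/* Rough sketch of the PARALLEL shape the predicate above accepts (purely
- + illustrative):
- + (parallel [(set base (plus base (const_int 4 * <number of loads>))) ; optional write-back
- + (set (reg:SI rd0) (unspec [...] ...))
- + ...
- + (set (reg:SI rdN) (unspec [...] ...))])
- + i.e. an optional base-register update followed by one SImode UNSPEC load
- + per destination register. */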
- +
- +
- +int
- +avr32_store_multiple_operation (rtx op,
- + enum machine_mode mode ATTRIBUTE_UNUSED)
- +{
- + int count = XVECLEN (op, 0);
- + int src_regno;
- + rtx dest_addr;
- + rtx elt;
- + int i = 1;
- +
- + if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
- + return 0;
- +
- + /* Perform a quick check so we don't blow up below. */
- + if (count <= i
- + || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
- + || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
- + || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC)
- + return 0;
- +
- + src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
- + dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
- +
- + for (; i < count; i++)
- + {
- + elt = XVECEXP (op, 0, i);
- +
- + if (GET_CODE (elt) != SET
- + || GET_CODE (SET_DEST (elt)) != MEM
- + || GET_MODE (SET_DEST (elt)) != SImode
- + || GET_CODE (SET_SRC (elt)) != UNSPEC)
- + return 0;
- + }
- +
- + return 1;
- +}
- +
- +
- +int
- +avr32_valid_macmac_bypass (rtx insn_out, rtx insn_in)
- +{
- + /* Check if they use the same accumulator */
- + if (rtx_equal_p
- + (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
- + {
- + return TRUE;
- + }
- +
- + return FALSE;
- +}
- +
- +
- +int
- +avr32_valid_mulmac_bypass (rtx insn_out, rtx insn_in)
- +{
- + /*
- + Check if the mul instruction produces the accumulator for the mac
- + instruction. */
- + if (rtx_equal_p
- + (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
- + {
- + return TRUE;
- + }
- + return FALSE;
- +}
- +
- +
- +int
- +avr32_store_bypass (rtx insn_out, rtx insn_in)
- +{
- + /* Only a valid bypass if the output result is used as a source operand in
- + the store instruction, NOT if it is used as a pointer or base. */
- + if (rtx_equal_p
- + (SET_DEST (PATTERN (insn_out)), SET_SRC (PATTERN (insn_in))))
- + {
- + return TRUE;
- + }
- +
- + return FALSE;
- +}
- +
- +
- +int
- +avr32_mul_waw_bypass (rtx insn_out, rtx insn_in)
- +{
- + /* Check if the register holding the result from the mul instruction is
- + used as a result register in the input instruction. */
- + if (rtx_equal_p
- + (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
- + {
- + return TRUE;
- + }
- +
- + return FALSE;
- +}
- +
- +
- +int
- +avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in)
- +{
- + /* Check if the first loaded word in insn_out is used in insn_in. */
- + rtx dst_reg;
- + rtx second_loaded_reg;
- +
- + /* If this is a double alu operation then the bypass is not valid */
- + if ((get_attr_type (insn_in) == TYPE_ALU
- + || get_attr_type (insn_in) == TYPE_ALU2)
- + && (GET_MODE_SIZE (GET_MODE (SET_DEST (PATTERN (insn_out)))) > 4))
- + return FALSE;
- +
- + /* Get the destination register in the load */
- + if (!REG_P (SET_DEST (PATTERN (insn_out))))
- + return FALSE;
- +
- + dst_reg = SET_DEST (PATTERN (insn_out));
- + second_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 1);
- +
- + if (!reg_mentioned_p (second_loaded_reg, PATTERN (insn_in)))
- + return TRUE;
- +
- + return FALSE;
- +}
- +
- +
- +int
- +avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in)
- +{
- + /*
- + Check if the first two loaded words in insn_out are used in insn_in. */
- + rtx dst_reg;
- + rtx third_loaded_reg, fourth_loaded_reg;
- +
- + /* Get the destination register in the load */
- + if (!REG_P (SET_DEST (PATTERN (insn_out))))
- + return FALSE;
- +
- + dst_reg = SET_DEST (PATTERN (insn_out));
- + third_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 2);
- + fourth_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 3);
- +
- + if (!reg_mentioned_p (third_loaded_reg, PATTERN (insn_in))
- + && !reg_mentioned_p (fourth_loaded_reg, PATTERN (insn_in)))
- + {
- + return TRUE;
- + }
- +
- + return FALSE;
- +}
- +
- +
- +rtx
- +avr32_ifcvt_modify_test (ce_if_block_t *ce_info, rtx test )
- +{
- + rtx branch_insn;
- + rtx cmp_test;
- + rtx compare_op0;
- + rtx compare_op1;
- +
- +
- + if ( !ce_info
- + || test == NULL_RTX
- + || !reg_mentioned_p (cc0_rtx, test))
- + return test;
- +
- + branch_insn = BB_END (ce_info->test_bb);
- + cmp_test = PATTERN(prev_nonnote_insn (branch_insn));
- +
- + if (GET_CODE(cmp_test) != SET
- + || !CC0_P(XEXP(cmp_test, 0)) )
- + return cmp_test;
- +
- + if ( GET_CODE(SET_SRC(cmp_test)) == COMPARE ){
- + compare_op0 = XEXP(SET_SRC(cmp_test), 0);
- + compare_op1 = XEXP(SET_SRC(cmp_test), 1);
- + } else {
- + compare_op0 = SET_SRC(cmp_test);
- + compare_op1 = const0_rtx;
- + }
- +
- + return gen_rtx_fmt_ee (GET_CODE(test), GET_MODE (compare_op0),
- + compare_op0, compare_op1);
- +}
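- +
- +/* Example of the rewrite done above (a sketch): if the branch test is
- + (ne cc0 (const_int 0)) and the preceding insn in the test block is
- + (set cc0 (compare (reg X) (reg Y))), the returned test becomes
- + (ne (reg X) (reg Y)); when cc0 is set directly from a single operand,
- + const0_rtx is used as the second compare operand. */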
- +
- +
- +rtx
- +avr32_ifcvt_modify_insn (ce_if_block_t *ce_info, rtx pattern, rtx insn,
- + int *num_true_changes)
- +{
- + rtx test = COND_EXEC_TEST(pattern);
- + rtx op = COND_EXEC_CODE(pattern);
- + rtx cmp_insn;
- + rtx cond_exec_insn;
- + int inputs_set_outside_ifblock = 1;
- + basic_block current_bb = BLOCK_FOR_INSN (insn);
- + rtx bb_insn ;
- + enum machine_mode mode = GET_MODE (XEXP (op, 0));
- +
- + if (CC0_P(XEXP(test, 0)))
- + test = avr32_ifcvt_modify_test (ce_info,
- + test );
- +
- + /* We do not support multiple tests. */
- + if ( ce_info
- + && ce_info->num_multiple_test_blocks > 0 )
- + return NULL_RTX;
- +
- + pattern = gen_rtx_COND_EXEC (VOIDmode, test, op);
- +
- + if ( !reload_completed )
- + {
- + rtx start;
- + int num_insns;
- + int max_insns = MAX_CONDITIONAL_EXECUTE;
- +
- + if ( !ce_info )
- + return op;
- +
- + /* Check if the insn is not suitable for conditional
- + execution. */
- + start_sequence ();
- + cond_exec_insn = emit_insn (pattern);
- + if ( recog_memoized (cond_exec_insn) < 0
- + && can_create_pseudo_p () )
- + {
- + /* Insn is not suitable for conditional execution, try
- + to fix it up by using an extra scratch register or
- + by pulling the operation outside the if-then-else
- + and then emitting a conditional move inside the if-then-else. */
- + end_sequence ();
- + if ( GET_CODE (op) != SET
- + || !REG_P (SET_DEST (op))
- + || GET_CODE (SET_SRC (op)) == IF_THEN_ELSE
- + || GET_MODE_SIZE (mode) > UNITS_PER_WORD )
- + return NULL_RTX;
- +
- + /* Check if any of the input operands to the insn is set inside the
- + current block. */
- + if ( current_bb->index == ce_info->then_bb->index )
- + start = PREV_INSN (BB_HEAD (ce_info->then_bb));
- + else
- + start = PREV_INSN (BB_HEAD (ce_info->else_bb));
- +
- +
- + for ( bb_insn = next_nonnote_insn (start); bb_insn != insn; bb_insn = next_nonnote_insn (bb_insn) )
- + {
- + rtx set = single_set (bb_insn);
- +
- + if ( set && reg_mentioned_p (SET_DEST (set), SET_SRC (op)))
- + {
- + inputs_set_outside_ifblock = 0;
- + break;
- + }
- + }
- +
- + cmp_insn = prev_nonnote_insn (BB_END (ce_info->test_bb));
- +
- +
- + /* Check if we can insert more insns. */
- + num_insns = ( ce_info->num_then_insns +
- + ce_info->num_else_insns +
- + ce_info->num_cond_clobber_insns +
- + ce_info->num_extra_move_insns );
- +
- + if ( ce_info->num_else_insns != 0 )
- + max_insns *=2;
- +
- + if ( num_insns >= max_insns )
- + return NULL_RTX;
- +
- + /* Check if we have an instruction which might be converted to
- + conditional form if we give it a scratch register to clobber. */
- + {
- + rtx clobber_insn;
- + rtx scratch_reg = gen_reg_rtx (mode);
- + rtx new_pattern = copy_rtx (pattern);
- + rtx set_src = SET_SRC (COND_EXEC_CODE (new_pattern));
- +
- + rtx clobber = gen_rtx_CLOBBER (mode, scratch_reg);
- + rtx vec[2] = { COND_EXEC_CODE (new_pattern), clobber };
- + COND_EXEC_CODE (new_pattern) = gen_rtx_PARALLEL (mode, gen_rtvec_v (2, vec));
- +
- + start_sequence ();
- + clobber_insn = emit_insn (new_pattern);
- +
- + if ( recog_memoized (clobber_insn) >= 0
- + && ( ( GET_RTX_LENGTH (GET_CODE (set_src)) == 2
- + && CONST_INT_P (XEXP (set_src, 1))
- + && avr32_const_ok_for_constraint_p (INTVAL (XEXP (set_src, 1)), 'K', "Ks08") )
- + || !ce_info->else_bb
- + || current_bb->index == ce_info->else_bb->index ))
- + {
- + end_sequence ();
- + /* Force the insn to be recognized again. */
- + INSN_CODE (insn) = -1;
- +
- + /* If this is the first change in this IF-block then
- + signal that we have made a change. */
- + if ( ce_info->num_cond_clobber_insns == 0
- + && ce_info->num_extra_move_insns == 0 )
- + *num_true_changes += 1;
- +
- + ce_info->num_cond_clobber_insns++;
- +
- + if (dump_file)
- + fprintf (dump_file,
- + "\nReplacing INSN %d with an insn using a scratch register for later ifcvt passes...\n",
- + INSN_UID (insn));
- +
- + return COND_EXEC_CODE (new_pattern);
- + }
- + end_sequence ();
- + }
- +
- + if ( inputs_set_outside_ifblock )
- + {
- + /* Check if the insn before the cmp is an AND which, used
- + together with the cmp, can be optimized into a bld. If
- + so, we should try to put the insn before the AND so
- + that we can catch the bld peephole. */
- + rtx set;
- + rtx insn_before_cmp_insn = prev_nonnote_insn (cmp_insn);
- + if (insn_before_cmp_insn
- + && (set = single_set (insn_before_cmp_insn))
- + && GET_CODE (SET_SRC (set)) == AND
- + && one_bit_set_operand (XEXP (SET_SRC (set), 1), SImode)
- + /* Also make sure that the insn does not set any
- + of the input operands to the insn we are pulling out. */
- + && !reg_mentioned_p (SET_DEST (set), SET_SRC (op)) )
- + cmp_insn = prev_nonnote_insn (cmp_insn);
- +
- + /* We can try to put the operation outside the if-then-else
- + blocks and insert a move. */
- + if ( !insn_invalid_p (insn)
- + /* Do not allow conditional insns to be moved outside the
- + if-then-else. */
- + && !reg_mentioned_p (cc0_rtx, insn)
- + /* We cannot move memory loads outside of the if-then-else
- + since the memory access should not be performed if the
- + condition is not met. */
- + && !mem_mentioned_p (SET_SRC (op)) )
- + {
- + rtx scratch_reg = gen_reg_rtx (mode);
- + rtx op_pattern = copy_rtx (op);
- + rtx new_insn, seq;
- + rtx link, prev_link;
- + op = copy_rtx (op);
- + /* Emit the operation to a temp reg before the compare,
- + and emit a move inside the if-then-else, hoping that the
- + whole if-then-else can be converted to conditional
- + execution. */
- + SET_DEST (op_pattern) = scratch_reg;
- + start_sequence ();
- + new_insn = emit_insn (op_pattern);
- + seq = get_insns();
- + end_sequence ();
- +
- + /* Check again that the insn is valid. For some insns the insn might
- + become invalid if the destination register is changed, e.g. for mulacc
- + operations. */
- + if ( insn_invalid_p (new_insn) )
- + return NULL_RTX;
- +
- + emit_insn_before_setloc (seq, cmp_insn, INSN_LOCATOR (insn));
- +
- + if (dump_file)
- + fprintf (dump_file,
- + "\nMoving INSN %d out of IF-block by adding INSN %d...\n",
- + INSN_UID (insn), INSN_UID (new_insn));
- +
- + ce_info->extra_move_insns[ce_info->num_extra_move_insns] = insn;
- + ce_info->moved_insns[ce_info->num_extra_move_insns] = new_insn;
- + XEXP (op, 1) = scratch_reg;
- + /* Force the insn to be recognized again. */
- + INSN_CODE (insn) = -1;
- +
- + /* Move REG_DEAD notes to the moved insn. */
- + prev_link = NULL_RTX;
- + for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
- + {
- + if (REG_NOTE_KIND (link) == REG_DEAD)
- + {
- + /* Add the REG_DEAD note to the new insn. */
- + rtx dead_reg = XEXP (link, 0);
- + REG_NOTES (new_insn) = gen_rtx_EXPR_LIST (REG_DEAD, dead_reg, REG_NOTES (new_insn));
- + /* Remove the REG_DEAD note from the insn we convert to a move. */
- + if ( prev_link )
- + XEXP (prev_link, 1) = XEXP (link, 1);
- + else
- + REG_NOTES (insn) = XEXP (link, 1);
- + }
- + else
- + {
- + prev_link = link;
- + }
- + }
- + /* Add a REG_DEAD note to signal that the scratch register is dead. */
- + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, scratch_reg, REG_NOTES (insn));
- +
- + /* If this is the first change in this IF-block then
- + signal that we have made a change. */
- + if ( ce_info->num_cond_clobber_insns == 0
- + && ce_info->num_extra_move_insns == 0 )
- + *num_true_changes += 1;
- +
- + ce_info->num_extra_move_insns++;
- + return op;
- + }
- + }
- +
- + /* We failed to fix up the insns, so this if-then-else cannot be made
- + conditional. Just return NULL_RTX so that the if-then-else conversion
- + for this if-then-else will be cancelled. */
- + return NULL_RTX;
- + }
- + end_sequence ();
- + return op;
- + }
- +
- + /* Signal that we have started if-conversion after reload, which means
- + that it should be safe to split all the predicable clobber insns which
- + did not become cond_exec back into a simpler form if possible. */
- + cfun->machine->ifcvt_after_reload = 1;
- +
- + return pattern;
- +}
- +
- +
- +void
- +avr32_ifcvt_modify_cancel ( ce_if_block_t *ce_info, int *num_true_changes)
- +{
- + int n;
- +
- + if ( ce_info->num_extra_move_insns > 0
- + && ce_info->num_cond_clobber_insns == 0)
- + /* Signal that we did not do any changes after all. */
- + *num_true_changes -= 1;
- +
- + /* Remove any inserted move insns. */
- + for ( n = 0; n < ce_info->num_extra_move_insns; n++ )
- + {
- + rtx link, prev_link;
- +
- + /* Remove the REG_DEAD note since we no longer need the scratch register anyway. */
- + prev_link = NULL_RTX;
- + for (link = REG_NOTES (ce_info->extra_move_insns[n]); link; link = XEXP (link, 1))
- + {
- + if (REG_NOTE_KIND (link) == REG_DEAD)
- + {
- + if ( prev_link )
- + XEXP (prev_link, 1) = XEXP (link, 1);
- + else
- + REG_NOTES (ce_info->extra_move_insns[n]) = XEXP (link, 1);
- + }
- + else
- + {
- + prev_link = link;
- + }
- + }
- +
- + /* Revert all reg_notes for the moved insn. */
- + for (link = REG_NOTES (ce_info->moved_insns[n]); link; link = XEXP (link, 1))
- + {
- + REG_NOTES (ce_info->extra_move_insns[n]) = gen_rtx_EXPR_LIST (REG_NOTE_KIND (link),
- + XEXP (link, 0),
- + REG_NOTES (ce_info->extra_move_insns[n]));
- + }
- +
- + /* Remove the moved insn. */
- + remove_insn ( ce_info->moved_insns[n] );
- + }
- +}
- +
- +
- +/* Function returning TRUE if INSN with OPERANDS is a splittable
- + conditional immediate clobber insn. We assume that the insn is
- + already a conditional immediate clobber insn and do not check
- + for that. */
- +int
- +avr32_cond_imm_clobber_splittable (rtx insn, rtx operands[])
- +{
- + if ( REGNO (operands[0]) == REGNO (operands[1]) )
- + {
- + if ( (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == PLUS
- + && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'I', "Is21"))
- + || (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == MINUS
- + && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks21")))
- + return FALSE;
- + }
- + else if ( (logical_binary_operator (SET_SRC (XVECEXP (PATTERN (insn),0,0)), VOIDmode)
- + || (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == PLUS
- + && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'I', "Is16"))
- + || (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == MINUS
- + && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks16"))) )
- + return FALSE;
- +
- + return TRUE;
- +}
- +
- +
- +/* Function for getting an integer value from a const_int or const_double
- + expression regardless of the HOST_WIDE_INT size. Each target cpu word
- + will be put into the val array where the LSW will be stored at the lowest
- + address and so forth. Assumes that const_expr is either a const_int or
- + const_double. Only valid for modes which have sizes that are a multiple
- + of the word size.
- +*/
- +void
- +avr32_get_intval (enum machine_mode mode, rtx const_expr, HOST_WIDE_INT *val)
- +{
- + int words_in_mode = GET_MODE_SIZE (mode)/UNITS_PER_WORD;
- + const int words_in_const_int = HOST_BITS_PER_WIDE_INT / BITS_PER_WORD;
- +
- + if ( GET_CODE(const_expr) == CONST_DOUBLE ){
- + HOST_WIDE_INT hi = CONST_DOUBLE_HIGH(const_expr);
- + HOST_WIDE_INT lo = CONST_DOUBLE_LOW(const_expr);
- + /* Evaluate hi and lo values of const_double. */
- + avr32_get_intval (mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0),
- + GEN_INT (lo),
- + &val[0]);
- + avr32_get_intval (mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0),
- + GEN_INT (hi),
- + &val[words_in_const_int]);
- + } else if ( GET_CODE(const_expr) == CONST_INT ){
- + HOST_WIDE_INT value = INTVAL(const_expr);
- + int word;
- + for ( word = 0; (word < words_in_mode) && (word < words_in_const_int); word++ ){
- + /* Shift word up to the MSW and shift down again to extract the
- + word and sign-extend. */
- + int lshift = (words_in_const_int - word - 1) * BITS_PER_WORD;
- + int rshift = (words_in_const_int-1) * BITS_PER_WORD;
- + val[word] = (value << lshift) >> rshift;
- + }
- +
- + for ( ; word < words_in_mode; word++ ){
- + /* Just put the sign bits in the remaining words. */
- + val[word] = value < 0 ? -1 : 0;
- + }
- + }
- +}
- +
- +
- +void
- +avr32_split_const_expr (enum machine_mode mode, enum machine_mode new_mode,
- + rtx expr, rtx *split_expr)
- +{
- + int i, word;
- + int words_in_intval = GET_MODE_SIZE (mode)/UNITS_PER_WORD;
- + int words_in_split_values = GET_MODE_SIZE (new_mode)/UNITS_PER_WORD;
- + const int words_in_const_int = HOST_BITS_PER_WIDE_INT / BITS_PER_WORD;
- + HOST_WIDE_INT *val = alloca (words_in_intval * UNITS_PER_WORD);
- +
- + avr32_get_intval (mode, expr, val);
- +
- + for ( i=0; i < (words_in_intval/words_in_split_values); i++ )
- + {
- + HOST_WIDE_INT value_lo = 0, value_hi = 0;
- + for ( word = 0; word < words_in_split_values; word++ )
- + {
- + if ( word >= words_in_const_int )
- + value_hi |= ((val[i * words_in_split_values + word] &
- + (((HOST_WIDE_INT)1 << BITS_PER_WORD)-1))
- + << (BITS_PER_WORD * (word - words_in_const_int)));
- + else
- + value_lo |= ((val[i * words_in_split_values + word] &
- + (((HOST_WIDE_INT)1 << BITS_PER_WORD)-1))
- + << (BITS_PER_WORD * word));
- + }
- + split_expr[i] = immed_double_const(value_lo, value_hi, new_mode);
- + }
- +}
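- +
- +/* Worked example (assuming a 64-bit HOST_WIDE_INT and BITS_PER_WORD == 32):
- + for the DImode constant 0x1122334455667788, avr32_get_intval stores
- + val[0] = 0x55667788 (LSW) and val[1] = 0x11223344 (MSW), and
- + avr32_split_const_expr with new_mode == SImode then produces
- + split_expr[0] = (const_int 0x55667788) and
- + split_expr[1] = (const_int 0x11223344). */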
- +
- +
- +/* Set up library functions to comply with the AVR32 ABI. */
- +static void
- +avr32_init_libfuncs (void)
- +{
- + /* Convert gcc run-time function names to AVR32 ABI names */
- +
- + /* Double-precision floating-point arithmetic. */
- + set_optab_libfunc (neg_optab, DFmode, NULL);
- +
- + /* Double-precision comparisons. */
- + set_optab_libfunc (eq_optab, DFmode, "__avr32_f64_cmp_eq");
- + set_optab_libfunc (ne_optab, DFmode, NULL);
- + set_optab_libfunc (lt_optab, DFmode, "__avr32_f64_cmp_lt");
- + set_optab_libfunc (le_optab, DFmode, NULL);
- + set_optab_libfunc (ge_optab, DFmode, "__avr32_f64_cmp_ge");
- + set_optab_libfunc (gt_optab, DFmode, NULL);
- +
- + /* Single-precision floating-point arithmetic. */
- + set_optab_libfunc (smul_optab, SFmode, "__avr32_f32_mul");
- + set_optab_libfunc (neg_optab, SFmode, NULL);
- +
- + /* Single-precision comparisons. */
- + set_optab_libfunc (eq_optab, SFmode, "__avr32_f32_cmp_eq");
- + set_optab_libfunc (ne_optab, SFmode, NULL);
- + set_optab_libfunc (lt_optab, SFmode, "__avr32_f32_cmp_lt");
- + set_optab_libfunc (le_optab, SFmode, NULL);
- + set_optab_libfunc (ge_optab, SFmode, "__avr32_f32_cmp_ge");
- + set_optab_libfunc (gt_optab, SFmode, NULL);
- +
- + /* Floating-point to integer conversions. */
- + set_conv_libfunc (sfix_optab, SImode, DFmode, "__avr32_f64_to_s32");
- + set_conv_libfunc (ufix_optab, SImode, DFmode, "__avr32_f64_to_u32");
- + set_conv_libfunc (sfix_optab, DImode, DFmode, "__avr32_f64_to_s64");
- + set_conv_libfunc (ufix_optab, DImode, DFmode, "__avr32_f64_to_u64");
- + set_conv_libfunc (sfix_optab, SImode, SFmode, "__avr32_f32_to_s32");
- + set_conv_libfunc (ufix_optab, SImode, SFmode, "__avr32_f32_to_u32");
- + set_conv_libfunc (sfix_optab, DImode, SFmode, "__avr32_f32_to_s64");
- + set_conv_libfunc (ufix_optab, DImode, SFmode, "__avr32_f32_to_u64");
- +
- + /* Conversions between floating types. */
- + set_conv_libfunc (trunc_optab, SFmode, DFmode, "__avr32_f64_to_f32");
- + set_conv_libfunc (sext_optab, DFmode, SFmode, "__avr32_f32_to_f64");
- +
- + /* Integer to floating-point conversions. Table 8. */
- + set_conv_libfunc (sfloat_optab, DFmode, SImode, "__avr32_s32_to_f64");
- + set_conv_libfunc (sfloat_optab, DFmode, DImode, "__avr32_s64_to_f64");
- + set_conv_libfunc (sfloat_optab, SFmode, SImode, "__avr32_s32_to_f32");
- + set_conv_libfunc (sfloat_optab, SFmode, DImode, "__avr32_s64_to_f32");
- + set_conv_libfunc (ufloat_optab, DFmode, SImode, "__avr32_u32_to_f64");
- + set_conv_libfunc (ufloat_optab, SFmode, SImode, "__avr32_u32_to_f32");
- + /* TODO: Add these to gcc library functions */
- + //set_conv_libfunc (ufloat_optab, DFmode, DImode, NULL);
- + //set_conv_libfunc (ufloat_optab, SFmode, DImode, NULL);
- +
- + /* Long long. Table 9. */
- + set_optab_libfunc (smul_optab, DImode, "__avr32_mul64");
- + set_optab_libfunc (sdiv_optab, DImode, "__avr32_sdiv64");
- + set_optab_libfunc (udiv_optab, DImode, "__avr32_udiv64");
- + set_optab_libfunc (smod_optab, DImode, "__avr32_smod64");
- + set_optab_libfunc (umod_optab, DImode, "__avr32_umod64");
- + set_optab_libfunc (ashl_optab, DImode, "__avr32_lsl64");
- + set_optab_libfunc (lshr_optab, DImode, "__avr32_lsr64");
- + set_optab_libfunc (ashr_optab, DImode, "__avr32_asr64");
- +
- + /* Floating point library functions which have fast versions. */
- + if ( TARGET_FAST_FLOAT )
- + {
- + set_optab_libfunc (sdiv_optab, DFmode, "__avr32_f64_div_fast");
- + set_optab_libfunc (smul_optab, DFmode, "__avr32_f64_mul_fast");
- + set_optab_libfunc (add_optab, DFmode, "__avr32_f64_add_fast");
- + set_optab_libfunc (sub_optab, DFmode, "__avr32_f64_sub_fast");
- + set_optab_libfunc (add_optab, SFmode, "__avr32_f32_add_fast");
- + set_optab_libfunc (sub_optab, SFmode, "__avr32_f32_sub_fast");
- + set_optab_libfunc (sdiv_optab, SFmode, "__avr32_f32_div_fast");
- + }
- + else
- + {
- + set_optab_libfunc (sdiv_optab, DFmode, "__avr32_f64_div");
- + set_optab_libfunc (smul_optab, DFmode, "__avr32_f64_mul");
- + set_optab_libfunc (add_optab, DFmode, "__avr32_f64_add");
- + set_optab_libfunc (sub_optab, DFmode, "__avr32_f64_sub");
- + set_optab_libfunc (add_optab, SFmode, "__avr32_f32_add");
- + set_optab_libfunc (sub_optab, SFmode, "__avr32_f32_sub");
- + set_optab_libfunc (sdiv_optab, SFmode, "__avr32_f32_div");
- + }
- +}
- +
- +
- +/* Record a flashvault declaration. */
- +static void
- +flashvault_decl_list_add (unsigned int vector_num, const char *name)
- +{
- + struct flashvault_decl_list *p;
- +
- + p = (struct flashvault_decl_list *)
- + xmalloc (sizeof (struct flashvault_decl_list));
- + p->next = flashvault_decl_list_head;
- + p->name = name;
- + p->vector_num = vector_num;
- + flashvault_decl_list_head = p;
- +}
- +
- +
- +static void
- +avr32_file_end (void)
- +{
- + struct flashvault_decl_list *p;
- + unsigned int num_entries = 0;
- +
- + /* Check if a list of flashvault declarations exists. */
- + if (flashvault_decl_list_head != NULL)
- + {
- + /* Calculate the number of entries in the table. */
- + for (p = flashvault_decl_list_head; p != NULL; p = p->next)
- + {
- + num_entries++;
- + }
- +
- + /* Generate the beginning of the flashvault data table. */
- + fputs ("\t.global __fv_table\n"
- + "\t.data\n"
- + "\t.align 2\n"
- + "\t.set .LFVTABLE, . + 0\n"
- + "\t.type __fv_table, @object\n", asm_out_file);
- + /* Each table entry is 8 bytes. */
- + fprintf (asm_out_file, "\t.size __fv_table, %u\n", (num_entries * 8));
- +
- + fputs("__fv_table:\n", asm_out_file);
- +
- + for (p = flashvault_decl_list_head; p != NULL; p = p->next)
- + {
- + /* Output table entry. */
- + fprintf (asm_out_file,
- + "\t.align 2\n"
- + "\t.int %u\n", p->vector_num);
- + fprintf (asm_out_file,
- + "\t.align 2\n"
- + "\t.int %s\n", p->name);
- + }
- + }
- +}
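- +
- +/* Sketch of the table emitted above, for two hypothetical flashvault
- + declarations, "fv_one" on vector 1 registered first and "fv_two" on
- + vector 2 registered last (entries come out in reverse registration order
- + because flashvault_decl_list_add pushes onto the list head):
- +
- + .global __fv_table
- + .data
- + .align 2
- + .set .LFVTABLE, . + 0
- + .type __fv_table, @object
- + .size __fv_table, 16
- + __fv_table:
- + .align 2
- + .int 2
- + .align 2
- + .int fv_two
- + .align 2
- + .int 1
- + .align 2
- + .int fv_one
- +*/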
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/avr32.h gcc-4.4.6/gcc/config/avr32/avr32.h
- --- gcc-4.4.6.orig/gcc/config/avr32/avr32.h 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/avr32.h 2011-10-22 19:23:08.520581302 +0200
- @@ -0,0 +1,3316 @@
- +/*
- + Definitions of target machine for AVR32.
- + Copyright 2003,2004,2005,2006,2007,2008,2009,2010 Atmel Corporation.
- +
- + This file is part of GCC.
- +
- + This program is free software; you can redistribute it and/or modify
- + it under the terms of the GNU General Public License as published by
- + the Free Software Foundation; either version 2 of the License, or
- + (at your option) any later version.
- +
- + This program is distributed in the hope that it will be useful,
- + but WITHOUT ANY WARRANTY; without even the implied warranty of
- + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- + GNU General Public License for more details.
- +
- + You should have received a copy of the GNU General Public License
- + along with this program; if not, write to the Free Software
- + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
- +
- +#ifndef GCC_AVR32_H
- +#define GCC_AVR32_H
- +
- +
- +#ifndef OBJECT_FORMAT_ELF
- +#error avr32.h included before elfos.h
- +#endif
- +
- +#ifndef LOCAL_LABEL_PREFIX
- +#define LOCAL_LABEL_PREFIX "."
- +#endif
- +
- +#ifndef SUBTARGET_CPP_SPEC
- +#define SUBTARGET_CPP_SPEC "-D__ELF__"
- +#endif
- +
- +
- +extern struct rtx_def *avr32_compare_op0;
- +extern struct rtx_def *avr32_compare_op1;
- +
- +/* comparison type */
- +enum avr32_cmp_type {
- + CMP_QI, /* 1 byte ->char */
- + CMP_HI, /* 2 byte->half word */
- + CMP_SI, /* four byte->word*/
- + CMP_DI, /* eight byte->double word */
- + CMP_SF, /* single precision floats */
- + CMP_MAX /* max comparison type */
- +};
- +
- +extern enum avr32_cmp_type avr32_branch_type; /* type of branch to use */
- +
- +
- +extern struct rtx_def *avr32_acc_cache;
- +
- +/* cache instruction op5 codes */
- +#define AVR32_CACHE_INVALIDATE_ICACHE 1
- +
- +/*
- +These bits describe the different types of function supported by the AVR32
- +backend. They are exclusive, e.g. a function cannot be both a normal function
- +and an interworked function. Knowing the type of a function is important for
- +determining its prologue and epilogue sequences. Note value 7 is currently
- +unassigned. Also note that the interrupt function types all have bit 2 set,
- +so that they can be tested for easily. Note that 0 is deliberately chosen for
- +AVR32_FT_UNKNOWN so that when the machine_function structure is initialized
- +(to zero) func_type will default to unknown. This will force the first use of
- +avr32_current_func_type to call avr32_compute_func_type.
- +*/
- +#define AVR32_FT_UNKNOWN 0 /* Type has not yet been determined. */
- +#define AVR32_FT_NORMAL 1 /* Normal function. */
- +#define AVR32_FT_ACALL 2 /* An acall function. */
- +#define AVR32_FT_EXCEPTION_HANDLER 3 /* A C++ exception handler. */
- +#define AVR32_FT_ISR_FULL 4 /* A fully shadowed interrupt mode. */
- +#define AVR32_FT_ISR_HALF 5 /* A half shadowed interrupt mode. */
- +#define AVR32_FT_ISR_NONE 6 /* No shadow registers. */
- +
- +#define AVR32_FT_TYPE_MASK ((1 << 3) - 1)
- +
- +/* In addition functions can have several type modifiers, outlined by these bit masks: */
- +#define AVR32_FT_INTERRUPT (1 << 2) /* Note overlap with FT_ISR and above. */
- +#define AVR32_FT_NAKED (1 << 3) /* No prologue or epilogue. */
- +#define AVR32_FT_VOLATILE (1 << 4) /* Does not return. */
- +#define AVR32_FT_NESTED (1 << 5) /* Embedded inside another func. */
- +#define AVR32_FT_FLASHVAULT (1 << 6) /* Flashvault function call. */
- +#define AVR32_FT_FLASHVAULT_IMPL (1 << 7) /* Function definition in FlashVault. */
- +
- +
- +/* Some macros to test these flags. */
- +#define AVR32_FUNC_TYPE(t) (t & AVR32_FT_TYPE_MASK)
- +#define IS_INTERRUPT(t) (t & AVR32_FT_INTERRUPT)
- +#define IS_NAKED(t) (t & AVR32_FT_NAKED)
- +#define IS_VOLATILE(t) (t & AVR32_FT_VOLATILE)
- +#define IS_NESTED(t) (t & AVR32_FT_NESTED)
- +#define IS_FLASHVAULT(t) (t & AVR32_FT_FLASHVAULT)
- +#define IS_FLASHVAULT_IMPL(t) (t & AVR32_FT_FLASHVAULT_IMPL)
- +
- +#define SYMBOL_FLAG_RMW_ADDR_SHIFT SYMBOL_FLAG_MACH_DEP_SHIFT
- +#define SYMBOL_REF_RMW_ADDR(RTX) \
- + ((SYMBOL_REF_FLAGS (RTX) & (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT)) != 0)
- +
- +
- +typedef struct minipool_labels
- +GTY ((chain_next ("%h.next"), chain_prev ("%h.prev")))
- +{
- + rtx label;
- + struct minipool_labels *prev;
- + struct minipool_labels *next;
- +} minipool_labels;
- +
- +/* A C structure for machine-specific, per-function data.
- + This is added to the cfun structure. */
- +
- +typedef struct machine_function
- +GTY (())
- +{
- + /* Records the type of the current function. */
- + unsigned long func_type;
- + /* List of minipool labels, used for checking if a code label is valid in a
- + memory expression. */
- + minipool_labels *minipool_label_head;
- + minipool_labels *minipool_label_tail;
- + int ifcvt_after_reload;
- +} machine_function;
- +
- +/* Initialize data used by insn expanders. This is called from insn_emit,
- + once for every function before code is generated. */
- +#define INIT_EXPANDERS avr32_init_expanders ()
- +
- +/******************************************************************************
- + * SPECS
- + *****************************************************************************/
- +
- +#ifndef ASM_SPEC
- +#define ASM_SPEC "%{fpic:--pic} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{march=ucr2nomul:-march=ucr2;:%{march=*:-march=%*}} %{mpart=uc3a3revd:-mpart=uc3a3256s;:%{mpart=*:-mpart=%*}}"
- +#endif
- +
- +#ifndef MULTILIB_DEFAULTS
- +#define MULTILIB_DEFAULTS { "march=ap", "" }
- +#endif
- +
- +/******************************************************************************
- + * Run-time Target Specification
- + *****************************************************************************/
- +#ifndef TARGET_VERSION
- +#define TARGET_VERSION fprintf(stderr, " (AVR32, GNU assembler syntax)");
- +#endif
- +
- +
- +/* Part types. Keep this in sync with the order of avr32_part_types in avr32.c*/
- +enum part_type
- +{
- + PART_TYPE_AVR32_NONE,
- + PART_TYPE_AVR32_AP7000,
- + PART_TYPE_AVR32_AP7001,
- + PART_TYPE_AVR32_AP7002,
- + PART_TYPE_AVR32_AP7200,
- + PART_TYPE_AVR32_UC3A0128,
- + PART_TYPE_AVR32_UC3A0256,
- + PART_TYPE_AVR32_UC3A0512,
- + PART_TYPE_AVR32_UC3A0512ES,
- + PART_TYPE_AVR32_UC3A1128,
- + PART_TYPE_AVR32_UC3A1256,
- + PART_TYPE_AVR32_UC3A1512,
- + PART_TYPE_AVR32_UC3A1512ES,
- + PART_TYPE_AVR32_UC3A3REVD,
- + PART_TYPE_AVR32_UC3A364,
- + PART_TYPE_AVR32_UC3A364S,
- + PART_TYPE_AVR32_UC3A3128,
- + PART_TYPE_AVR32_UC3A3128S,
- + PART_TYPE_AVR32_UC3A3256,
- + PART_TYPE_AVR32_UC3A3256S,
- + PART_TYPE_AVR32_UC3A464,
- + PART_TYPE_AVR32_UC3A464S,
- + PART_TYPE_AVR32_UC3A4128,
- + PART_TYPE_AVR32_UC3A4128S,
- + PART_TYPE_AVR32_UC3A4256,
- + PART_TYPE_AVR32_UC3A4256S,
- + PART_TYPE_AVR32_UC3B064,
- + PART_TYPE_AVR32_UC3B0128,
- + PART_TYPE_AVR32_UC3B0256,
- + PART_TYPE_AVR32_UC3B0256ES,
- + PART_TYPE_AVR32_UC3B0512,
- + PART_TYPE_AVR32_UC3B0512REVC,
- + PART_TYPE_AVR32_UC3B164,
- + PART_TYPE_AVR32_UC3B1128,
- + PART_TYPE_AVR32_UC3B1256,
- + PART_TYPE_AVR32_UC3B1256ES,
- + PART_TYPE_AVR32_UC3B1512,
- + PART_TYPE_AVR32_UC3B1512REVC,
- + PART_TYPE_AVR32_UC64D3,
- + PART_TYPE_AVR32_UC128D3,
- + PART_TYPE_AVR32_UC64D4,
- + PART_TYPE_AVR32_UC128D4,
- + PART_TYPE_AVR32_UC3C0512CREVC,
- + PART_TYPE_AVR32_UC3C1512CREVC,
- + PART_TYPE_AVR32_UC3C2512CREVC,
- + PART_TYPE_AVR32_UC3L0256,
- + PART_TYPE_AVR32_UC3L0128,
- + PART_TYPE_AVR32_UC3L064,
- + PART_TYPE_AVR32_UC3L032,
- + PART_TYPE_AVR32_UC3L016,
- + PART_TYPE_AVR32_UC3L064REVB,
- + PART_TYPE_AVR32_UC64L3U,
- + PART_TYPE_AVR32_UC128L3U,
- + PART_TYPE_AVR32_UC256L3U,
- + PART_TYPE_AVR32_UC64L4U,
- + PART_TYPE_AVR32_UC128L4U,
- + PART_TYPE_AVR32_UC256L4U,
- + PART_TYPE_AVR32_UC3C064C,
- + PART_TYPE_AVR32_UC3C0128C,
- + PART_TYPE_AVR32_UC3C0256C,
- + PART_TYPE_AVR32_UC3C0512C,
- + PART_TYPE_AVR32_UC3C164C,
- + PART_TYPE_AVR32_UC3C1128C,
- + PART_TYPE_AVR32_UC3C1256C,
- + PART_TYPE_AVR32_UC3C1512C,
- + PART_TYPE_AVR32_UC3C264C,
- + PART_TYPE_AVR32_UC3C2128C,
- + PART_TYPE_AVR32_UC3C2256C,
- + PART_TYPE_AVR32_UC3C2512C,
- + PART_TYPE_AVR32_MXT768E
- +};
- +
- +/* Microarchitectures. */
- +enum microarchitecture_type
- +{
- + UARCH_TYPE_AVR32A,
- + UARCH_TYPE_AVR32B,
- + UARCH_TYPE_NONE
- +};
- +
- +/* Architecture types which specify the pipeline.
- + Keep this in sync with avr32_arch_types in avr32.c
- + and the pipeline attribute in avr32.md */
- +enum architecture_type
- +{
- + ARCH_TYPE_AVR32_AP,
- + ARCH_TYPE_AVR32_UCR1,
- + ARCH_TYPE_AVR32_UCR2,
- + ARCH_TYPE_AVR32_UCR2NOMUL,
- + ARCH_TYPE_AVR32_UCR3,
- + ARCH_TYPE_AVR32_UCR3FP,
- + ARCH_TYPE_AVR32_NONE
- +};
- +
- +/* Flag specifying if the cpu has support for DSP instructions.*/
- +#define FLAG_AVR32_HAS_DSP (1 << 0)
- +/* Flag specifying if the cpu has support for Read-Modify-Write
- + instructions.*/
- +#define FLAG_AVR32_HAS_RMW (1 << 1)
- +/* Flag specifying if the cpu has support for SIMD instructions. */
- +#define FLAG_AVR32_HAS_SIMD (1 << 2)
- +/* Flag specifying if the cpu has support for unaligned memory word access. */
- +#define FLAG_AVR32_HAS_UNALIGNED_WORD (1 << 3)
- +/* Flag specifying if the cpu has support for branch prediction. */
- +#define FLAG_AVR32_HAS_BRANCH_PRED (1 << 4)
- +/* Flag specifying if the cpu has support for a return stack. */
- +#define FLAG_AVR32_HAS_RETURN_STACK (1 << 5)
- +/* Flag specifying if the cpu has caches. */
- +#define FLAG_AVR32_HAS_CACHES (1 << 6)
- +/* Flag specifying if the cpu has support for v2 insns. */
- +#define FLAG_AVR32_HAS_V2_INSNS (1 << 7)
- +/* Flag specifying that the cpu has buggy mul insns. */
- +#define FLAG_AVR32_HAS_NO_MUL_INSNS (1 << 8)
- +/* Flag specifying that the device has FPU instructions according
- + to the AVR32002 specifications. */
- +#define FLAG_AVR32_HAS_FPU (1 << 9)
- +
- +/* Structure for holding information about different avr32 CPUs/parts */
- +struct part_type_s
- +{
- + const char *const name;
- + enum part_type part_type;
- + enum architecture_type arch_type;
- + /* Must lie outside user's namespace. NULL == no macro. */
- + const char *const macro;
- +};
- +
- +/* Structure for holding information about different avr32 pipeline
- + architectures. */
- +struct arch_type_s
- +{
- + const char *const name;
- + enum architecture_type arch_type;
- + enum microarchitecture_type uarch_type;
- + const unsigned long feature_flags;
- + /* Must lie outside user's namespace. NULL == no macro. */
- + const char *const macro;
- +};
- +
- +extern const struct part_type_s *avr32_part;
- +extern const struct arch_type_s *avr32_arch;
- +
- +#define TARGET_SIMD (avr32_arch->feature_flags & FLAG_AVR32_HAS_SIMD)
- +#define TARGET_DSP (avr32_arch->feature_flags & FLAG_AVR32_HAS_DSP)
- +#define TARGET_RMW (avr32_arch->feature_flags & FLAG_AVR32_HAS_RMW)
- +#define TARGET_UNALIGNED_WORD (avr32_arch->feature_flags & FLAG_AVR32_HAS_UNALIGNED_WORD)
- +#define TARGET_BRANCH_PRED (avr32_arch->feature_flags & FLAG_AVR32_HAS_BRANCH_PRED)
- +#define TARGET_RETURN_STACK (avr32_arch->feature_flags & FLAG_AVR32_HAS_RETURN_STACK)
- +#define TARGET_V2_INSNS (avr32_arch->feature_flags & FLAG_AVR32_HAS_V2_INSNS)
- +#define TARGET_CACHES (avr32_arch->feature_flags & FLAG_AVR32_HAS_CACHES)
- +#define TARGET_NO_MUL_INSNS (avr32_arch->feature_flags & FLAG_AVR32_HAS_NO_MUL_INSNS)
- +#define TARGET_ARCH_AP (avr32_arch->arch_type == ARCH_TYPE_AVR32_AP)
- +#define TARGET_ARCH_UCR1 (avr32_arch->arch_type == ARCH_TYPE_AVR32_UCR1)
- +#define TARGET_ARCH_UCR2 (avr32_arch->arch_type == ARCH_TYPE_AVR32_UCR2)
- +#define TARGET_ARCH_UC (TARGET_ARCH_UCR1 || TARGET_ARCH_UCR2)
- +#define TARGET_UARCH_AVR32A (avr32_arch->uarch_type == UARCH_TYPE_AVR32A)
- +#define TARGET_UARCH_AVR32B (avr32_arch->uarch_type == UARCH_TYPE_AVR32B)
- +#define TARGET_ARCH_FPU (avr32_arch->feature_flags & FLAG_AVR32_HAS_FPU)
- +
- +#define CAN_DEBUG_WITHOUT_FP
- +
- +
- +
- +
- +/******************************************************************************
- + * Storage Layout
- + *****************************************************************************/
- +
- +/*
- +Define this macro to have the value 1 if the most significant bit in a
- +byte has the lowest number; otherwise define it to have the value zero.
- +This means that bit-field instructions count from the most significant
- +bit. If the machine has no bit-field instructions, then this must still
- +be defined, but it doesn't matter which value it is defined to. This
- +macro need not be a constant.
- +
- +This macro does not affect the way structure fields are packed into
- +bytes or words; that is controlled by BYTES_BIG_ENDIAN.
- +*/
- +#define BITS_BIG_ENDIAN 0
- +
- +/*
- +Define this macro to have the value 1 if the most significant byte in a
- +word has the lowest number. This macro need not be a constant.
- +*/
- +/*
- + Data is stored in a big-endian way.
- +*/
- +#define BYTES_BIG_ENDIAN 1
- +
- +/*
- +Define this macro to have the value 1 if, in a multiword object, the
- +most significant word has the lowest number. This applies to both
- +memory locations and registers; GCC fundamentally assumes that the
- +order of words in memory is the same as the order in registers. This
- +macro need not be a constant.
- +*/
- +/*
- + Data is stored in a big-endian way.
- +*/
- +#define WORDS_BIG_ENDIAN 1
- +
- +/*
- +Define this macro if WORDS_BIG_ENDIAN is not constant. This must be a
- +constant value with the same meaning as WORDS_BIG_ENDIAN, which will be
- +used only when compiling libgcc2.c. Typically the value will be set
- +based on preprocessor defines.
- +*/
- +#define LIBGCC2_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
- +
- +/*
- +Define this macro to have the value 1 if DFmode, XFmode or
- +TFmode floating point numbers are stored in memory with the word
- +containing the sign bit at the lowest address; otherwise define it to
- +have the value 0. This macro need not be a constant.
- +
- +You need not define this macro if the ordering is the same as for
- +multi-word integers.
- +*/
- +/* #define FLOAT_WORDS_BIG_ENDIAN 1 */
- +
- +/*
- +Define this macro to be the number of bits in an addressable storage
- +unit (byte); normally 8.
- +*/
- +#define BITS_PER_UNIT 8
- +
- +/*
- +Number of bits in a word; normally 32.
- +*/
- +#define BITS_PER_WORD 32
- +
- +/*
- +Maximum number of bits in a word. If this is undefined, the default is
- +BITS_PER_WORD. Otherwise, it is the constant value that is the
- +largest value that BITS_PER_WORD can have at run-time.
- +*/
- +/* MAX_BITS_PER_WORD not defined*/
- +
- +/*
- +Number of storage units in a word; normally 4.
- +*/
- +#define UNITS_PER_WORD 4
- +
- +/*
- +Minimum number of units in a word. If this is undefined, the default is
- +UNITS_PER_WORD. Otherwise, it is the constant value that is the
- +smallest value that UNITS_PER_WORD can have at run-time.
- +*/
- +/* MIN_UNITS_PER_WORD not defined */
- +
- +/*
- +Width of a pointer, in bits. You must specify a value no wider than the
- +width of Pmode. If it is not equal to the width of Pmode,
- +you must define POINTERS_EXTEND_UNSIGNED.
- +*/
- +#define POINTER_SIZE 32
- +
- +/*
- +A C expression whose value is greater than zero if pointers that need to be
- +extended from being POINTER_SIZE bits wide to Pmode are to
- +be zero-extended and zero if they are to be sign-extended. If the value
- +is less than zero then there must be a "ptr_extend" instruction that
- +extends a pointer from POINTER_SIZE to Pmode.
- +
- +You need not define this macro if the POINTER_SIZE is equal
- +to the width of Pmode.
- +*/
- +/* #define POINTERS_EXTEND_UNSIGNED */
- +
- +/*
- +A Macro to update M and UNSIGNEDP when an object whose type
- +is TYPE and which has the specified mode and signedness is to be
- +stored in a register. This macro is only called when TYPE is a
- +scalar type.
- +
- +On most RISC machines, which only have operations that operate on a full
- +register, define this macro to set M to word_mode if
- +M is an integer mode narrower than BITS_PER_WORD. In most
- +cases, only integer modes should be widened because wider-precision
- +floating-point operations are usually more expensive than their narrower
- +counterparts.
- +
- +For most machines, the macro definition does not change UNSIGNEDP.
- +However, some machines have instructions that preferentially handle
- +either signed or unsigned quantities of certain modes. For example, on
- +the DEC Alpha, 32-bit loads from memory and 32-bit add instructions
- +sign-extend the result to 64 bits. On such machines, set
- +UNSIGNEDP according to which kind of extension is more efficient.
- +
- +Do not define this macro if it would never modify M.
- +*/
- +#define PROMOTE_MODE(M, UNSIGNEDP, TYPE) \
- + { \
- + if (!AGGREGATE_TYPE_P (TYPE) \
- + && GET_MODE_CLASS (mode) == MODE_INT \
- + && GET_MODE_SIZE (mode) < 4) \
- + { \
- + if (M == QImode) \
- + (UNSIGNEDP) = 1; \
- + else if (M == HImode) \
- + (UNSIGNEDP) = 0; \
- + (M) = SImode; \
- + } \
- + }
- +
- +#define PROMOTE_FUNCTION_MODE(M, UNSIGNEDP, TYPE) \
- + PROMOTE_MODE(M, UNSIGNEDP, TYPE)
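- +
- +/* For example, with the definition above a scalar "short" (HImode) value is
- + promoted to SImode with UNSIGNEDP cleared, i.e. kept sign-extended in a
- + full register, while a QImode (byte) value is promoted to SImode with
- + UNSIGNEDP set and kept zero-extended; aggregates and values that are
- + already word-sized are left untouched. */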
- +
- +/* Define if operations between registers always perform the operation
- + on the full register even if a narrower mode is specified. */
- +#define WORD_REGISTER_OPERATIONS
- +
- +/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
- + will either zero-extend or sign-extend. The value of this macro should
- + be the code that says which one of the two operations is implicitly
- + done, UNKNOWN if not known. */
- +#define LOAD_EXTEND_OP(MODE) \
- + (((MODE) == QImode) ? ZERO_EXTEND \
- + : ((MODE) == HImode) ? SIGN_EXTEND : UNKNOWN)
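- +
- +/* For example, with the definition above a byte (QImode) load is known to
- + zero-extend the value into a full register and a halfword (HImode) load is
- + known to sign-extend it, so redundant explicit extensions after such loads
- + can be removed; all other modes report UNKNOWN. */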
- +
- +
- +/*
- +Normal alignment required for function parameters on the stack, in
- +bits. All stack parameters receive at least this much alignment
- +regardless of data type. On most machines, this is the same as the
- +size of an integer.
- +*/
- +#define PARM_BOUNDARY 32
- +
- +/*
- +Define this macro to the minimum alignment enforced by hardware for the
- +stack pointer on this machine. The definition is a C expression for the
- +desired alignment (measured in bits). This value is used as a default
- +if PREFERRED_STACK_BOUNDARY is not defined. On most machines,
- +this should be the same as PARM_BOUNDARY.
- +*/
- +#define STACK_BOUNDARY 32
- +
- +/*
- +Define this macro if you wish to preserve a certain alignment for the
- +stack pointer, greater than what the hardware enforces. The definition
- +is a C expression for the desired alignment (measured in bits). This
- +macro must evaluate to a value equal to or larger than
- +STACK_BOUNDARY.
- +*/
- +#define PREFERRED_STACK_BOUNDARY (TARGET_FORCE_DOUBLE_ALIGN ? 64 : 32 )
- +
- +/*
- +Alignment required for a function entry point, in bits.
- +*/
- +#define FUNCTION_BOUNDARY 16
- +
- +/*
- +Biggest alignment that any data type can require on this machine, in bits.
- +*/
- +#define BIGGEST_ALIGNMENT (TARGET_FORCE_DOUBLE_ALIGN ? 64 : 32 )
- +
- +/*
- +If defined, the smallest alignment, in bits, that can be given to an
- +object that can be referenced in one operation, without disturbing any
- +nearby object. Normally, this is BITS_PER_UNIT, but may be larger
- +on machines that don't have byte or half-word store operations.
- +*/
- +#define MINIMUM_ATOMIC_ALIGNMENT BITS_PER_UNIT
- +
- +
- +/*
- +An integer expression for the size in bits of the largest integer machine mode that
- +should actually be used. All integer machine modes of this size or smaller can be
- +used for structures and unions with the appropriate sizes. If this macro is undefined,
- +GET_MODE_BITSIZE (DImode) is assumed.*/
- +#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
- +
- +
- +/*
- +If defined, a C expression to compute the alignment given to a constant
- +that is being placed in memory. CONSTANT is the constant and
- +BASIC_ALIGN is the alignment that the object would ordinarily
- +have. The value of this macro is used instead of that alignment to
- +align the object.
- +
- +If this macro is not defined, then BASIC_ALIGN is used.
- +
- +The typical use of this macro is to increase alignment for string
- +constants to be word aligned so that strcpy calls that copy
- +constants can be done inline.
- +*/
- +#define CONSTANT_ALIGNMENT(CONSTANT, BASIC_ALIGN) \
- + ((TREE_CODE(CONSTANT) == STRING_CST) ? BITS_PER_WORD : BASIC_ALIGN)
- +
- +/* Try to align string to a word. */
- +#define DATA_ALIGNMENT(TYPE, ALIGN) \
- + ({(TREE_CODE (TYPE) == ARRAY_TYPE \
- + && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
- + && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN));})
- +
- +/* Try to align local store strings to a word. */
- +#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
- + ({(TREE_CODE (TYPE) == ARRAY_TYPE \
- + && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
- + && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN));})
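- +
- +/* Example of the effect of the two alignment macros above: a local or static
- + "char buf[6]", whose natural alignment would only be 8 bits, is given full
- + word (32-bit) alignment so that word accesses can be used when copying
- + short strings. */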
- +
- +/*
- +Define this macro to be the value 1 if instructions will fail to work
- +if given data not on the nominal alignment. If instructions will merely
- +go slower in that case, define this macro as 0.
- +*/
- +#define STRICT_ALIGNMENT 1
- +
- +/*
- +Define this if you wish to imitate the way many other C compilers handle
- +alignment of bit-fields and the structures that contain them.
- +
- +The behavior is that the type written for a bit-field (int,
- +short, or other integer type) imposes an alignment for the
- +entire structure, as if the structure really did contain an ordinary
- +field of that type. In addition, the bit-field is placed within the
- +structure so that it would fit within such a field, not crossing a
- +boundary for it.
- +
- +Thus, on most machines, a bit-field whose type is written as int
- +would not cross a four-byte boundary, and would force four-byte
- +alignment for the whole structure. (The alignment used may not be four
- +bytes; it is controlled by the other alignment parameters.)
- +
- +If the macro is defined, its definition should be a C expression;
- +a nonzero value for the expression enables this behavior.
- +
- +Note that if this macro is not defined, or its value is zero, some
- +bit-fields may cross more than one alignment boundary. The compiler can
- +support such references if there are insv, extv, and
- +extzv insns that can directly reference memory.
- +
- +The other known way of making bit-fields work is to define
- +STRUCTURE_SIZE_BOUNDARY as large as BIGGEST_ALIGNMENT.
- +Then every structure can be accessed with fullwords.
- +
- +Unless the machine has bit-field instructions or you define
- +STRUCTURE_SIZE_BOUNDARY that way, you must define
- +PCC_BITFIELD_TYPE_MATTERS to have a nonzero value.
- +
- +If your aim is to make GCC use the same conventions for laying out
- +bit-fields as are used by another compiler, here is how to investigate
- +what the other compiler does. Compile and run this program:
- +
- +struct foo1
- +{
- + char x;
- + char :0;
- + char y;
- +};
- +
- +struct foo2
- +{
- + char x;
- + int :0;
- + char y;
- +};
- +
- +main ()
- +{
- + printf ("Size of foo1 is %d\n",
- + sizeof (struct foo1));
- + printf ("Size of foo2 is %d\n",
- + sizeof (struct foo2));
- + exit (0);
- +}
- +
- +If this prints 2 and 5, then the compiler's behavior is what you would
- +get from PCC_BITFIELD_TYPE_MATTERS.
- +*/
- +#define PCC_BITFIELD_TYPE_MATTERS 1
- +
- +
- +/******************************************************************************
- + * Layout of Source Language Data Types
- + *****************************************************************************/
- +
- +/*
- +A C expression for the size in bits of the type int on the
- +target machine. If you don't define this, the default is one word.
- +*/
- +#define INT_TYPE_SIZE 32
- +
- +/*
- +A C expression for the size in bits of the type short on the
- +target machine. If you don't define this, the default is half a word. (If
- +this would be less than one storage unit, it is rounded up to one unit.)
- +*/
- +#define SHORT_TYPE_SIZE 16
- +
- +/*
- +A C expression for the size in bits of the type long on the
- +target machine. If you don't define this, the default is one word.
- +*/
- +#define LONG_TYPE_SIZE 32
- +
- +
- +/*
- +A C expression for the size in bits of the type long long on the
- +target machine. If you don't define this, the default is two
- +words. If you want to support GNU Ada on your machine, the value of this
- +macro must be at least 64.
- +*/
- +#define LONG_LONG_TYPE_SIZE 64
- +
- +/*
- +A C expression for the size in bits of the type char on the
- +target machine. If you don't define this, the default is
- +BITS_PER_UNIT.
- +*/
- +#define CHAR_TYPE_SIZE 8
- +
- +
- +/*
- +A C expression for the size in bits of the C++ type bool and
- +C99 type _Bool on the target machine. If you don't define
- +this, and you probably shouldn't, the default is CHAR_TYPE_SIZE.
- +*/
- +#define BOOL_TYPE_SIZE 8
- +
- +
- +/*
- +An expression whose value is 1 or 0, according to whether the type
- +char should be signed or unsigned by default. The user can
- +always override this default with the options -fsigned-char
- +and -funsigned-char.
- +*/
- +/* We are using unsigned char */
- +#define DEFAULT_SIGNED_CHAR 0
- +
- +
- +/*
- +A C expression for a string describing the name of the data type to use
- +for size values. The typedef name size_t is defined using the
- +contents of the string.
- +
- +The string can contain more than one keyword. If so, separate them with
- +spaces, and write first any length keyword, then unsigned if
- +appropriate, and finally int. The string must exactly match one
- +of the data type names defined in the function
- +init_decl_processing in the file c-decl.c. You may not
- +omit int or change the order - that would cause the compiler to
- +crash on startup.
- +
- +If you don't define this macro, the default is "long unsigned int".
- +*/
- +#define SIZE_TYPE "long unsigned int"
- +
- +/*
- +A C expression for a string describing the name of the data type to use
- +for the result of subtracting two pointers. The typedef name
- +ptrdiff_t is defined using the contents of the string. See
- +SIZE_TYPE above for more information.
- +
- +If you don't define this macro, the default is "long int".
- +*/
- +#define PTRDIFF_TYPE "long int"
- +
- +
- +/*
- +A C expression for the size in bits of the data type for wide
- +characters. This is used in cpp, which cannot make use of
- +WCHAR_TYPE.
- +*/
- +#define WCHAR_TYPE_SIZE 32
- +
- +
- +/*
- +A C expression for a string describing the name of the data type to
- +use for wide characters passed to printf and returned from
- +getwc. The typedef name wint_t is defined using the
- +contents of the string. See SIZE_TYPE above for more
- +information.
- +
- +If you don't define this macro, the default is "unsigned int".
- +*/
- +#define WINT_TYPE "unsigned int"
- +
- +/*
- +A C expression for a string describing the name of the data type that
- +can represent any value of any standard or extended signed integer type.
- +The typedef name intmax_t is defined using the contents of the
- +string. See SIZE_TYPE above for more information.
- +
- +If you don't define this macro, the default is the first of
- +"int", "long int", or "long long int" that has as
- +much precision as long long int.
- +*/
- +#define INTMAX_TYPE "long long int"
- +
- +/*
- +A C expression for a string describing the name of the data type that
- +can represent any value of any standard or extended unsigned integer
- +type. The typedef name uintmax_t is defined using the contents
- +of the string. See SIZE_TYPE above for more information.
- +
- +If you don't define this macro, the default is the first of
- +"unsigned int", "long unsigned int", or "long long unsigned int"
- +that has as much precision as long long unsigned int.
- +*/
- +#define UINTMAX_TYPE "long long unsigned int"
- +
- +
- +/******************************************************************************
- + * Register Usage
- + *****************************************************************************/
- +
- +/* Convert from gcc internal register number to register number
- + used in assembly code */
- +#define ASM_REGNUM(reg) (LAST_REGNUM - (reg))
- +
- +/* Convert between register number used in assembly to gcc
- + internal register number */
- +#define INTERNAL_REGNUM(reg) (LAST_REGNUM - (reg))
- +
- +/** Basic Characteristics of Registers **/
- +
- +/*
- +Number of hardware registers known to the compiler. They receive
- +numbers 0 through FIRST_PSEUDO_REGISTER-1; thus, the first
- +pseudo register's number really is assigned the number
- +FIRST_PSEUDO_REGISTER.
- +*/
- +#define FIRST_PSEUDO_REGISTER (LAST_REGNUM + 1)
- +
- +#define FIRST_REGNUM 0
- +#define LAST_REGNUM 15
- +
- +/*
- +An initializer that says which registers are used for fixed purposes
- +all throughout the compiled code and are therefore not available for
- +general allocation. These would include the stack pointer, the frame
- +pointer (except on machines where that can be used as a general
- +register when no frame pointer is needed), the program counter on
- +machines where that is considered one of the addressable registers,
- +and any other numbered register with a standard use.
- +
- +This information is expressed as a sequence of numbers, separated by
- +commas and surrounded by braces. The nth number is 1 if
- +register n is fixed, 0 otherwise.
- +
- +The table initialized from this macro, and the table initialized by
- +the following one, may be overridden at run time either automatically,
- +by the actions of the macro CONDITIONAL_REGISTER_USAGE, or by
- +the user with the command options -ffixed-[reg],
- +-fcall-used-[reg] and -fcall-saved-[reg].
- +*/
- +
- +/* The internal gcc register numbers are reversed
- + compared to the real register numbers since
- + gcc expects data types stored over multiple
- + registers in the register file to be big endian
- + if the memory layout is big endian. But this
- + is not the case for avr32 so we fake a big
- + endian register file. */
- +
- +#define FIXED_REGISTERS { \
- + 1, /* Program Counter */ \
- + 0, /* Link Register */ \
- + 1, /* Stack Pointer */ \
- + 0, /* r12 */ \
- + 0, /* r11 */ \
- + 0, /* r10 */ \
- + 0, /* r9 */ \
- + 0, /* r8 */ \
- + 0, /* r7 */ \
- + 0, /* r6 */ \
- + 0, /* r5 */ \
- + 0, /* r4 */ \
- + 0, /* r3 */ \
- + 0, /* r2 */ \
- + 0, /* r1 */ \
- + 0, /* r0 */ \
- +}
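- +
- +/* Worked example of the reversed numbering (LAST_REGNUM == 15): GCC hard
- + register 0 is ASM_REGNUM(0) == 15, i.e. r15/PC, hard register 2 is r13/SP
- + and hard register 15 is r0; conversely INTERNAL_REGNUM(12) == 3, so the
- + assembler register r12 is GCC hard register 3. The FIXED_REGISTERS table
- + above is therefore listed from PC down to r0. */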
- +
- +/*
- +Like FIXED_REGISTERS but has 1 for each register that is
- +clobbered (in general) by function calls as well as for fixed
- +registers. This macro therefore identifies the registers that are not
- +available for general allocation of values that must live across
- +function calls.
- +
- +If a register has 0 in CALL_USED_REGISTERS, the compiler
- +automatically saves it on function entry and restores it on function
- +exit, if the register is used within the function.
- +*/
- +#define CALL_USED_REGISTERS { \
- + 1, /* Program Counter */ \
- + 0, /* Link Register */ \
- + 1, /* Stack Pointer */ \
- + 1, /* r12 */ \
- + 1, /* r11 */ \
- + 1, /* r10 */ \
- + 1, /* r9 */ \
- + 1, /* r8 */ \
- + 0, /* r7 */ \
- + 0, /* r6 */ \
- + 0, /* r5 */ \
- + 0, /* r4 */ \
- + 0, /* r3 */ \
- + 0, /* r2 */ \
- + 0, /* r1 */ \
- + 0, /* r0 */ \
- +}
- +
- +/* Interrupt functions can only use registers that have already been
- + saved by the prologue, even if they would normally be
- + call-clobbered. */
- +#define HARD_REGNO_RENAME_OK(SRC, DST) \
- + (! IS_INTERRUPT (cfun->machine->func_type) || \
- + df_regs_ever_live_p (DST))
- +
- +
- +/*
- +Zero or more C statements that may conditionally modify five variables
- +fixed_regs, call_used_regs, global_regs,
- +reg_names, and reg_class_contents, to take into account
- +any dependence of these register sets on target flags. The first three
- +of these are of type char [] (interpreted as Boolean vectors).
- +global_regs is a const char *[], and
- +reg_class_contents is a HARD_REG_SET. Before the macro is
- +called, fixed_regs, call_used_regs,
- +reg_class_contents, and reg_names have been initialized
- +from FIXED_REGISTERS, CALL_USED_REGISTERS,
- +REG_CLASS_CONTENTS, and REGISTER_NAMES, respectively.
- +global_regs has been cleared, and any -ffixed-[reg],
- +-fcall-used-[reg] and -fcall-saved-[reg]
- +command options have been applied.
- +
- +You need not define this macro if it has no work to do.
- +
- +If the usage of an entire class of registers depends on the target
- +flags, you may indicate this to GCC by using this macro to modify
- +fixed_regs and call_used_regs to 1 for each of the
- +registers in the classes which should not be used by GCC. Also define
- +the macro REG_CLASS_FROM_LETTER to return NO_REGS if it
- +is called with a letter for a class that shouldn't be used.
- +
- + (However, if this class is not included in GENERAL_REGS and all
- +of the insn patterns whose constraints permit this class are
- +controlled by target switches, then GCC will automatically avoid using
- +these registers when the target switches are opposed to them.)
- +*/
- +#define CONDITIONAL_REGISTER_USAGE \
- + do \
- + { \
- + if (flag_pic) \
- + { \
- + fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
- + call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
- + } \
- + } \
- + while (0)
- +
- +
- +/*
- +If the program counter has a register number, define this as that
- +register number. Otherwise, do not define it.
- +*/
- +
- +#define LAST_AVR32_REGNUM 16
- +
- +
- +/** Order of Allocation of Registers **/
- +
- +/*
- +If defined, an initializer for a vector of integers, containing the
- +numbers of hard registers in the order in which GCC should prefer
- +to use them (from most preferred to least).
- +
- +If this macro is not defined, registers are used lowest numbered first
- +(all else being equal).
- +
- +One use of this macro is on machines where the highest numbered
- +registers must always be saved and the save-multiple-registers
- +instruction supports only sequences of consecutive registers. On such
- +machines, define REG_ALLOC_ORDER to be an initializer that lists
- +the highest numbered allocable register first.
- +*/
- +#define REG_ALLOC_ORDER \
- +{ \
- + INTERNAL_REGNUM(8), \
- + INTERNAL_REGNUM(9), \
- + INTERNAL_REGNUM(10), \
- + INTERNAL_REGNUM(11), \
- + INTERNAL_REGNUM(12), \
- + LR_REGNUM, \
- + INTERNAL_REGNUM(7), \
- + INTERNAL_REGNUM(6), \
- + INTERNAL_REGNUM(5), \
- + INTERNAL_REGNUM(4), \
- + INTERNAL_REGNUM(3), \
- + INTERNAL_REGNUM(2), \
- + INTERNAL_REGNUM(1), \
- + INTERNAL_REGNUM(0), \
- + SP_REGNUM, \
- + PC_REGNUM \
- +}
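- +
- +/* The order above hands out the call-clobbered registers r8-r12 first,
- +   then LR, then the call-saved registers r7 down to r0, and lists the
- +   fixed SP and PC last, matching the FIXED_REGISTERS and
- +   CALL_USED_REGISTERS tables above. */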
- +
- +
- +/** How Values Fit in Registers **/
- +
- +/*
- +A C expression for the number of consecutive hard registers, starting
- +at register number REGNO, required to hold a value of mode
- +MODE.
- +
- +On a machine where all registers are exactly one word, a suitable
- +definition of this macro is
- +
- +#define HARD_REGNO_NREGS(REGNO, MODE) \
- + ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
- + / UNITS_PER_WORD)
- +*/
- +#define HARD_REGNO_NREGS(REGNO, MODE) \
- + ((unsigned int)((GET_MODE_SIZE(MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
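- +
- +/* Worked example of the division above, assuming UNITS_PER_WORD is 4
- +   (its actual value is defined elsewhere in the port, so 4 is an
- +   assumption here):
- +     QImode/HImode (1 or 2 bytes): (size + 3) / 4 = 1 register
- +     SImode        (4 bytes):      (4 + 3) / 4    = 1 register
- +     DImode        (8 bytes):      (8 + 3) / 4    = 2 registers  */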
- +
- +/*
- +A C expression that is nonzero if it is permissible to store a value
- +of mode MODE in hard register number REGNO (or in several
- +registers starting with that one). For a machine where all registers
- +are equivalent, a suitable definition is
- +
- + #define HARD_REGNO_MODE_OK(REGNO, MODE) 1
- +
- +You need not include code to check for the numbers of fixed registers,
- +because the allocation mechanism considers them to be always occupied.
- +
- +On some machines, double-precision values must be kept in even/odd
- +register pairs. You can implement that by defining this macro to reject
- +odd register numbers for such modes.
- +
- +The minimum requirement for a mode to be OK in a register is that the
- +mov[mode] instruction pattern support moves between the
- +register and other hard register in the same class and that moving a
- +value into the register and back out not alter it.
- +
- +Since the same instruction used to move word_mode will work for
- +all narrower integer modes, it is not necessary on any machine for
- +HARD_REGNO_MODE_OK to distinguish between these modes, provided
- +you define patterns movhi, etc., to take advantage of this. This
- +is useful because of the interaction between HARD_REGNO_MODE_OK
- +and MODES_TIEABLE_P; it is very desirable for all integer modes
- +to be tieable.
- +
- +Many machines have special registers for floating point arithmetic.
- +Often people assume that floating point machine modes are allowed only
- +in floating point registers. This is not true. Any registers that
- +can hold integers can safely hold a floating point machine
- +mode, whether or not floating arithmetic can be done on it in those
- +registers. Integer move instructions can be used to move the values.
- +
- +On some machines, though, the converse is true: fixed-point machine
- +modes may not go in floating registers. This is true if the floating
- +registers normalize any value stored in them, because storing a
- +non-floating value there would garble it. In this case,
- +HARD_REGNO_MODE_OK should reject fixed-point machine modes in
- +floating registers. But if the floating registers do not automatically
- +normalize, if you can store any bit pattern in one and retrieve it
- +unchanged without a trap, then any machine mode may go in a floating
- +register, so you can define this macro to say so.
- +
- +The primary significance of special floating registers is rather that
- +they are the registers acceptable in floating point arithmetic
- +instructions. However, this is of no concern to
- +HARD_REGNO_MODE_OK. You handle it by writing the proper
- +constraints for those instructions.
- +
- +On some machines, the floating registers are especially slow to access,
- +so that it is better to store a value in a stack frame than in such a
- +register if floating point arithmetic is not being done. As long as the
- +floating registers are not in class GENERAL_REGS, they will not
- +be used unless some pattern's constraint asks for one.
- +*/
- +#define HARD_REGNO_MODE_OK(REGNO, MODE) avr32_hard_regno_mode_ok(REGNO, MODE)
- +
- +/*
- +A C expression that is nonzero if a value of mode
- +MODE1 is accessible in mode MODE2 without copying.
- +
- +If HARD_REGNO_MODE_OK(R, MODE1) and
- +HARD_REGNO_MODE_OK(R, MODE2) are always the same for
- +any R, then MODES_TIEABLE_P(MODE1, MODE2)
- +should be nonzero. If they differ for any R, you should define
- +this macro to return zero unless some other mechanism ensures the
- +accessibility of the value in a narrower mode.
- +
- +You should define this macro to return nonzero in as many cases as
- +possible since doing so will allow GCC to perform better register
- +allocation.
- +*/
- +#define MODES_TIEABLE_P(MODE1, MODE2) \
- + (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
- +
- +
- +
- +/******************************************************************************
- + * Register Classes
- + *****************************************************************************/
- +
- +/*
- +An enumeral type that must be defined with all the register class names
- +as enumeral values. NO_REGS must be first. ALL_REGS
- +must be the last register class, followed by one more enumeral value,
- +LIM_REG_CLASSES, which is not a register class but rather
- +tells how many classes there are.
- +
- +Each register class has a number, which is the value of casting
- +the class name to type int. The number serves as an index
- +in many of the tables described below.
- +*/
- +enum reg_class
- +{
- + NO_REGS,
- + GENERAL_REGS,
- + ALL_REGS,
- + LIM_REG_CLASSES
- +};
- +
- +/*
- +The number of distinct register classes, defined as follows:
- + #define N_REG_CLASSES (int) LIM_REG_CLASSES
- +*/
- +#define N_REG_CLASSES (int)LIM_REG_CLASSES
- +
- +/*
- +An initializer containing the names of the register classes as C string
- +constants. These names are used in writing some of the debugging dumps.
- +*/
- +#define REG_CLASS_NAMES \
- +{ \
- + "NO_REGS", \
- + "GENERAL_REGS", \
- + "ALL_REGS" \
- +}
- +
- +/*
- +An initializer containing the contents of the register classes, as integers
- +which are bit masks. The nth integer specifies the contents of class
- +n. The way the integer mask is interpreted is that
- +register r is in the class if mask & (1 << r) is 1.
- +
- +When the machine has more than 32 registers, an integer does not suffice.
- +Then the integers are replaced by sub-initializers, braced groupings containing
- +several integers. Each sub-initializer must be suitable as an initializer
- +for the type HARD_REG_SET which is defined in hard-reg-set.h.
- +In this situation, the first integer in each sub-initializer corresponds to
- +registers 0 through 31, the second integer to registers 32 through 63, and
- +so on.
- +*/
- +#define REG_CLASS_CONTENTS { \
- + {0x00000000}, /* NO_REGS */ \
- + {0x0000FFFF}, /* GENERAL_REGS */ \
- + {0x7FFFFFFF}, /* ALL_REGS */ \
- +}
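- +
- +/* Illustrative sketch only: decoding the masks above with the
- +   "mask & (1 << r)" rule.  GENERAL_REGS is 0x0000FFFF, so it contains
- +   exactly the sixteen registers numbered 0..15 in the internal
- +   numbering (PC, LR, SP and r12..r0).  The guarded helper below is
- +   never built and only restates that membership test. */
- +#if 0
- +static int
- +example_regno_in_general_regs (int regno)
- +{
- +  const unsigned int general_regs_mask = 0x0000FFFF;
- +  /* Nonzero exactly for regno 0..15.  */
- +  return (general_regs_mask & (1U << regno)) != 0;
- +}
- +#endif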
- +
- +
- +/*
- +A C expression whose value is a register class containing hard register
- +REGNO. In general there is more than one such class; choose a class
- +which is minimal, meaning that no smaller class also contains the
- +register.
- +*/
- +#define REGNO_REG_CLASS(REGNO) (GENERAL_REGS)
- +
- +/*
- +A macro whose definition is the name of the class to which a valid
- +base register must belong. A base register is one used in an address
- +which is the register value plus a displacement.
- +*/
- +#define BASE_REG_CLASS GENERAL_REGS
- +
- +/*
- +This is a variation of the BASE_REG_CLASS macro which allows
- +the selection of a base register in a mode dependent manner. If
- +MODE is VOIDmode then it should return the same value as
- +BASE_REG_CLASS.
- +*/
- +#define MODE_BASE_REG_CLASS(MODE) BASE_REG_CLASS
- +
- +/*
- +A macro whose definition is the name of the class to which a valid
- +index register must belong. An index register is one used in an
- +address where its value is either multiplied by a scale factor or
- +added to another register (as well as added to a displacement).
- +*/
- +#define INDEX_REG_CLASS BASE_REG_CLASS
- +
- +/*
- +A C expression which defines the machine-dependent operand constraint
- +letters for register classes. If CHAR is such a letter, the
- +value should be the register class corresponding to it. Otherwise,
- +the value should be NO_REGS. The register letter r,
- +corresponding to class GENERAL_REGS, will not be passed
- +to this macro; you do not need to handle it.
- +*/
- +#define REG_CLASS_FROM_LETTER(CHAR) NO_REGS
- +
- +/* These assume that REGNO is a hard or pseudo reg number.
- + They give nonzero only if REGNO is a hard reg of the suitable class
- + or a pseudo reg currently allocated to a suitable hard reg.
- + Since they use reg_renumber, they are safe only once reg_renumber
- + has been allocated, which happens in local-alloc.c. */
- +#define TEST_REGNO(R, TEST, VALUE) \
- + ((R TEST VALUE) || ((unsigned) reg_renumber[R] TEST VALUE))
- +
- +/*
- +A C expression which is nonzero if register number NUM is suitable for use as a base
- +register in operand addresses. It may be either a suitable hard register or a pseudo
- +register that has been allocated such a hard register.
- +*/
- +#define REGNO_OK_FOR_BASE_P(NUM) TEST_REGNO(NUM, <=, LAST_REGNUM)
- +
- +/* The following macro defines cover classes for the Integrated Register
- +   Allocator.  Cover classes are a set of non-intersecting register
- +   classes covering all hard registers used for register allocation
- +   purposes.  Any move between two registers of a cover class should be
- +   cheaper than a load or store of the registers.  The macro value is an
- +   array of register classes with LIM_REG_CLASSES used as the end
- +   marker. */
- +
- +#define IRA_COVER_CLASSES \
- +{ \
- + GENERAL_REGS, LIM_REG_CLASSES \
- +}
- +
- +/*
- +A C expression which is nonzero if register number NUM is
- +suitable for use as an index register in operand addresses. It may be
- +either a suitable hard register or a pseudo register that has been
- +allocated such a hard register.
- +
- +The difference between an index register and a base register is that
- +the index register may be scaled. If an address involves the sum of
- +two registers, neither one of them scaled, then either one may be
- +labeled the ``base'' and the other the ``index''; but whichever
- +labeling is used must fit the machine's constraints of which registers
- +may serve in each capacity. The compiler will try both labelings,
- +looking for one that is valid, and will reload one or both registers
- +only if neither labeling works.
- +*/
- +#define REGNO_OK_FOR_INDEX_P(NUM) TEST_REGNO(NUM, <=, LAST_REGNUM)
- +
- +/*
- +A C expression that places additional restrictions on the register class
- +to use when it is necessary to copy value X into a register in class
- +CLASS. The value is a register class; perhaps CLASS, or perhaps
- +another, smaller class. On many machines, the following definition is
- +safe:
- +
- + #define PREFERRED_RELOAD_CLASS(X,CLASS) CLASS
- +
- +Sometimes returning a more restrictive class makes better code. For
- +example, on the 68000, when X is an integer constant that is in range
- +for a 'moveq' instruction, the value of this macro is always
- +DATA_REGS as long as CLASS includes the data registers.
- +Requiring a data register guarantees that a 'moveq' will be used.
- +
- +If X is a const_double, by returning NO_REGS
- +you can force X into a memory constant. This is useful on
- +certain machines where immediate floating values cannot be loaded into
- +certain kinds of registers.
- +*/
- +#define PREFERRED_RELOAD_CLASS(X, CLASS) CLASS
- +
- +
- +
- +/*
- +A C expression for the maximum number of consecutive registers
- +of class CLASS needed to hold a value of mode MODE.
- +
- +This is closely related to the macro HARD_REGNO_NREGS. In fact,
- +the value of the macro CLASS_MAX_NREGS(CLASS, MODE)
- +should be the maximum value of HARD_REGNO_NREGS(REGNO, MODE)
- +for all REGNO values in the class CLASS.
- +
- +This macro helps control the handling of multiple-word values
- +in the reload pass.
- +*/
- +#define CLASS_MAX_NREGS(CLASS, MODE) /* ToDo:fixme */ \
- + (unsigned int)((GET_MODE_SIZE(MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
- +
- +
- +/*
- + Using CONST_OK_FOR_CONSTRAINT_P instead of CONST_OK_FOR_LETTER_P
- + in order to support constraints with more than one letter.
- + Only two letters are then used for constant constraints,
- + the letter 'K' and the letter 'I'. A constraint starting with
- + these letters must consist of four characters. The character following
- + 'K' or 'I' must be either 'u' (unsigned) or 's' (signed) to specify
- + whether the constant is zero or sign extended. The last two characters
- + specify the length in bits of the constant. The base constraint letter
- + 'I' means that this is a negated constant, meaning that -VAL should
- + actually be checked to lie within the valid range instead of VAL, which
- + is used when 'K' is the base constraint letter.
- +
- +*/
- +
- +#define CONSTRAINT_LEN(C, STR) \
- + ( ((C) == 'K' || (C) == 'I') ? 4 : \
- + ((C) == 'R') ? 5 : \
- + ((C) == 'P') ? -1 : \
- + DEFAULT_CONSTRAINT_LEN((C), (STR)) )
- +
- +#define CONST_OK_FOR_CONSTRAINT_P(VALUE, C, STR) \
- + avr32_const_ok_for_constraint_p(VALUE, C, STR)
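- +
- +/* Illustrative sketch only: the guarded helper below restates the
- +   four-character format described above ('K' or 'I', then 'u' or 's',
- +   then two digits giving the width in bits).  The real check is done
- +   by avr32_const_ok_for_constraint_p(); the helper name and types here
- +   are hypothetical. */
- +#if 0
- +static int
- +example_const_ok_for_constraint (long long value, const char *str)
- +{
- +  /* e.g. "Ks16" -> width 16.  */
- +  int width = (str[2] - '0') * 10 + (str[3] - '0');
- +
- +  if (str[0] == 'I')
- +    /* 'I' constraints test the negated value.  */
- +    value = -value;
- +
- +  if (str[1] == 'u')
- +    /* Zero-extended: 0 .. 2^width - 1.  */
- +    return value >= 0 && value < (1LL << width);
- +  else
- +    /* Sign-extended: -2^(width-1) .. 2^(width-1) - 1.  */
- +    return value >= -(1LL << (width - 1)) && value < (1LL << (width - 1));
- +}
- +#endif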
- +
- +/*
- +A C expression that defines the machine-dependent operand constraint
- +letters that specify particular ranges of const_double values ('G' or 'H').
- +
- +If C is one of those letters, the expression should check that
- +VALUE, an RTX of code const_double, is in the appropriate
- +range and return 1 if so, 0 otherwise. If C is not one of those
- +letters, the value should be 0 regardless of VALUE.
- +
- +const_double is used for all floating-point constants and for
- +DImode fixed-point constants. A given letter can accept either
- +or both kinds of values. It can use GET_MODE to distinguish
- +between these kinds.
- +*/
- +#define CONST_DOUBLE_OK_FOR_LETTER_P(OP, C) \
- + ((C) == 'G' ? avr32_const_double_immediate(OP) : 0)
- +
- +/*
- +A C expression that defines the optional machine-dependent constraint
- +letters that can be used to segregate specific types of operands, usually
- +memory references, for the target machine. Any letter that is not
- +elsewhere defined and not matched by REG_CLASS_FROM_LETTER
- +may be used. Normally this macro will not be defined.
- +
- +If it is required for a particular target machine, it should return 1
- +if VALUE corresponds to the operand type represented by the
- +constraint letter C. If C is not defined as an extra
- +constraint, the value returned should be 0 regardless of VALUE.
- +
- +For example, on the ROMP, load instructions cannot have their output
- +in r0 if the memory reference contains a symbolic address. Constraint
- +letter 'Q' is defined as representing a memory address that does
- +not contain a symbolic address. An alternative is specified with
- +a 'Q' constraint on the input and 'r' on the output. The next
- +alternative specifies 'm' on the input and a register class that
- +does not include r0 on the output.
- +*/
- +#define EXTRA_CONSTRAINT_STR(OP, C, STR) \
- + ((C) == 'W' ? avr32_address_operand(OP, GET_MODE(OP)) : \
- + (C) == 'R' ? (avr32_indirect_register_operand(OP, GET_MODE(OP)) || \
- + (avr32_imm_disp_memory_operand(OP, GET_MODE(OP)) \
- + && avr32_const_ok_for_constraint_p( \
- + INTVAL(XEXP(XEXP(OP, 0), 1)), \
- + (STR)[1], &(STR)[1]))) : \
- + (C) == 'S' ? avr32_indexed_memory_operand(OP, GET_MODE(OP)) : \
- + (C) == 'T' ? avr32_const_pool_ref_operand(OP, GET_MODE(OP)) : \
- + (C) == 'U' ? SYMBOL_REF_RCALL_FUNCTION_P(OP) : \
- + (C) == 'Z' ? avr32_cop_memory_operand(OP, GET_MODE(OP)) : \
- + (C) == 'Q' ? avr32_non_rmw_memory_operand(OP, GET_MODE(OP)) : \
- + (C) == 'Y' ? avr32_rmw_memory_operand(OP, GET_MODE(OP)) : \
- + 0)
- +
- +
- +#define EXTRA_MEMORY_CONSTRAINT(C, STR) ( ((C) == 'R') || \
- + ((C) == 'Q') || \
- + ((C) == 'S') || \
- + ((C) == 'Y') || \
- + ((C) == 'Z') )
- +
- +
- +/* Returns nonzero if op is a function SYMBOL_REF which
- + can be called using an rcall instruction */
- +#define SYMBOL_REF_RCALL_FUNCTION_P(op) \
- + ( GET_CODE(op) == SYMBOL_REF \
- + && SYMBOL_REF_FUNCTION_P(op) \
- + && SYMBOL_REF_LOCAL_P(op) \
- + && !SYMBOL_REF_EXTERNAL_P(op) \
- + && !TARGET_HAS_ASM_ADDR_PSEUDOS )
- +
- +/******************************************************************************
- + * Stack Layout and Calling Conventions
- + *****************************************************************************/
- +
- +/** Basic Stack Layout **/
- +
- +/*
- +Define this macro if pushing a word onto the stack moves the stack
- +pointer to a smaller address.
- +
- +When we say, ``define this macro if ...,'' it means that the
- +compiler checks this macro only with #ifdef so the precise
- +definition used does not matter.
- +*/
- +/* pushm decrements SP: *(--SP) <-- Rx */
- +#define STACK_GROWS_DOWNWARD
- +
- +/*
- +This macro defines the operation used when something is pushed
- +on the stack. In RTL, a push operation will be
- +(set (mem (STACK_PUSH_CODE (reg sp))) ...)
- +
- +The choices are PRE_DEC, POST_DEC, PRE_INC,
- +and POST_INC. Which of these is correct depends on
- +the stack direction and on whether the stack pointer points
- +to the last item on the stack or whether it points to the
- +space for the next item on the stack.
- +
- +The default is PRE_DEC when STACK_GROWS_DOWNWARD is
- +defined, which is almost always right, and PRE_INC otherwise,
- +which is often wrong.
- +*/
- +/* pushm: *(--SP) <-- Rx */
- +#define STACK_PUSH_CODE PRE_DEC
- +
- +/* Define this to nonzero if the nominal address of the stack frame
- + is at the high-address end of the local variables;
- + that is, each additional local variable allocated
- + goes at a more negative offset in the frame. */
- +#define FRAME_GROWS_DOWNWARD 1
- +
- +
- +/*
- +Offset from the frame pointer to the first local variable slot to be allocated.
- +
- +If FRAME_GROWS_DOWNWARD, find the next slot's offset by
- +subtracting the first slot's length from STARTING_FRAME_OFFSET.
- +Otherwise, it is found by adding the length of the first slot to the
- +value STARTING_FRAME_OFFSET.
- +*/
- +#define STARTING_FRAME_OFFSET 0
- +
- +/*
- +Offset from the stack pointer register to the first location at which
- +outgoing arguments are placed. If not specified, the default value of
- +zero is used. This is the proper value for most machines.
- +
- +If ARGS_GROW_DOWNWARD, this is the offset to the location above
- +the first location at which outgoing arguments are placed.
- +*/
- +#define STACK_POINTER_OFFSET 0
- +
- +/*
- +Offset from the argument pointer register to the first argument's
- +address. On some machines it may depend on the data type of the
- +function.
- +
- +If ARGS_GROW_DOWNWARD, this is the offset to the location above
- +the first argument's address.
- +*/
- +#define FIRST_PARM_OFFSET(FUNDECL) 0
- +
- +
- +/*
- +A C expression whose value is RTL representing the address in a stack
- +frame where the pointer to the caller's frame is stored. Assume that
- +FRAMEADDR is an RTL expression for the address of the stack frame
- +itself.
- +
- +If you don't define this macro, the default is to return the value
- +of FRAMEADDR - that is, the stack frame address is also the
- +address of the stack word that points to the previous frame.
- +*/
- +#define DYNAMIC_CHAIN_ADDRESS(FRAMEADDR) plus_constant ((FRAMEADDR), 4)
- +
- +
- +/*
- +A C expression whose value is RTL representing the value of the return
- +address for the frame COUNT steps up from the current frame, after
- +the prologue. FRAMEADDR is the frame pointer of the COUNT
- +frame, or the frame pointer of the COUNT - 1 frame if
- +RETURN_ADDR_IN_PREVIOUS_FRAME is defined.
- +
- +The value of the expression must always be the correct address when
- +COUNT is zero, but may be NULL_RTX if there is no way to
- +determine the return address of other frames.
- +*/
- +#define RETURN_ADDR_RTX(COUNT, FRAMEADDR) avr32_return_addr(COUNT, FRAMEADDR)
- +
- +
- +/*
- +A C expression whose value is RTL representing the location of the
- +incoming return address at the beginning of any function, before the
- +prologue. This RTL is either a REG, indicating that the return
- +value is saved in 'REG', or a MEM representing a location in
- +the stack.
- +
- +You only need to define this macro if you want to support call frame
- +debugging information like that provided by DWARF 2.
- +
- +If this RTL is a REG, you should also define
- +DWARF_FRAME_RETURN_COLUMN to DWARF_FRAME_REGNUM (REGNO).
- +*/
- +#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM)
- +
- +/*
- +A C expression whose value is an integer giving the offset, in bytes,
- +from the value of the stack pointer register to the top of the stack
- +frame at the beginning of any function, before the prologue. The top of
- +the frame is defined to be the value of the stack pointer in the
- +previous frame, just before the call instruction.
- +
- +You only need to define this macro if you want to support call frame
- +debugging information like that provided by DWARF 2.
- +*/
- +#define INCOMING_FRAME_SP_OFFSET 0
- +
- +
- +/** Exception Handling Support **/
- +
- +/* Use setjmp/longjmp for exception handling. */
- +#define DWARF2_UNWIND_INFO 0
- +#define MUST_USE_SJLJ_EXCEPTIONS 1
- +
- +/*
- +A C expression whose value is the Nth register number used for
- +data by exception handlers, or INVALID_REGNUM if fewer than
- +N registers are usable.
- +
- +The exception handling library routines communicate with the exception
- +handlers via a set of agreed upon registers. Ideally these registers
- +should be call-clobbered; it is possible to use call-saved registers,
- +but may negatively impact code size. The target must support at least
- +2 data registers, but should define 4 if there are enough free registers.
- +
- +You must define this macro if you want to support call frame exception
- +handling like that provided by DWARF 2.
- +*/
- +/*
- + Use r9-r11
- +*/
- +#define EH_RETURN_DATA_REGNO(N) \
- + ((N<3) ? INTERNAL_REGNUM(N+9) : INVALID_REGNUM)
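- +
- +/* Worked example of the mapping above: N = 0, 1, 2 yield
- +   INTERNAL_REGNUM(9), INTERNAL_REGNUM(10) and INTERNAL_REGNUM(11),
- +   i.e. r9, r10 and r11; any larger N yields INVALID_REGNUM, telling
- +   the exception handling machinery that only three data registers
- +   are available. */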
- +
- +/*
- +A C expression whose value is RTL representing a location in which
- +to store a stack adjustment to be applied before function return.
- +This is used to unwind the stack to an exception handler's call frame.
- +It will be assigned zero on code paths that return normally.
- +
- +Typically this is a call-clobbered hard register that is otherwise
- +untouched by the epilogue, but could also be a stack slot.
- +
- +You must define this macro if you want to support call frame exception
- +handling like that provided by DWARF 2.
- +*/
- +/*
- + Use r8
- +*/
- +#define EH_RETURN_STACKADJ_REGNO INTERNAL_REGNUM(8)
- +#define EH_RETURN_STACKADJ_RTX gen_rtx_REG(SImode, EH_RETURN_STACKADJ_REGNO)
- +
- +/*
- +A C expression whose value is RTL representing a location in which
- +to store the address of an exception handler to which we should
- +return. It will not be assigned on code paths that return normally.
- +
- +Typically this is the location in the call frame at which the normal
- +return address is stored. For targets that return by popping an
- +address off the stack, this might be a memory address just below
- +the target call frame rather than inside the current call
- +frame. EH_RETURN_STACKADJ_RTX will have already been assigned,
- +so it may be used to calculate the location of the target call frame.
- +
- +Some targets have more complex requirements than storing to an
- +address calculable during initial code generation. In that case
- +the eh_return instruction pattern should be used instead.
- +
- +If you want to support call frame exception handling, you must
- +define either this macro or the eh_return instruction pattern.
- +*/
- +/*
- + We define the eh_return instruction pattern, so this isn't needed.
- +*/
- +/* #define EH_RETURN_HANDLER_RTX gen_rtx_REG(Pmode, RET_REGISTER) */
- +
- +/*
- + This macro chooses the encoding of pointers embedded in the
- + exception handling sections. If at all possible, this should be
- + defined such that the exception handling section will not require
- + dynamic relocations, and so may be read-only.
- +
- + code is 0 for data, 1 for code labels, 2 for function
- + pointers. global is true if the symbol may be affected by dynamic
- + relocations. The macro should return a combination of the DW_EH_PE_*
- + defines as found in dwarf2.h.
- +
- + If this macro is not defined, pointers will not be encoded but
- + represented directly.
- +*/
- +#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
- + ((flag_pic && (GLOBAL) ? DW_EH_PE_indirect : 0) \
- + | (flag_pic ? DW_EH_PE_pcrel : DW_EH_PE_absptr) \
- + | DW_EH_PE_sdata4)
- +
- +/* ToDo: The rest of this subsection */
- +
- +/** Specifying How Stack Checking is Done **/
- +/* ToDo: All in this subsection */
- +
- +/** Registers That Address the Stack Frame **/
- +
- +/*
- +The register number of the stack pointer register, which must also be a
- +fixed register according to FIXED_REGISTERS. On most machines,
- +the hardware determines which register this is.
- +*/
- +/* Using r13 as stack pointer. */
- +#define STACK_POINTER_REGNUM INTERNAL_REGNUM(13)
- +
- +/*
- +The register number of the frame pointer register, which is used to
- +access automatic variables in the stack frame. On some machines, the
- +hardware determines which register this is. On other machines, you can
- +choose any register you wish for this purpose.
- +*/
- +/* Use r7 */
- +#define FRAME_POINTER_REGNUM INTERNAL_REGNUM(7)
- +
- +/*
- +The register number of the arg pointer register, which is used to access
- +the function's argument list. On some machines, this is the same as the
- +frame pointer register. On some machines, the hardware determines which
- +register this is. On other machines, you can choose any register you
- +wish for this purpose. If this is not the same register as the frame
- +pointer register, then you must mark it as a fixed register according to
- +FIXED_REGISTERS, or arrange to be able to eliminate it (see Section
- +10.10.5 [Elimination], page 224).
- +*/
- +/* Use r4 */
- +#define ARG_POINTER_REGNUM INTERNAL_REGNUM(4)
- +
- +
- +/*
- +Register numbers used for passing a function's static chain pointer. If
- +register windows are used, the register number as seen by the called
- +function is STATIC_CHAIN_INCOMING_REGNUM, while the register
- +number as seen by the calling function is STATIC_CHAIN_REGNUM. If
- +these registers are the same, STATIC_CHAIN_INCOMING_REGNUM need
- +not be defined.
- +
- +The static chain register need not be a fixed register.
- +
- +If the static chain is passed in memory, these macros should not be
- +defined; instead, the next two macros should be defined.
- +*/
- +/* Using r0 */
- +#define STATIC_CHAIN_REGNUM INTERNAL_REGNUM(0)
- +
- +/** Eliminating Frame Pointer and Arg Pointer **/
- +
- +/*
- +A C expression which is nonzero if a function must have and use a frame
- +pointer. This expression is evaluated in the reload pass. If its value is
- +nonzero the function will have a frame pointer.
- +
- +The expression can in principle examine the current function and decide
- +according to the facts, but on most machines the constant 0 or the
- +constant 1 suffices. Use 0 when the machine allows code to be generated
- +with no frame pointer, and doing so saves some time or space. Use 1
- +when there is no possible advantage to avoiding a frame pointer.
- +
- +In certain cases, the compiler does not know how to produce valid code
- +without a frame pointer. The compiler recognizes those cases and
- +automatically gives the function a frame pointer regardless of what
- +FRAME_POINTER_REQUIRED says. You don't need to worry about
- +them.
- +
- +In a function that does not require a frame pointer, the frame pointer
- +register can be allocated for ordinary usage, unless you mark it as a
- +fixed register. See FIXED_REGISTERS for more information.
- +*/
- +/* We need the frame pointer when compiling for profiling */
- +#define FRAME_POINTER_REQUIRED (crtl->profile)
- +
- +/*
- +A C statement to store in the variable DEPTH_VAR the difference
- +between the frame pointer and the stack pointer values immediately after
- +the function prologue. The value would be computed from information
- +such as the result of get_frame_size () and the tables of
- +registers regs_ever_live and call_used_regs.
- +
- +If ELIMINABLE_REGS is defined, this macro will not be used and
- +need not be defined. Otherwise, it must be defined even if
- +FRAME_POINTER_REQUIRED is defined to always be true; in that
- +case, you may set DEPTH_VAR to anything.
- +*/
- +#define INITIAL_FRAME_POINTER_OFFSET(DEPTH_VAR) ((DEPTH_VAR) = get_frame_size())
- +
- +/*
- +If defined, this macro specifies a table of register pairs used to
- +eliminate unneeded registers that point into the stack frame. If it is not
- +defined, the only elimination attempted by the compiler is to replace
- +references to the frame pointer with references to the stack pointer.
- +
- +The definition of this macro is a list of structure initializations, each
- +of which specifies an original and replacement register.
- +
- +On some machines, the position of the argument pointer is not known until
- +the compilation is completed. In such a case, a separate hard register
- +must be used for the argument pointer. This register can be eliminated by
- +replacing it with either the frame pointer or the argument pointer,
- +depending on whether or not the frame pointer has been eliminated.
- +
- +In this case, you might specify:
- + #define ELIMINABLE_REGS \
- + {{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
- + {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
- + {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
- +
- +Note that the elimination of the argument pointer with the stack pointer is
- +specified first since that is the preferred elimination.
- +*/
- +#define ELIMINABLE_REGS \
- +{ \
- + { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
- + { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
- + { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM } \
- +}
- +
- +/*
- +A C expression that returns nonzero if the compiler is allowed to try
- +to replace register number FROM with register number
- +TO. This macro need only be defined if ELIMINABLE_REGS
- +is defined, and will usually be the constant 1, since most of the cases
- +preventing register elimination are things that the compiler already
- +knows about.
- +*/
- +#define CAN_ELIMINATE(FROM, TO) 1
- +
- +/*
- +This macro is similar to INITIAL_FRAME_POINTER_OFFSET. It
- +specifies the initial difference between the specified pair of
- +registers. This macro must be defined if ELIMINABLE_REGS is
- +defined.
- +*/
- +#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
- + ((OFFSET) = avr32_initial_elimination_offset(FROM, TO))
- +
- +/** Passing Function Arguments on the Stack **/
- +
- +
- +/*
- +A C expression. If nonzero, push insns will be used to pass
- +outgoing arguments.
- +If the target machine does not have a push instruction, set it to zero.
- +That directs GCC to use an alternate strategy: to
- +allocate the entire argument block and then store the arguments into
- +it. When PUSH_ARGS is nonzero, PUSH_ROUNDING must be defined too.
- +*/
- +#define PUSH_ARGS 1
- +
- +/*
- +A C expression that is the number of bytes actually pushed onto the
- +stack when an instruction attempts to push NPUSHED bytes.
- +
- +On some machines, the definition
- +
- + #define PUSH_ROUNDING(BYTES) (BYTES)
- +
- +will suffice. But on other machines, instructions that appear
- +to push one byte actually push two bytes in an attempt to maintain
- +alignment. Then the definition should be
- +
- + #define PUSH_ROUNDING(BYTES) (((BYTES) + 1) & ~1)
- +*/
- +/* Push 4 bytes at a time. */
- +#define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3)
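- +
- +/* Worked example of the rounding above: ((NPUSHED) + 3) & ~3 rounds up
- +   to the next multiple of 4, so pushing 1, 2, 3 or 4 bytes reserves 4
- +   bytes, pushing 5..8 bytes reserves 8 bytes, and so on. */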
- +
- +/*
- +A C expression. If nonzero, the maximum amount of space required for
- +outgoing arguments will be computed and placed into the variable
- +current_function_outgoing_args_size. No space will be pushed
- +onto the stack for each call; instead, the function prologue should
- +increase the stack frame size by this amount.
- +
- +Setting both PUSH_ARGS and ACCUMULATE_OUTGOING_ARGS is not proper.
- +*/
- +#define ACCUMULATE_OUTGOING_ARGS 0
- +
- +/*
- +A C expression that should indicate the number of bytes of its own
- +arguments that a function pops on returning, or 0 if the
- +function pops no arguments and the caller must therefore pop them all
- +after the function returns.
- +
- +FUNDECL is a C variable whose value is a tree node that describes
- +the function in question. Normally it is a node of type
- +FUNCTION_DECL that describes the declaration of the function.
- +From this you can obtain the DECL_ATTRIBUTES of the function.
- +
- +FUNTYPE is a C variable whose value is a tree node that
- +describes the function in question. Normally it is a node of type
- +FUNCTION_TYPE that describes the data type of the function.
- +From this it is possible to obtain the data types of the value and
- +arguments (if known).
- +
- +When a call to a library function is being considered, FUNDECL
- +will contain an identifier node for the library function. Thus, if
- +you need to distinguish among various library functions, you can do so
- +by their names. Note that ``library function'' in this context means
- +a function used to perform arithmetic, whose name is known specially
- +in the compiler and was not mentioned in the C code being compiled.
- +
- +STACK_SIZE is the number of bytes of arguments passed on the
- +stack. If a variable number of bytes is passed, it is zero, and
- +argument popping will always be the responsibility of the calling function.
- +
- +On the VAX, all functions always pop their arguments, so the definition
- +of this macro is STACK_SIZE. On the 68000, using the standard
- +calling convention, no functions pop their arguments, so the value of
- +the macro is always 0 in this case. But an alternative calling
- +convention is available in which functions that take a fixed number of
- +arguments pop them but other functions (such as printf) pop
- +nothing (the caller pops all). When this convention is in use,
- +FUNTYPE is examined to determine whether a function takes a fixed
- +number of arguments.
- +*/
- +#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, STACK_SIZE) 0
- +
- +
- +/* Return true if this function can use a single return instruction. */
- +#define USE_RETURN_INSN(ISCOND) avr32_use_return_insn(ISCOND)
- +
- +/*
- +A C expression that should indicate the number of bytes a call sequence
- +pops off the stack. It is added to the value of RETURN_POPS_ARGS
- +when compiling a function call.
- +
- +CUM is the variable in which all arguments to the called function
- +have been accumulated.
- +
- +On certain architectures, such as the SH5, a call trampoline is used
- +that pops certain registers off the stack, depending on the arguments
- +that have been passed to the function. Since this is a property of the
- +call site, not of the called function, RETURN_POPS_ARGS is not
- +appropriate.
- +*/
- +#define CALL_POPS_ARGS(CUM) 0
- +
- +/* Passing Arguments in Registers */
- +
- +/*
- +A C expression that controls whether a function argument is passed
- +in a register, and which register.
- +
- +The arguments are CUM, which summarizes all the previous
- +arguments; MODE, the machine mode of the argument; TYPE,
- +the data type of the argument as a tree node or 0 if that is not known
- +(which happens for C support library functions); and NAMED,
- +which is 1 for an ordinary argument and 0 for nameless arguments that
- +correspond to '...' in the called function's prototype.
- +TYPE can be an incomplete type if a syntax error has previously
- +occurred.
- +
- +The value of the expression is usually either a reg RTX for the
- +hard register in which to pass the argument, or zero to pass the
- +argument on the stack.
- +
- +For machines like the VAX and 68000, where normally all arguments are
- +pushed, zero suffices as a definition.
- +
- +The value of the expression can also be a parallel RTX. This is
- +used when an argument is passed in multiple locations. The mode of the
- +parallel should be the mode of the entire argument. The
- +parallel holds any number of expr_list pairs; each one
- +describes where part of the argument is passed. In each
- +expr_list the first operand must be a reg RTX for the hard
- +register in which to pass this part of the argument, and the mode of the
- +register RTX indicates how large this part of the argument is. The
- +second operand of the expr_list is a const_int which gives
- +the offset in bytes into the entire argument of where this part starts.
- +As a special exception the first expr_list in the parallel
- +RTX may have a first operand of zero. This indicates that the entire
- +argument is also stored on the stack.
- +
- +The last time this macro is called, it is called with MODE == VOIDmode,
- +and its result is passed to the call or call_value
- +pattern as operands 2 and 3 respectively.
- +
- +The usual way to make the ISO library 'stdarg.h' work on a machine
- +where some arguments are usually passed in registers, is to cause
- +nameless arguments to be passed on the stack instead. This is done
- +by making FUNCTION_ARG return 0 whenever NAMED is 0.
- +
- +You may use the macro MUST_PASS_IN_STACK (MODE, TYPE)
- +in the definition of this macro to determine if this argument is of a
- +type that must be passed in the stack. If REG_PARM_STACK_SPACE
- +is not defined and FUNCTION_ARG returns nonzero for such an
- +argument, the compiler will abort. If REG_PARM_STACK_SPACE is
- +defined, the argument will be computed in the stack and then loaded into
- +a register. */
- +
- +#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
- + avr32_function_arg(&(CUM), MODE, TYPE, NAMED)
- +
- +/*
- +A C type for declaring a variable that is used as the first argument of
- +FUNCTION_ARG and other related values. For some target machines,
- +the type int suffices and can hold the number of bytes of
- +argument so far.
- +
- +There is no need to record in CUMULATIVE_ARGS anything about the
- +arguments that have been passed on the stack. The compiler has other
- +variables to keep track of that. For target machines on which all
- +arguments are passed on the stack, there is no need to store anything in
- +CUMULATIVE_ARGS; however, the data structure must exist and
- +should not be empty, so use int.
- +*/
- +typedef struct avr32_args
- +{
- + /* Index representing the argument register the current function argument
- + will occupy */
- + int index;
- + /* A mask with bits representing the argument registers: if a bit is set
- + then this register is used for an argument */
- + int used_index;
- + /* TRUE if this function has anonymous arguments */
- + int uses_anonymous_args;
- + /* The size in bytes of the named arguments pushed on the stack */
- + int stack_pushed_args_size;
- + /* Set to true if this function needs a Return Value Pointer */
- + int use_rvp;
- + /* Set to true if function is a flashvault function. */
- + int flashvault_func;
- +
- +} CUMULATIVE_ARGS;
- +
- +
- +#define FIRST_CUM_REG_INDEX 0
- +#define LAST_CUM_REG_INDEX 4
- +#define GET_REG_INDEX(CUM) ((CUM)->index)
- +#define SET_REG_INDEX(CUM, INDEX) ((CUM)->index = (INDEX));
- +#define GET_USED_INDEX(CUM, INDEX) ((CUM)->used_index & (1 << (INDEX)))
- +#define SET_USED_INDEX(CUM, INDEX) \
- + do \
- + { \
- + if (INDEX >= 0) \
- + (CUM)->used_index |= (1 << (INDEX)); \
- + } \
- + while (0)
- +#define SET_INDEXES_UNUSED(CUM) ((CUM)->used_index = 0)
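- +
- +/* Illustrative sketch only (never built): how the bookkeeping macros
- +   above fit together.  CUM is a pointer to a CUMULATIVE_ARGS, and
- +   used_index is a plain bit mask over the argument register indexes
- +   (compare FIRST_CUM_REG_INDEX and LAST_CUM_REG_INDEX above). */
- +#if 0
- +static void
- +example_mark_arg_register (CUMULATIVE_ARGS *cum, int index)
- +{
- +  /* Record that the argument register with this index is taken.  */
- +  if (!GET_USED_INDEX (cum, index))
- +    SET_USED_INDEX (cum, index);
- +}
- +#endif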
- +
- +/*
- + A C statement (sans semicolon) for initializing the variable cum for the
- + state at the beginning of the argument list. The variable has type
- + CUMULATIVE_ARGS. The value of FNTYPE is the tree node for the data type of
- + the function which will receive the args, or 0 if the args are to a compiler
- + support library function. For direct calls that are not libcalls, FNDECL
- + contains the declaration node of the function. FNDECL is also set when
- + INIT_CUMULATIVE_ARGS is used to find arguments for the function being
- + compiled. N_NAMED_ARGS is set to the number of named arguments, including a
- + structure return address if it is passed as a parameter, when making a call.
- + When processing incoming arguments, N_NAMED_ARGS is set to -1.
- +
- + When processing a call to a compiler support library function, LIBNAME
- + identifies which one. It is a symbol_ref rtx which contains the name of the
- + function, as a string. LIBNAME is 0 when an ordinary C function call is
- + being processed. Thus, each time this macro is called, either LIBNAME or
- + FNTYPE is nonzero, but never both of them at once.
- +*/
- +#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
- + avr32_init_cumulative_args(&(CUM), FNTYPE, LIBNAME, FNDECL)
- +
- +/*
- +A C statement (sans semicolon) to update the summarizer variable
- +CUM to advance past an argument in the argument list. The
- +values MODE, TYPE and NAMED describe that argument.
- +Once this is done, the variable CUM is suitable for analyzing
- +the following argument with FUNCTION_ARG, etc.
- +
- +This macro need not do anything if the argument in question was passed
- +on the stack. The compiler knows how to track the amount of stack space
- +used for arguments without any special help.
- +*/
- +#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
- + avr32_function_arg_advance(&(CUM), MODE, TYPE, NAMED)
- +
- +/*
- +If defined, a C expression which determines whether, and in which direction,
- +to pad out an argument with extra space. The value should be of type
- +enum direction: either 'upward' to pad above the argument,
- +'downward' to pad below, or 'none' to inhibit padding.
- +
- +The amount of padding is always just enough to reach the next
- +multiple of FUNCTION_ARG_BOUNDARY; this macro does not control
- +it.
- +
- +This macro has a default definition which is right for most systems.
- +For little-endian machines, the default is to pad upward. For
- +big-endian machines, the default is to pad downward for an argument of
- +constant size shorter than an int, and upward otherwise.
- +*/
- +#define FUNCTION_ARG_PADDING(MODE, TYPE) \
- + avr32_function_arg_padding(MODE, TYPE)
- +
- +/*
- + Specify padding for the last element of a block move between registers
- + and memory. First is nonzero if this is the only element. Defining
- + this macro allows better control of register function parameters on
- + big-endian machines, without using PARALLEL rtl. In particular,
- + MUST_PASS_IN_STACK need not test padding and mode of types in registers,
- + as there is no longer a "wrong" part of a register; for example, a three
- + byte aggregate may be passed in the high part of a register if so required.
- +*/
- +#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
- + avr32_function_arg_padding(MODE, TYPE)
- +
- +/*
- +If defined, a C expression which determines whether the default
- +implementation of va_arg will attempt to pad down before reading the
- +next argument, if that argument is smaller than its aligned space as
- +controlled by PARM_BOUNDARY. If this macro is not defined, all such
- +arguments are padded down if BYTES_BIG_ENDIAN is true.
- +*/
- +#define PAD_VARARGS_DOWN \
- + (FUNCTION_ARG_PADDING (TYPE_MODE (type), type) == downward)
- +
- +/*
- +A C expression that is nonzero if REGNO is the number of a hard
- +register in which function arguments are sometimes passed. This does
- +not include implicit arguments such as the static chain and
- +the structure-value address. On many machines, no registers can be
- +used for this purpose since all function arguments are pushed on the
- +stack.
- +*/
- +/*
- + Use r8 - r12 for function arguments.
- +*/
- +#define FUNCTION_ARG_REGNO_P(REGNO) \
- + (REGNO >= 3 && REGNO <= 7)
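- +
- +/* The range 3..7 above is the internal numbering of r12..r8: with the
- +   reversed register file, r12 is internal register 3 and r8 is internal
- +   register 7, so this accepts exactly the five argument registers
- +   r8-r12 (see NUM_ARG_REGS below). */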
- +
- +/* Number of registers used for passing function arguments */
- +#define NUM_ARG_REGS 5
- +
- +/*
- +If defined, the order in which arguments are loaded into their
- +respective argument registers is reversed so that the last
- +argument is loaded first. This macro only affects arguments
- +passed in registers.
- +*/
- +/* #define LOAD_ARGS_REVERSED */
- +
- +/** How Scalar Function Values Are Returned **/
- +
- +/* AVR32 uses r12 as the return register. */
- +#define RET_REGISTER (15 - 12)
- +
- +/*
- +A C expression to create an RTX representing the place where a library
- +function returns a value of mode MODE. If the precise function
- +being called is known, FUNC is a tree node
- +(FUNCTION_DECL) for it; otherwise, FUNC is a null
- +pointer. This makes it possible to use a different value-returning
- +convention for specific functions when all their calls are
- +known.
- +
- +Note that "library function" in this context means a compiler
- +support routine, used to perform arithmetic, whose name is known
- +specially by the compiler and was not mentioned in the C code being
- +compiled.
- +
- +The definition of LIBCALL_VALUE need not be concerned with aggregate
- +data types, because none of the library functions returns such types.
- +*/
- +#define LIBCALL_VALUE(MODE) avr32_libcall_value(MODE)
- +
- +/*
- +A C expression that is nonzero if REGNO is the number of a hard
- +register in which the values of called function may come back.
- +
- +A register whose use for returning values is limited to serving as the
- +second of a pair (for a value of type double, say) need not be
- +recognized by this macro. So for most machines, this definition
- +suffices:
- + #define FUNCTION_VALUE_REGNO_P(N) ((N) == 0)
- +
- +If the machine has register windows, so that the caller and the called
- +function use different registers for the return value, this macro
- +should recognize only the caller's register numbers.
- +*/
- +/*
- + When returning a value of mode DImode, r11:r10 is used, else r12 is used.
- +*/
- +#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == RET_REGISTER \
- + || (REGNO) == INTERNAL_REGNUM(11))
- +
- +
- +/** How Large Values Are Returned **/
- +
- +
- +/*
- +Define this macro to be 1 if all structure and union return values must be
- +in memory. Since this results in slower code, this should be defined
- +only if needed for compatibility with other compilers or with an ABI.
- +If you define this macro to be 0, then the conventions used for structure
- +and union return values are decided by the RETURN_IN_MEMORY macro.
- +
- +If not defined, this defaults to the value 1.
- +*/
- +#define DEFAULT_PCC_STRUCT_RETURN 0
- +
- +
- +
- +
- +/** Generating Code for Profiling **/
- +
- +/*
- +A C statement or compound statement to output to FILE some
- +assembler code to call the profiling subroutine mcount.
- +
- +The details of how mcount expects to be called are determined by
- +your operating system environment, not by GCC. To figure them out,
- +compile a small program for profiling using the system's installed C
- +compiler and look at the assembler code that results.
- +
- +Older implementations of mcount expect the address of a counter
- +variable to be loaded into some register. The name of this variable is
- +'LP' followed by the number LABELNO, so you would generate
- +the name using 'LP%d' in a fprintf.
- +*/
- +/* ToDo: fixme */
- +#ifndef FUNCTION_PROFILER
- +#define FUNCTION_PROFILER(FILE, LABELNO) \
- + fprintf((FILE), "/* profiler %d */", (LABELNO))
- +#endif
- +
- +
- +/*****************************************************************************
- + * Trampolines for Nested Functions *
- + *****************************************************************************/
- +
- +/*
- +A C statement to output, on the stream FILE, assembler code for a
- +block of data that contains the constant parts of a trampoline. This
- +code should not include a label - the label is taken care of
- +automatically.
- +
- +If you do not define this macro, it means no template is needed
- +for the target. Do not define this macro on systems where the block move
- +code to copy the trampoline into place would be larger than the code
- +to generate it on the spot.
- +*/
- +/* ToDo: correct? */
- +#define TRAMPOLINE_TEMPLATE(FILE) avr32_trampoline_template(FILE);
- +
- +
- +/*
- +A C expression for the size in bytes of the trampoline, as an integer.
- +*/
- +/* ToDo: fixme */
- +#define TRAMPOLINE_SIZE 0x0C
- +
- +/*
- +Alignment required for trampolines, in bits.
- +
- +If you don't define this macro, the value of BIGGEST_ALIGNMENT
- +is used for aligning trampolines.
- +*/
- +#define TRAMPOLINE_ALIGNMENT 16
- +
- +/*
- +A C statement to initialize the variable parts of a trampoline.
- +ADDR is an RTX for the address of the trampoline; FNADDR is
- +an RTX for the address of the nested function; STATIC_CHAIN is an
- +RTX for the static chain value that should be passed to the function
- +when it is called.
- +*/
- +#define INITIALIZE_TRAMPOLINE(ADDR, FNADDR, STATIC_CHAIN) \
- + avr32_initialize_trampoline(ADDR, FNADDR, STATIC_CHAIN)
- +
- +
- +/******************************************************************************
- + * Implicit Calls to Library Routines
- + *****************************************************************************/
- +
- +/* Tail calling. */
- +
- +/* A C expression that evaluates to true if it is ok to perform a sibling
- + call to DECL. */
- +#define FUNCTION_OK_FOR_SIBCALL(DECL) 0
- +
- +#define OVERRIDE_OPTIONS avr32_override_options ()
- +
- +#define OPTIMIZATION_OPTIONS(LEVEL, SIZE) avr32_optimization_options (LEVEL, SIZE)
- +
- +/******************************************************************************
- + * Addressing Modes
- + *****************************************************************************/
- +
- +/*
- +A C expression that is nonzero if the machine supports pre-increment,
- +pre-decrement, post-increment, or post-decrement addressing respectively.
- +*/
- +/*
- + AVR32 supports Rp++ and --Rp
- +*/
- +#define HAVE_PRE_INCREMENT 0
- +#define HAVE_PRE_DECREMENT 1
- +#define HAVE_POST_INCREMENT 1
- +#define HAVE_POST_DECREMENT 0
- +
- +/*
- +A C expression that is nonzero if the machine supports pre- or
- +post-address side-effect generation involving constants other than
- +the size of the memory operand.
- +*/
- +#define HAVE_PRE_MODIFY_DISP 0
- +#define HAVE_POST_MODIFY_DISP 0
- +
- +/*
- +A C expression that is nonzero if the machine supports pre- or
- +post-address side-effect generation involving a register displacement.
- +*/
- +#define HAVE_PRE_MODIFY_REG 0
- +#define HAVE_POST_MODIFY_REG 0
- +
- +/*
- +A C expression that is 1 if the RTX X is a constant which
- +is a valid address. On most machines, this can be defined as
- +CONSTANT_P (X), but a few machines are more restrictive
- +in which constant addresses are supported.
- +
- +CONSTANT_P accepts integer-valued expressions whose values are
- +not explicitly known, such as symbol_ref, label_ref, and
- +high expressions and const arithmetic expressions, in
- +addition to const_int and const_double expressions.
- +*/
- +#define CONSTANT_ADDRESS_P(X) CONSTANT_P(X)
- +
- +/*
- +A number, the maximum number of registers that can appear in a valid
- +memory address. Note that it is up to you to specify a value equal to
- +the maximum number that GO_IF_LEGITIMATE_ADDRESS would ever
- +accept.
- +*/
- +#define MAX_REGS_PER_ADDRESS 2
- +
- +/*
- +A C compound statement with a conditional goto LABEL;
- +executed if X (an RTX) is a legitimate memory address on the
- +target machine for a memory operand of mode MODE.
- +
- +It usually pays to define several simpler macros to serve as
- +subroutines for this one. Otherwise it may be too complicated to
- +understand.
- +
- +This macro must exist in two variants: a strict variant and a
- +non-strict one. The strict variant is used in the reload pass. It
- +must be defined so that any pseudo-register that has not been
- +allocated a hard register is considered a memory reference. In
- +contexts where some kind of register is required, a pseudo-register
- +with no hard register must be rejected.
- +
- +The non-strict variant is used in other passes. It must be defined to
- +accept all pseudo-registers in every context where some kind of
- +register is required.
- +
- +Compiler source files that want to use the strict variant of this
- +macro define the macro REG_OK_STRICT. You should use an
- +#ifdef REG_OK_STRICT conditional to define the strict variant
- +in that case and the non-strict variant otherwise.
- +
- +Subroutines to check for acceptable registers for various purposes (one
- +for base registers, one for index registers, and so on) are typically
- +among the subroutines used to define GO_IF_LEGITIMATE_ADDRESS.
- +Then only these subroutine macros need have two variants; the higher
- +levels of macros may be the same whether strict or not.
- +
- +Normally, constant addresses which are the sum of a symbol_ref
- +and an integer are stored inside a const RTX to mark them as
- +constant. Therefore, there is no need to recognize such sums
- +specifically as legitimate addresses. Normally you would simply
- +recognize any const as legitimate.
- +
- +Usually PRINT_OPERAND_ADDRESS is not prepared to handle constant
- +sums that are not marked with const. It assumes that a naked
- +plus indicates indexing. If so, then you must reject such
- +naked constant sums as illegitimate addresses, so that none of them will
- +be given to PRINT_OPERAND_ADDRESS.
- +
- +On some machines, whether a symbolic address is legitimate depends on
- +the section that the address refers to. On these machines, define the
- +macro ENCODE_SECTION_INFO to store the information into the
- +symbol_ref, and then check for it here. When you see a
- +const, you will have to look inside it to find the
- +symbol_ref in order to determine the section.
- +
- +The best way to modify the name string is by adding text to the
- +beginning, with suitable punctuation to prevent any ambiguity. Allocate
- +the new name in saveable_obstack. You will have to modify
- +ASM_OUTPUT_LABELREF to remove and decode the added text and
- +output the name accordingly, and define STRIP_NAME_ENCODING to
- +access the original name string.
- +
- +You can check the information stored here into the symbol_ref in
- +the definitions of the macros GO_IF_LEGITIMATE_ADDRESS and
- +PRINT_OPERAND_ADDRESS.
- +*/
- +#ifdef REG_OK_STRICT
- +# define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
- + do \
- + { \
- + if (avr32_legitimate_address(MODE, X, 1)) \
- + goto LABEL; \
- + } \
- + while (0)
- +#else
- +# define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
- + do \
- + { \
- + if (avr32_legitimate_address(MODE, X, 0)) \
- + goto LABEL; \
- + } \
- + while (0)
- +#endif
- +
- +
- +
- +/*
- +A C compound statement that attempts to replace X with a valid
- +memory address for an operand of mode MODE. WIN will be a
- +C statement label elsewhere in the code; the macro definition may use
- +
- + GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN);
- +
- +to avoid further processing if the address has become legitimate.
- +
- +X will always be the result of a call to break_out_memory_refs,
- +and OLDX will be the operand that was given to that function to produce
- +X.
- +
- +The code generated by this macro should not alter the substructure of
- +X. If it transforms X into a more legitimate form, it
- +should assign X (which will always be a C variable) a new value.
- +
- +It is not necessary for this macro to come up with a legitimate
- +address. The compiler has standard ways of doing so in all cases. In
- +fact, it is safe for this macro to do nothing. But often a
- +machine-dependent strategy can generate better code.
- +*/
- +#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
- + do \
- + { \
- + if (GET_CODE(X) == PLUS \
- + && GET_CODE(XEXP(X, 0)) == REG \
- + && GET_CODE(XEXP(X, 1)) == CONST_INT \
- + && !CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(X, 1)), \
- + 'K', "Ks16")) \
- + { \
- + rtx index = force_reg(SImode, XEXP(X, 1)); \
- + X = gen_rtx_PLUS( SImode, XEXP(X, 0), index); \
- + } \
- + GO_IF_LEGITIMATE_ADDRESS(MODE, X, WIN); \
- + } \
- + while(0)
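- +/* Worked example, derived from the definition above and purely for
- +   illustration: given an address (plus (reg Rp) (const_int 0x12345)) whose
- +   constant does not satisfy the "Ks16" constraint, the constant is forced
- +   into a fresh register, so the address becomes (plus (reg Rp) (reg Rtmp)),
- +   which GO_IF_LEGITIMATE_ADDRESS can then accept as a register-indexed
- +   address.  Rp and Rtmp are placeholder register names.  */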
- +
- +
- +/*
- +A C statement or compound statement with a conditional
- +goto LABEL; executed if memory address X (an RTX) can have
- +different meanings depending on the machine mode of the memory
- +reference it is used for or if the address is valid for some modes
- +but not others.
- +
- +Autoincrement and autodecrement addresses typically have mode-dependent
- +effects because the amount of the increment or decrement is the size
- +of the operand being addressed. Some machines have other mode-dependent
- +addresses. Many RISC machines have no mode-dependent addresses.
- +
- +You may assume that ADDR is a valid address for the machine.
- +*/
- +#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \
- + do \
- + { \
- + if (GET_CODE (ADDR) == POST_INC \
- + || GET_CODE (ADDR) == PRE_DEC) \
- + goto LABEL; \
- + } \
- + while (0)
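- +/* Example: an address such as (post_inc (reg sp)) is mode dependent because
- +   the implicit increment equals the size of the operand being accessed, so
- +   the definition above flags POST_INC and PRE_DEC addresses as mode
- +   dependent.  */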
- +
- +/*
- +A C expression that is nonzero if X is a legitimate constant for
- +an immediate operand on the target machine. You can assume that
- +X satisfies CONSTANT_P, so you need not check this. In fact,
- +'1' is a suitable definition for this macro on machines where
- +anything CONSTANT_P is valid.
- +*/
- +#define LEGITIMATE_CONSTANT_P(X) avr32_legitimate_constant_p(X)
- +
- +
- +/******************************************************************************
- + * Condition Code Status
- + *****************************************************************************/
- +
- +/*
- +C code for a data type which is used for declaring the mdep
- +component of cc_status. It defaults to int.
- +
- +This macro is not used on machines that do not use cc0.
- +*/
- +
- +typedef struct
- +{
- + int flags;
- + rtx value;
- + int cond_exec_cmp_clobbered;
- +} avr32_status_reg;
- +
- +
- +#define CC_STATUS_MDEP avr32_status_reg
- +
- +/*
- +A C expression to initialize the mdep field to "empty".
- +The default definition does nothing, since most machines don't use
- +the field anyway. If you want to use the field, you should probably
- +define this macro to initialize it.
- +
- +This macro is not used on machines that do not use cc0.
- +*/
- +
- +#define CC_STATUS_MDEP_INIT \
- + (cc_status.mdep.flags = CC_NONE , cc_status.mdep.cond_exec_cmp_clobbered = 0, cc_status.mdep.value = 0)
- +
- +/*
- +A C compound statement to set the components of cc_status
- +appropriately for an insn INSN whose body is EXP. It is
- +this macro's responsibility to recognize insns that set the condition
- +code as a byproduct of other activity as well as those that explicitly
- +set (cc0).
- +
- +This macro is not used on machines that do not use cc0.
- +
- +If there are insns that do not set the condition code but do alter
- +other machine registers, this macro must check to see whether they
- +invalidate the expressions that the condition code is recorded as
- +reflecting. For example, on the 68000, insns that store in address
- +registers do not set the condition code, which means that usually
- +NOTICE_UPDATE_CC can leave cc_status unaltered for such
- +insns. But suppose that the previous insn set the condition code
- +based on location 'a4@@(102)' and the current insn stores a new
- +value in 'a4'. Although the condition code is not changed by
- +this, it will no longer be true that it reflects the contents of
- +'a4@@(102)'. Therefore, NOTICE_UPDATE_CC must alter
- +cc_status in this case to say that nothing is known about the
- +condition code value.
- +
- +The definition of NOTICE_UPDATE_CC must be prepared to deal
- +with the results of peephole optimization: insns whose patterns are
- +parallel RTXs containing various reg, mem or
- +constants which are just the operands. The RTL structure of these
- +insns is not sufficient to indicate what the insns actually do. What
- +NOTICE_UPDATE_CC should do when it sees one is just to run
- +CC_STATUS_INIT.
- +
- +A possible definition of NOTICE_UPDATE_CC is to call a function
- +that looks at an attribute (see Insn Attributes) named, for example,
- +'cc'. This avoids having detailed information about patterns in
- +two places, the 'md' file and in NOTICE_UPDATE_CC.
- +*/
- +
- +#define NOTICE_UPDATE_CC(EXP, INSN) avr32_notice_update_cc(EXP, INSN)
- +
- +
- +
- +
- +/******************************************************************************
- + * Describing Relative Costs of Operations
- + *****************************************************************************/
- +
- +
- +
- +/*
- +A C expression for the cost of moving data of mode MODE from a
- +register in class FROM to one in class TO. The classes are
- +expressed using the enumeration values such as GENERAL_REGS. A
- +value of 2 is the default; other values are interpreted relative to
- +that.
- +
- +It is not required that the cost always equal 2 when FROM is the
- +same as TO; on some machines it is expensive to move between
- +registers if they are not general registers.
- +
- +If reload sees an insn consisting of a single set between two
- +hard registers, and if REGISTER_MOVE_COST applied to their
- +classes returns a value of 2, reload does not check to ensure that the
- +constraints of the insn are met. Setting a cost of other than 2 will
- +allow reload to verify that the constraints are met. You should do this
- +if the mov<mode> pattern's constraints do not allow such copying.
- +*/
- +#define REGISTER_MOVE_COST(MODE, FROM, TO) \
- + ((GET_MODE_SIZE(MODE) <= 4) ? 2: \
- + (GET_MODE_SIZE(MODE) <= 8) ? 3: \
- + 4)
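- +/* The definition above charges 2 for moves that fit in one word
- +   (GET_MODE_SIZE <= 4), 3 for double-word moves (<= 8) and 4 for anything
- +   larger, regardless of the register classes involved.  */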
- +
- +/*
- +A C expression for the cost of moving data of mode MODE between a
- +register of class CLASS and memory; IN is zero if the value
- +is to be written to memory, nonzero if it is to be read in. This cost
- +is relative to those in REGISTER_MOVE_COST. If moving between
- +registers and memory is more expensive than between two registers, you
- +should define this macro to express the relative cost.
- +
- +If you do not define this macro, GCC uses a default cost of 4 plus
- +the cost of copying via a secondary reload register, if one is
- +needed. If your machine requires a secondary reload register to copy
- +between memory and a register of CLASS but the reload mechanism is
- +more complex than copying via an intermediate, define this macro to
- +reflect the actual cost of the move.
- +
- +GCC defines the function memory_move_secondary_cost if
- +secondary reloads are needed. It computes the costs due to copying via
- +a secondary register. If your machine copies from memory using a
- +secondary register in the conventional way but the default base value of
- +4 is not correct for your machine, define this macro to add some other
- +value to the result of that function. The arguments to that function
- +are the same as to this macro.
- +*/
- +/*
- + Memory moves are costly
- +*/
- +#define MEMORY_MOVE_COST(MODE, CLASS, IN) \
- + (((IN) ? ((GET_MODE_SIZE(MODE) < 4) ? 4 : \
- + (GET_MODE_SIZE(MODE) > 8) ? 6 : \
- + 3) \
- + : ((GET_MODE_SIZE(MODE) > 8) ? 6 : 3)))
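- +/* With the definition above, sub-word loads cost 4, moves wider than two
- +   words cost 6 in either direction, and all other loads and stores cost 3,
- +   relative to the REGISTER_MOVE_COST baseline of 2.  */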
- +
- +/*
- +A C expression for the cost of a branch instruction. A value of 1 is
- +the default; other values are interpreted relative to that.
- +*/
- + /* Try to use conditionals as much as possible */
- +#define BRANCH_COST(speed_p, predictable_p) (TARGET_BRANCH_PRED ? 3 : 4)
- +
- +/*A C expression for the maximum number of instructions to execute via conditional
- + execution instructions instead of a branch. A value of BRANCH_COST+1 is the default
- + if the machine does not use cc0, and 1 if it does use cc0.*/
- +#define MAX_CONDITIONAL_EXECUTE 4
- +
- +/*
- +Define this macro as a C expression which is nonzero if accessing less
- +than a word of memory (i.e., a char or a short) is no
- +faster than accessing a word of memory, i.e., if such accesses
- +require more than one instruction or if there is no difference in cost
- +between byte and (aligned) word loads.
- +
- +When this macro is not defined, the compiler will access a field by
- +finding the smallest containing object; when it is defined, a fullword
- +load will be used if alignment permits. Unless byte accesses are
- +faster than word accesses, using word accesses is preferable since it
- +may eliminate subsequent memory accesses when later accesses occur to
- +other fields in the same word of the structure, but to different bytes.
- +*/
- +#define SLOW_BYTE_ACCESS 1
- +
- +
- +/*
- +Define this macro if it is as good or better to call a constant
- +function address than to call an address kept in a register.
- +*/
- +#define NO_FUNCTION_CSE
- +
- +
- +/******************************************************************************
- + * Adjusting the Instruction Scheduler
- + *****************************************************************************/
- +
- +/*****************************************************************************
- + * Dividing the Output into Sections (Texts, Data, ...) *
- + *****************************************************************************/
- +
- +/*
- +A C expression whose value is a string, including spacing, containing the
- +assembler operation that should precede instructions and read-only data.
- +Normally "\t.text" is right.
- +*/
- +#define TEXT_SECTION_ASM_OP "\t.text"
- +/*
- +A C statement that switches to the default section containing instructions.
- +Normally this is not needed, as simply defining TEXT_SECTION_ASM_OP
- +is enough. The MIPS port uses this to sort all functions after all data
- +declarations.
- +*/
- +/* #define TEXT_SECTION */
- +
- +/*
- +A C expression whose value is a string, including spacing, containing the
- +assembler operation to identify the following data as writable initialized
- +data. Normally "\t.data" is right.
- +*/
- +#define DATA_SECTION_ASM_OP "\t.data"
- +
- +/*
- +If defined, a C expression whose value is a string, including spacing,
- +containing the assembler operation to identify the following data as
- +shared data. If not defined, DATA_SECTION_ASM_OP will be used.
- +*/
- +
- +/*
- +A C expression whose value is a string, including spacing, containing
- +the assembler operation to identify the following data as read-only
- +initialized data.
- +*/
- +#undef READONLY_DATA_SECTION_ASM_OP
- +#define READONLY_DATA_SECTION_ASM_OP \
- + ((TARGET_USE_RODATA_SECTION) ? \
- + "\t.section\t.rodata" : \
- + TEXT_SECTION_ASM_OP )
- +
- +
- +/*
- +If defined, a C expression whose value is a string, including spacing,
- +containing the assembler operation to identify the following data as
- +uninitialized global data. If not defined, and neither
- +ASM_OUTPUT_BSS nor ASM_OUTPUT_ALIGNED_BSS are defined,
- +uninitialized global data will be output in the data section if
- +-fno-common is passed, otherwise ASM_OUTPUT_COMMON will be
- +used.
- +*/
- +#define BSS_SECTION_ASM_OP "\t.section\t.bss"
- +
- +/*
- +If defined, a C expression whose value is a string, including spacing,
- +containing the assembler operation to identify the following data as
- +uninitialized global shared data. If not defined, and
- +BSS_SECTION_ASM_OP is, the latter will be used.
- +*/
- +/*#define SHARED_BSS_SECTION_ASM_OP "\trseg\tshared_bbs_section:data:noroot(0)\n"*/
- +/*
- +If defined, a C expression whose value is a string, including spacing,
- +containing the assembler operation to identify the following data as
- +initialization code. If not defined, GCC will assume such a section does
- +not exist.
- +*/
- +#undef INIT_SECTION_ASM_OP
- +#define INIT_SECTION_ASM_OP "\t.section\t.init"
- +
- +/*
- +If defined, a C expression whose value is a string, including spacing,
- +containing the assembler operation to identify the following data as
- +finalization code. If not defined, GCC will assume such a section does
- +not exist.
- +*/
- +#undef FINI_SECTION_ASM_OP
- +#define FINI_SECTION_ASM_OP "\t.section\t.fini"
- +
- +/*
- +If defined, an ASM statement that switches to a different section
- +via SECTION_OP, calls FUNCTION, and switches back to
- +the text section. This is used in crtstuff.c if
- +INIT_SECTION_ASM_OP or FINI_SECTION_ASM_OP is defined, to call
- +initialization and finalization functions from the init and fini
- +sections. By default, this macro uses a simple function call. Some
- +ports need hand-crafted assembly code to avoid dependencies on
- +registers initialized in the function prologue or to ensure that
- +constant pools don't end up too far away in the text section.
- +*/
- +#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
- + asm ( SECTION_OP "\n" \
- + "mcall r6[" USER_LABEL_PREFIX #FUNC "@got]\n" \
- + TEXT_SECTION_ASM_OP);
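- +/* Note that the mcall above goes through the GOT entry of FUNC using r6,
- +   which is the PIC offset table register defined further down
- +   (PIC_OFFSET_TABLE_REGNUM), so the init/fini call sequence also works for
- +   position-independent code.  */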
- +
- +
- +/*
- +Define this macro to be an expression with a nonzero value if jump
- +tables (for tablejump insns) should be output in the text
- +section, along with the assembler instructions. Otherwise, the
- +readonly data section is used.
- +
- +This macro is irrelevant if there is no separate readonly data section.
- +*/
- +/* Put jump tables in text section if we have caches. Otherwise assume that
- + loading data from code memory is slow. */
- +#define JUMP_TABLES_IN_TEXT_SECTION \
- + (TARGET_CACHES ? 1 : 0)
- +
- +
- +/******************************************************************************
- + * Position Independent Code (PIC)
- + *****************************************************************************/
- +
- +#ifndef AVR32_ALWAYS_PIC
- +#define AVR32_ALWAYS_PIC 0
- +#endif
- +
- +/* GOT is set to r6 */
- +#define PIC_OFFSET_TABLE_REGNUM INTERNAL_REGNUM(6)
- +
- +/*
- +A C expression that is nonzero if X is a legitimate immediate
- +operand on the target machine when generating position independent code.
- +You can assume that X satisfies CONSTANT_P, so you need not
- +check this. You can also assume flag_pic is true, so you need not
- +check it either. You need not define this macro if all constants
- +(including SYMBOL_REF) can be immediate operands when generating
- +position independent code.
- +*/
- +/* We can't directly access anything that contains a symbol,
- + nor can we indirect via the constant pool. */
- +#define LEGITIMATE_PIC_OPERAND_P(X) avr32_legitimate_pic_operand_p(X)
- +
- +
- +/* We need to know when we are making a constant pool; this determines
- + whether data needs to be in the GOT or can be referenced via a GOT
- + offset. */
- +extern int making_const_table;
- +
- +/******************************************************************************
- + * Defining the Output Assembler Language
- + *****************************************************************************/
- +
- +
- +/*
- +A C string constant describing how to begin a comment in the target
- +assembler language. The compiler assumes that the comment will end at
- +the end of the line.
- +*/
- +#define ASM_COMMENT_START "# "
- +
- +/*
- +A C string constant for text to be output before each asm
- +statement or group of consecutive ones. Normally this is
- +"#APP", which is a comment that has no effect on most
- +assemblers but tells the GNU assembler that it must check the lines
- +that follow for all valid assembler constructs.
- +*/
- +#undef ASM_APP_ON
- +#define ASM_APP_ON "#APP\n"
- +
- +/*
- +A C string constant for text to be output after each asm
- +statement or group of consecutive ones. Normally this is
- +"#NO_APP", which tells the GNU assembler to resume making the
- +time-saving assumptions that are valid for ordinary compiler output.
- +*/
- +#undef ASM_APP_OFF
- +#define ASM_APP_OFF "#NO_APP\n"
- +
- +
- +
- +#define FILE_ASM_OP "\t.file\n"
- +#define IDENT_ASM_OP "\t.ident\t"
- +#define SET_ASM_OP "\t.set\t"
- +
- +
- +/*
- + * Output assembly directives to switch to section name. The section
- + * should have attributes as specified by flags, which is a bit mask
- + * of the SECTION_* flags defined in 'output.h'. If align is nonzero,
- + * it contains an alignment in bytes to be used for the section,
- + * otherwise some target default should be used. Only targets that
- + * must specify an alignment within the section directive need pay
- + * attention to align -- we will still use ASM_OUTPUT_ALIGN.
- + *
- + * NOTE: This one must not be moved to avr32.c
- + */
- +#undef TARGET_ASM_NAMED_SECTION
- +#define TARGET_ASM_NAMED_SECTION default_elf_asm_named_section
- +
- +
- +/*
- +You may define this macro as a C expression. You should define the
- +expression to have a nonzero value if GCC should output the constant
- +pool for a function before the code for the function, or a zero value if
- +GCC should output the constant pool after the function. If you do
- +not define this macro, the usual case, GCC will output the constant
- +pool before the function.
- +*/
- +#define CONSTANT_POOL_BEFORE_FUNCTION 0
- +
- +
- +/*
- +Define this macro as a C expression which is nonzero if the constant
- +EXP, of type tree, should be output after the code for a
- +function. The compiler will normally output all constants before the
- +function; you need not define this macro if this is OK.
- +*/
- +#define CONSTANT_AFTER_FUNCTION_P(EXP) 1
- +
- +
- +/*
- +Define this macro as a C expression which is nonzero if C is
- +used as a logical line separator by the assembler. STR points to the
- +position in the string where C was found; this can be used if a
- +line separator uses multiple characters.
- +
- +If you do not define this macro, the default is that only
- +the character ';' is treated as a logical line separator.
- +*/
- +#define IS_ASM_LOGICAL_LINE_SEPARATOR(C,STR) (((C) == '\n') || ((C) == ';'))
- +
- +
- +/** Output of Uninitialized Variables **/
- +
- +/*
- +A C statement (sans semicolon) to output to the stdio stream
- +STREAM the assembler definition of a common-label named
- +NAME whose size is SIZE bytes. The variable ROUNDED
- +is the size rounded up to whatever alignment the caller wants.
- +
- +Use the expression assemble_name(STREAM, NAME) to
- +output the name itself; before and after that, output the additional
- +assembler syntax for defining the name, and a newline.
- +
- +This macro controls how the assembler definitions of uninitialized
- +common global variables are output.
- +*/
- +/*
- +#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
- + avr32_asm_output_common(STREAM, NAME, SIZE, ROUNDED)
- +*/
- +
- +#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
- + do \
- + { \
- + fputs ("\t.comm ", (FILE)); \
- + assemble_name ((FILE), (NAME)); \
- + fprintf ((FILE), ",%d\n", (SIZE)); \
- + } \
- + while (0)
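- +/* For example, a 16-byte common object named "foo" is emitted by the
- +   definition above as (assuming no user label prefix):
- +
- +       .comm foo,16
- +*/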
- +
- +/*
- + * Like ASM_OUTPUT_BSS except takes the required alignment as a
- + * separate, explicit argument. If you define this macro, it is used
- + * in place of ASM_OUTPUT_BSS, and gives you more flexibility in
- + * handling the required alignment of the variable. The alignment is
- + * specified as the number of bits.
- + *
- + * Try to use function asm_output_aligned_bss defined in file varasm.c
- + * when defining this macro.
- + */
- +#define ASM_OUTPUT_ALIGNED_BSS(STREAM, DECL, NAME, SIZE, ALIGNMENT) \
- + asm_output_aligned_bss (STREAM, DECL, NAME, SIZE, ALIGNMENT)
- +
- +/*
- +A C statement (sans semicolon) to output to the stdio stream
- +STREAM the assembler definition of a local-common-label named
- +NAME whose size is SIZE bytes. The variable ROUNDED
- +is the size rounded up to whatever alignment the caller wants.
- +
- +Use the expression assemble_name(STREAM, NAME) to
- +output the name itself; before and after that, output the additional
- +assembler syntax for defining the name, and a newline.
- +
- +This macro controls how the assembler definitions of uninitialized
- +static variables are output.
- +*/
- +#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
- + do \
- + { \
- + fputs ("\t.lcomm ", (FILE)); \
- + assemble_name ((FILE), (NAME)); \
- + fprintf ((FILE), ",%d, %d\n", (SIZE), 2); \
- + } \
- + while (0)
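- +/* For example, an 8-byte local common object named "bar" is emitted by the
- +   definition above as (assuming no user label prefix):
- +
- +       .lcomm bar,8, 2
- +
- +   where the trailing 2 is the alignment argument hard-coded in the fprintf
- +   call.  */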
- +
- +
- +/*
- +A C statement (sans semicolon) to output to the stdio stream
- +STREAM the assembler definition of a label named NAME.
- +Use the expression assemble_name(STREAM, NAME) to
- +output the name itself; before and after that, output the additional
- +assembler syntax for defining the name, and a newline.
- +*/
- +#define ASM_OUTPUT_LABEL(STREAM, NAME) avr32_asm_output_label(STREAM, NAME)
- +
- +/* A C string containing the appropriate assembler directive to
- + * specify the size of a symbol, without any arguments. On systems
- + * that use ELF, the default (in 'config/elfos.h') is '"\t.size\t"';
- + * on other systems, the default is not to define this macro.
- + *
- + * Define this macro only if it is correct to use the default
- + * definitions of ASM_OUTPUT_SIZE_DIRECTIVE and
- + * ASM_OUTPUT_MEASURED_SIZE for your system. If you need your own
- + * custom definitions of those macros, or if you do not need explicit
- + * symbol sizes at all, do not define this macro.
- + */
- +#define SIZE_ASM_OP "\t.size\t"
- +
- +
- +/*
- +A C statement (sans semicolon) to output to the stdio stream
- +STREAM some commands that will make the label NAME global;
- +that is, available for reference from other files. Use the expression
- +assemble_name(STREAM, NAME) to output the name
- +itself; before and after that, output the additional assembler syntax
- +for making that name global, and a newline.
- +*/
- +#define GLOBAL_ASM_OP "\t.global\t"
- +
- +
- +
- +/*
- +A C expression which evaluates to true if the target supports weak symbols.
- +
- +If you don't define this macro, defaults.h provides a default
- +definition. If either ASM_WEAKEN_LABEL or ASM_WEAKEN_DECL
- +is defined, the default definition is '1'; otherwise, it is
- +'0'. Define this macro if you want to control weak symbol support
- +with a compiler flag such as -melf.
- +*/
- +#define SUPPORTS_WEAK 1
- +
- +/*
- +A C statement (sans semicolon) to output to the stdio stream
- +STREAM a reference in assembler syntax to a label named
- +NAME. This should add '_' to the front of the name, if that
- +is customary on your operating system, as it is in most Berkeley Unix
- +systems. This macro is used in assemble_name.
- +*/
- +#define ASM_OUTPUT_LABELREF(STREAM, NAME) \
- + avr32_asm_output_labelref(STREAM, NAME)
- +
- +
- +
- +/*
- +A C expression to assign to OUTVAR (which is a variable of type
- +char *) a newly allocated string made from the string
- +NAME and the number NUMBER, with some suitable punctuation
- +added. Use alloca to get space for the string.
- +
- +The string will be used as an argument to ASM_OUTPUT_LABELREF to
- +produce an assembler label for an internal static variable whose name is
- +NAME. Therefore, the string must be such as to result in valid
- +assembler code. The argument NUMBER is different each time this
- +macro is executed; it prevents conflicts between similarly-named
- +internal static variables in different scopes.
- +
- +Ideally this string should not be a valid C identifier, to prevent any
- +conflict with the user's own symbols. Most assemblers allow periods
- +or percent signs in assembler symbols; putting at least one of these
- +between the name and the number will suffice.
- +*/
- +#define ASM_FORMAT_PRIVATE_NAME(OUTVAR, NAME, NUMBER) \
- + do \
- + { \
- + (OUTVAR) = (char *) alloca (strlen ((NAME)) + 10); \
- + sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)); \
- + } \
- + while (0)
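- +/* For example, with NAME "foo" and NUMBER 3 the definition above produces
- +   the label "foo.3"; the '.' keeps the result from clashing with any valid C
- +   identifier in user code.  */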
- +
- +
- +/** Macros Controlling Initialization Routines **/
- +
- +
- +/*
- +If defined, main will not call __main as described above.
- +This macro should be defined for systems that control start-up code
- +on a symbol-by-symbol basis, such as OSF/1, and should not
- +be defined explicitly for systems that support INIT_SECTION_ASM_OP.
- +*/
- +/*
- + __main is not defined when debugging.
- +*/
- +#define HAS_INIT_SECTION
- +
- +
- +/** Output of Assembler Instructions **/
- +
- +/*
- +A C initializer containing the assembler's names for the machine
- +registers, each one as a C string constant. This is what translates
- +register numbers in the compiler into assembler language.
- +*/
- +
- +#define REGISTER_NAMES \
- +{ \
- + "pc", "lr", \
- + "sp", "r12", \
- + "r11", "r10", \
- + "r9", "r8", \
- + "r7", "r6", \
- + "r5", "r4", \
- + "r3", "r2", \
- + "r1", "r0", \
- +}
- +
- +/*
- +A C compound statement to output to stdio stream STREAM the
- +assembler syntax for an instruction operand X. X is an
- +RTL expression.
- +
- +CODE is a value that can be used to specify one of several ways
- +of printing the operand. It is used when identical operands must be
- +printed differently depending on the context. CODE comes from
- +the '%' specification that was used to request printing of the
- +operand. If the specification was just '%digit' then
- +CODE is 0; if the specification was '%ltr digit'
- +then CODE is the ASCII code for ltr.
- +
- +If X is a register, this macro should print the register's name.
- +The names can be found in an array reg_names whose type is
- +char *[]. reg_names is initialized from REGISTER_NAMES.
- +
- +When the machine description has a specification '%punct'
- +(a '%' followed by a punctuation character), this macro is called
- +with a null pointer for X and the punctuation character for
- +CODE.
- +*/
- +#define PRINT_OPERAND(STREAM, X, CODE) avr32_print_operand(STREAM, X, CODE)
- +
- +/* A C statement to be executed just prior to the output of
- + assembler code for INSN, to modify the extracted operands so
- + they will be output differently.
- +
- + Here the argument OPVEC is the vector containing the operands
- + extracted from INSN, and NOPERANDS is the number of elements of
- + the vector which contain meaningful data for this insn.
- + The contents of this vector are what will be used to convert the insn
- + template into assembler code, so you can change the assembler output
- + by changing the contents of the vector. */
- +#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
- + avr32_final_prescan_insn ((INSN), (OPVEC), (NOPERANDS))
- +
- +/*
- +A C expression which evaluates to true if CODE is a valid
- +punctuation character for use in the PRINT_OPERAND macro. If
- +PRINT_OPERAND_PUNCT_VALID_P is not defined, it means that no
- +punctuation characters (except for the standard one, '%') are used
- +in this way.
- +*/
- +#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
- + (((CODE) == '?') \
- + || ((CODE) == '!'))
- +
- +/*
- +A C compound statement to output to stdio stream STREAM the
- +assembler syntax for an instruction operand that is a memory reference
- +whose address is X. X is an RTL expression.
- +
- +On some machines, the syntax for a symbolic address depends on the
- +section that the address refers to. On these machines, define the macro
- +ENCODE_SECTION_INFO to store the information into the
- +symbol_ref, and then check for it here. (see Assembler Format.)
- +*/
- +#define PRINT_OPERAND_ADDRESS(STREAM, X) avr32_print_operand_address(STREAM, X)
- +
- +
- +/** Output of Dispatch Tables **/
- +
- +/*
- + * A C statement to output to the stdio stream stream an assembler
- + * pseudo-instruction to generate a difference between two
- + * labels. value and rel are the numbers of two internal labels. The
- + * definitions of these labels are output using
- + * (*targetm.asm_out.internal_label), and they must be printed in the
- + * same way here. For example,
- + *
- + * fprintf (stream, "\t.word L%d-L%d\n",
- + * value, rel)
- + *
- + * You must provide this macro on machines where the addresses in a
- + * dispatch table are relative to the table's own address. If defined,
- + * GCC will also use this macro on all machines when producing
- + * PIC. body is the body of the ADDR_DIFF_VEC; it is provided so that
- + * the mode and flags can be read.
- + */
- +#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
- + fprintf(STREAM, "\tbral\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
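- +/* Note that, unlike the generic fprintf example in the comment above, this
- +   definition emits a relative branch (bral) into the dispatch table rather
- +   than a label-difference word.  */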
- +
- +/*
- +This macro should be provided on machines where the addresses
- +in a dispatch table are absolute.
- +
- +The definition should be a C statement to output to the stdio stream
- +STREAM an assembler pseudo-instruction to generate a reference to
- +a label. VALUE is the number of an internal label whose
- +definition is output using ASM_OUTPUT_INTERNAL_LABEL.
- +For example,
- +
- +fprintf(STREAM, "\t.word L%d\n", VALUE)
- +*/
- +
- +#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
- + fprintf(STREAM, "\t.long %sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
- +
- +/** Assembler Commands for Exception Regions */
- +
- +/* ToDo: All of this subsection */
- +
- +/** Assembler Commands for Alignment */
- +
- +
- +/*
- +A C statement to output to the stdio stream STREAM an assembler
- +command to advance the location counter to a multiple of 2 to the
- +POWER bytes. POWER will be a C expression of type int.
- +*/
- +#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
- + do \
- + { \
- + if ((POWER) != 0) \
- + fprintf(STREAM, "\t.align\t%d\n", POWER); \
- + } \
- + while (0)
- +
- +/*
- +Like ASM_OUTPUT_ALIGN, except that the 'nop' instruction is used for padding, if
- +necessary.
- +*/
- +#define ASM_OUTPUT_ALIGN_WITH_NOP(STREAM, POWER) \
- + fprintf(STREAM, "\t.balignw\t%d, 0xd703\n", (1 << POWER))
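- +/* For example, POWER == 2 makes the definition above emit
- +
- +       .balignw 4, 0xd703
- +
- +   i.e. align to a 4-byte boundary and pad with the 16-bit nop pattern
- +   0xd703.  */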
- +
- +
- +
- +/******************************************************************************
- + * Controlling Debugging Information Format
- + *****************************************************************************/
- +
- +/* How to renumber registers for dbx and gdb. */
- +#define DBX_REGISTER_NUMBER(REGNO) ASM_REGNUM (REGNO)
- +
- +/* The DWARF 2 CFA column which tracks the return address. */
- +#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM(LR_REGNUM)
- +
- +/*
- +Define this macro if GCC should produce dwarf version 2 format
- +debugging output in response to the -g option.
- +
- +To support optional call frame debugging information, you must also
- +define INCOMING_RETURN_ADDR_RTX and either set
- +RTX_FRAME_RELATED_P on the prologue insns if you use RTL for the
- +prologue, or call dwarf2out_def_cfa and dwarf2out_reg_save
- +as appropriate from TARGET_ASM_FUNCTION_PROLOGUE if you don't.
- +*/
- +#define DWARF2_DEBUGGING_INFO 1
- +
- +
- +#define DWARF2_ASM_LINE_DEBUG_INFO 1
- +#define DWARF2_FRAME_INFO 1
- +
- +
- +/******************************************************************************
- + * Miscellaneous Parameters
- + *****************************************************************************/
- +
- +/* ToDo: a lot */
- +
- +/*
- +An alias for a machine mode name. This is the machine mode that
- +elements of a jump-table should have.
- +*/
- +#define CASE_VECTOR_MODE SImode
- +
- +/*
- +Define this macro to be a C expression to indicate when jump-tables
- +should contain relative addresses. If jump-tables never contain
- +relative addresses, then you need not define this macro.
- +*/
- +#define CASE_VECTOR_PC_RELATIVE 0
- +
- +/* Increase the threshold for using table jumps on the UC arch. */
- +#define CASE_VALUES_THRESHOLD (TARGET_BRANCH_PRED ? 4 : 7)
- +
- +/*
- +The maximum number of bytes that a single instruction can move quickly
- +between memory and registers or between two memory locations.
- +*/
- +#define MOVE_MAX (2*UNITS_PER_WORD)
- +
- +
- +/* A C expression that is nonzero if on this machine the number of bits actually used
- +   for the count of a shift operation is equal to the number of bits needed to represent
- +   the size of the object being shifted. When this macro is nonzero, the compiler will
- +   assume that it is safe to omit a sign-extend, zero-extend, and certain bitwise 'and'
- +   instructions that truncate the count of a shift operation. On machines that have
- +   instructions that act on bit-fields at variable positions, which may include 'bit test'
- +   instructions, a nonzero SHIFT_COUNT_TRUNCATED also enables deletion of truncations
- +   of the values that serve as arguments to bit-field instructions.
- +   If both types of instructions truncate the count (for shifts) and position (for bit-field
- +   operations), or if no variable-position bit-field instructions exist, you should define
- +   this macro.
- +   However, on some machines, such as the 80386 and the 680x0, truncation only applies
- +   to shift operations and not the (real or pretended) bit-field operations. Define
- +   SHIFT_COUNT_TRUNCATED to be zero on such machines. Instead, add patterns to the 'md'
- +   file that include the implied truncation of the shift instructions.
- +   You need not define this macro if it would always have the value of zero. */
- +#define SHIFT_COUNT_TRUNCATED 1
- +
- +/*
- +A C expression which is nonzero if on this machine it is safe to
- +convert an integer of INPREC bits to one of OUTPREC
- +bits (where OUTPREC is smaller than INPREC) by merely
- +operating on it as if it had only OUTPREC bits.
- +
- +On many machines, this expression can be 1.
- +
- +When TRULY_NOOP_TRUNCATION returns 1 for a pair of sizes for
- +modes for which MODES_TIEABLE_P is 0, suboptimal code can result.
- +If this is the case, making TRULY_NOOP_TRUNCATION return 0 in
- +such cases may improve things.
- +*/
- +#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
- +
- +/*
- +An alias for the machine mode for pointers. On most machines, define
- +this to be the integer mode corresponding to the width of a hardware
- +pointer; SImode on 32-bit machines or DImode on 64-bit machines.
- +On some machines you must define this to be one of the partial integer
- +modes, such as PSImode.
- +
- +The width of Pmode must be at least as large as the value of
- +POINTER_SIZE. If it is not equal, you must define the macro
- +POINTERS_EXTEND_UNSIGNED to specify how pointers are extended
- +to Pmode.
- +*/
- +#define Pmode SImode
- +
- +/*
- +An alias for the machine mode used for memory references to functions
- +being called, in call RTL expressions. On most machines this
- +should be QImode.
- +*/
- +#define FUNCTION_MODE SImode
- +
- +
- +#define REG_S_P(x) \
- + (REG_P (x) || (GET_CODE (x) == SUBREG && REG_P (XEXP (x, 0))))
- +
- +
- +/* If defined, modifies the length assigned to instruction INSN as a
- + function of the context in which it is used. LENGTH is an lvalue
- + that contains the initially computed length of the insn and should
- + be updated with the correct length of the insn. */
- +#define ADJUST_INSN_LENGTH(INSN, LENGTH) \
- + ((LENGTH) = avr32_adjust_insn_length ((INSN), (LENGTH)))
- +
- +
- +#define CLZ_DEFINED_VALUE_AT_ZERO(mode, value) \
- + (value = 32, (mode == SImode))
- +
- +#define CTZ_DEFINED_VALUE_AT_ZERO(mode, value) \
- + (value = 32, (mode == SImode))
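- +/* Both macros above declare the result at zero to be 32 (the operand width)
- +   and only for SImode operands.  */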
- +
- +#define UNITS_PER_SIMD_WORD(mode) UNITS_PER_WORD
- +
- +#define STORE_FLAG_VALUE 1
- +
- +
- +/* IF-conversion macros. */
- +#define IFCVT_MODIFY_INSN( CE_INFO, PATTERN, INSN ) \
- + { \
- + (PATTERN) = avr32_ifcvt_modify_insn (CE_INFO, PATTERN, INSN, &num_true_changes); \
- + }
- +
- +#define IFCVT_EXTRA_FIELDS \
- + int num_cond_clobber_insns; \
- + int num_extra_move_insns; \
- + rtx extra_move_insns[MAX_CONDITIONAL_EXECUTE]; \
- + rtx moved_insns[MAX_CONDITIONAL_EXECUTE];
- +
- +#define IFCVT_INIT_EXTRA_FIELDS( CE_INFO ) \
- + { \
- + (CE_INFO)->num_cond_clobber_insns = 0; \
- + (CE_INFO)->num_extra_move_insns = 0; \
- + }
- +
- +
- +#define IFCVT_MODIFY_CANCEL( CE_INFO ) avr32_ifcvt_modify_cancel (CE_INFO, &num_true_changes)
- +
- +#define IFCVT_ALLOW_MODIFY_TEST_IN_INSN 1
- +#define IFCVT_COND_EXEC_BEFORE_RELOAD (TARGET_COND_EXEC_BEFORE_RELOAD)
- +
- +enum avr32_builtins
- +{
- + AVR32_BUILTIN_MTSR,
- + AVR32_BUILTIN_MFSR,
- + AVR32_BUILTIN_MTDR,
- + AVR32_BUILTIN_MFDR,
- + AVR32_BUILTIN_CACHE,
- + AVR32_BUILTIN_SYNC,
- + AVR32_BUILTIN_SSRF,
- + AVR32_BUILTIN_CSRF,
- + AVR32_BUILTIN_TLBR,
- + AVR32_BUILTIN_TLBS,
- + AVR32_BUILTIN_TLBW,
- + AVR32_BUILTIN_BREAKPOINT,
- + AVR32_BUILTIN_XCHG,
- + AVR32_BUILTIN_LDXI,
- + AVR32_BUILTIN_BSWAP16,
- + AVR32_BUILTIN_BSWAP32,
- + AVR32_BUILTIN_COP,
- + AVR32_BUILTIN_MVCR_W,
- + AVR32_BUILTIN_MVRC_W,
- + AVR32_BUILTIN_MVCR_D,
- + AVR32_BUILTIN_MVRC_D,
- + AVR32_BUILTIN_MULSATHH_H,
- + AVR32_BUILTIN_MULSATHH_W,
- + AVR32_BUILTIN_MULSATRNDHH_H,
- + AVR32_BUILTIN_MULSATRNDWH_W,
- + AVR32_BUILTIN_MULSATWH_W,
- + AVR32_BUILTIN_MACSATHH_W,
- + AVR32_BUILTIN_SATADD_H,
- + AVR32_BUILTIN_SATSUB_H,
- + AVR32_BUILTIN_SATADD_W,
- + AVR32_BUILTIN_SATSUB_W,
- + AVR32_BUILTIN_MULWH_D,
- + AVR32_BUILTIN_MULNWH_D,
- + AVR32_BUILTIN_MACWH_D,
- + AVR32_BUILTIN_MACHH_D,
- + AVR32_BUILTIN_MUSFR,
- + AVR32_BUILTIN_MUSTR,
- + AVR32_BUILTIN_SATS,
- + AVR32_BUILTIN_SATU,
- + AVR32_BUILTIN_SATRNDS,
- + AVR32_BUILTIN_SATRNDU,
- + AVR32_BUILTIN_MEMS,
- + AVR32_BUILTIN_MEMC,
- + AVR32_BUILTIN_MEMT,
- + AVR32_BUILTIN_SLEEP,
- + AVR32_BUILTIN_DELAY_CYCLES
- +};
- +
- +
- +#define FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE, COMPARISON) \
- + ((MODE == SFmode) || (MODE == DFmode))
- +
- +#define RENAME_LIBRARY_SET ".set"
- +
- +/* Make ABI_NAME an alias for __GCC_NAME. */
- +#define RENAME_LIBRARY(GCC_NAME, ABI_NAME) \
- + __asm__ (".globl\t__avr32_" #ABI_NAME "\n" \
- + ".set\t__avr32_" #ABI_NAME \
- + ", __" #GCC_NAME "\n");
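- +/* For example, RENAME_LIBRARY (muldi3, mul64) expands to assembly equivalent
- +   to:
- +
- +       .globl __avr32_mul64
- +       .set   __avr32_mul64, __muldi3
- +
- +   making the ABI name an alias for the libgcc routine.  */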
- +
- +/* Give libgcc functions avr32 ABI name. */
- +#ifdef L_muldi3
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (muldi3, mul64)
- +#endif
- +#ifdef L_divdi3
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (divdi3, sdiv64)
- +#endif
- +#ifdef L_udivdi3
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (udivdi3, udiv64)
- +#endif
- +#ifdef L_moddi3
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (moddi3, smod64)
- +#endif
- +#ifdef L_umoddi3
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (umoddi3, umod64)
- +#endif
- +#ifdef L_ashldi3
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashldi3, lsl64)
- +#endif
- +#ifdef L_lshrdi3
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (lshrdi3, lsr64)
- +#endif
- +#ifdef L_ashrdi3
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashrdi3, asr64)
- +#endif
- +
- +#ifdef L_fixsfdi
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixsfdi, f32_to_s64)
- +#endif
- +#ifdef L_fixunssfdi
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunssfdi, f32_to_u64)
- +#endif
- +#ifdef L_floatdidf
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdidf, s64_to_f64)
- +#endif
- +#ifdef L_floatdisf
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdisf, s64_to_f32)
- +#endif
- +
- +#endif
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/avr32.md gcc-4.4.6/gcc/config/avr32/avr32.md
- --- gcc-4.4.6.orig/gcc/config/avr32/avr32.md 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/avr32.md 2011-10-22 19:23:08.524581303 +0200
- @@ -0,0 +1,5198 @@
- +;; AVR32 machine description file.
- +;; Copyright 2003,2004,2005,2006,2007,2008,2009 Atmel Corporation.
- +;;
- +;; This file is part of GCC.
- +;;
- +;; This program is free software; you can redistribute it and/or modify
- +;; it under the terms of the GNU General Public License as published by
- +;; the Free Software Foundation; either version 2 of the License, or
- +;; (at your option) any later version.
- +;;
- +;; This program is distributed in the hope that it will be useful,
- +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
- +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- +;; GNU General Public License for more details.
- +;;
- +;; You should have received a copy of the GNU General Public License
- +;; along with this program; if not, write to the Free Software
- +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- +
- +;; -*- Mode: Scheme -*-
- +
- +(define_attr "type" "alu,alu2,alu_sat,mulhh,mulwh,mulww_w,mulww_d,div,machh_w,macww_w,macww_d,branch,call,load,load_rm,store,load2,load4,store2,store4,fmul,fcmps,fcmpd,fcast,fmv,fmvcpu,fldd,fstd,flds,fsts,fstm"
- + (const_string "alu"))
- +
- +
- +(define_attr "cc" "none,set_vncz,set_ncz,set_cz,set_z,set_z_if_not_v2,bld,compare,cmp_cond_insn,clobber,call_set,fpcompare,from_fpcc"
- + (const_string "none"))
- +
- +
- +; NB! Keep this in sync with enum architecture_type in avr32.h
- +(define_attr "pipeline" "ap,ucr1,ucr2,ucr2nomul,ucr3,ucr3fp"
- + (const (symbol_ref "avr32_arch->arch_type")))
- +
- +; Insn length in bytes
- +(define_attr "length" ""
- + (const_int 4))
- +
- +; Signal if an insn is predicable and hence can be conditionally executed.
- +(define_attr "predicable" "no,yes" (const_string "no"))
- +
- +;; Uses of UNSPEC in this file:
- +(define_constants
- + [(UNSPEC_PUSHM 0)
- + (UNSPEC_POPM 1)
- + (UNSPEC_UDIVMODSI4_INTERNAL 2)
- + (UNSPEC_DIVMODSI4_INTERNAL 3)
- + (UNSPEC_STM 4)
- + (UNSPEC_LDM 5)
- + (UNSPEC_MOVSICC 6)
- + (UNSPEC_ADDSICC 7)
- + (UNSPEC_COND_MI 8)
- + (UNSPEC_COND_PL 9)
- + (UNSPEC_PIC_SYM 10)
- + (UNSPEC_PIC_BASE 11)
- + (UNSPEC_STORE_MULTIPLE 12)
- + (UNSPEC_STMFP 13)
- + (UNSPEC_FRCPA 14)
- + (UNSPEC_REG_TO_CC 15)
- + (UNSPEC_FORCE_MINIPOOL 16)
- + (UNSPEC_SATS 17)
- + (UNSPEC_SATU 18)
- + (UNSPEC_SATRNDS 19)
- + (UNSPEC_SATRNDU 20)
- + ])
- +
- +(define_constants
- + [(VUNSPEC_EPILOGUE 0)
- + (VUNSPEC_CACHE 1)
- + (VUNSPEC_MTSR 2)
- + (VUNSPEC_MFSR 3)
- + (VUNSPEC_BLOCKAGE 4)
- + (VUNSPEC_SYNC 5)
- + (VUNSPEC_TLBR 6)
- + (VUNSPEC_TLBW 7)
- + (VUNSPEC_TLBS 8)
- + (VUNSPEC_BREAKPOINT 9)
- + (VUNSPEC_MTDR 10)
- + (VUNSPEC_MFDR 11)
- + (VUNSPEC_MVCR 12)
- + (VUNSPEC_MVRC 13)
- + (VUNSPEC_COP 14)
- + (VUNSPEC_ALIGN 15)
- + (VUNSPEC_POOL_START 16)
- + (VUNSPEC_POOL_END 17)
- + (VUNSPEC_POOL_4 18)
- + (VUNSPEC_POOL_8 19)
- + (VUNSPEC_POOL_16 20)
- + (VUNSPEC_MUSFR 21)
- + (VUNSPEC_MUSTR 22)
- + (VUNSPEC_SYNC_CMPXCHG 23)
- + (VUNSPEC_SYNC_SET_LOCK_AND_LOAD 24)
- + (VUNSPEC_SYNC_STORE_IF_LOCK 25)
- + (VUNSPEC_EH_RETURN 26)
- + (VUNSPEC_FRS 27)
- + (VUNSPEC_CSRF 28)
- + (VUNSPEC_SSRF 29)
- + (VUNSPEC_SLEEP 30)
- + (VUNSPEC_DELAY_CYCLES 31)
- + (VUNSPEC_DELAY_CYCLES_1 32)
- + (VUNSPEC_DELAY_CYCLES_2 33)
- + (VUNSPEC_NOP 34)
- + (VUNSPEC_NOP3 35)
- + ])
- +
- +(define_constants
- + [
- + ;; R7 = 15-7 = 8
- + (FP_REGNUM 8)
- + ;; Return Register = R12 = 15 - 12 = 3
- + (RETVAL_REGNUM 3)
- + ;; SP = R13 = 15 - 13 = 2
- + (SP_REGNUM 2)
- + ;; LR = R14 = 15 - 14 = 1
- + (LR_REGNUM 1)
- + ;; PC = R15 = 15 - 15 = 0
- + (PC_REGNUM 0)
- + ;; FPSR = GENERAL_REGS + 1 = 17
- + (FPCC_REGNUM 17)
- + ])
- +
- +
- +
- +
- +;;******************************************************************************
- +;; Macros
- +;;******************************************************************************
- +
- +;; Integer Modes for basic alu insns
- +(define_mode_iterator INTM [SI HI QI])
- +(define_mode_attr alu_cc_attr [(SI "set_vncz") (HI "clobber") (QI "clobber")])
- +
- +;; Move word modes
- +(define_mode_iterator MOVM [SI V2HI V4QI])
- +
- +;; For mov/addcc insns
- +(define_mode_iterator ADDCC [SI HI QI])
- +(define_mode_iterator MOVCC [SF SI HI QI])
- +(define_mode_iterator CMP [DI SI HI QI])
- +(define_mode_attr store_postfix [(SF ".w") (SI ".w") (HI ".h") (QI ".b")])
- +(define_mode_attr load_postfix [(SF ".w") (SI ".w") (HI ".sh") (QI ".ub")])
- +(define_mode_attr load_postfix_s [(SI ".w") (HI ".sh") (QI ".sb")])
- +(define_mode_attr load_postfix_u [(SI ".w") (HI ".uh") (QI ".ub")])
- +(define_mode_attr pred_mem_constraint [(SF "RKu11") (SI "RKu11") (HI "RKu10") (QI "RKu09")])
- +(define_mode_attr cmp_constraint [(DI "rKu20") (SI "rKs21") (HI "r") (QI "r")])
- +(define_mode_attr cmp_predicate [(DI "register_immediate_operand")
- + (SI "register_const_int_operand")
- + (HI "register_operand")
- + (QI "register_operand")])
- +(define_mode_attr cmp_length [(DI "6")
- + (SI "4")
- + (HI "4")
- + (QI "4")])
- +
- +;; For all conditional insns
- +(define_code_iterator any_cond_b [ge lt geu ltu])
- +(define_code_iterator any_cond [gt ge lt le gtu geu ltu leu])
- +(define_code_iterator any_cond4 [gt le gtu leu])
- +(define_code_attr cond [(eq "eq") (ne "ne") (gt "gt") (ge "ge") (lt "lt") (le "le")
- + (gtu "hi") (geu "hs") (ltu "lo") (leu "ls")])
- +(define_code_attr invcond [(eq "ne") (ne "eq") (gt "le") (ge "lt") (lt "ge") (le "gt")
- + (gtu "ls") (geu "lo") (ltu "hs") (leu "hi")])
- +
- +;; For logical operations
- +(define_code_iterator logical [and ior xor])
- +(define_code_attr logical_insn [(and "and") (ior "or") (xor "eor")])
- +
- +;; Predicable operations with three register operands
- +(define_code_iterator predicable_op3 [and ior xor plus minus])
- +(define_code_attr predicable_insn3 [(and "and") (ior "or") (xor "eor") (plus "add") (minus "sub")])
- +(define_code_attr predicable_commutative3 [(and "%") (ior "%") (xor "%") (plus "%") (minus "")])
- +
- +;; Load the predicates
- +(include "predicates.md")
- +
- +
- +;;******************************************************************************
- +;; Automaton pipeline description for avr32
- +;;******************************************************************************
- +
- +(define_automaton "avr32_ap")
- +
- +
- +(define_cpu_unit "is" "avr32_ap")
- +(define_cpu_unit "a1,m1,da" "avr32_ap")
- +(define_cpu_unit "a2,m2,d" "avr32_ap")
- +
- +;;Alu instructions
- +(define_insn_reservation "alu_op" 1
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "alu"))
- + "is,a1,a2")
- +
- +(define_insn_reservation "alu2_op" 2
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "alu2"))
- + "is,is+a1,a1+a2,a2")
- +
- +(define_insn_reservation "alu_sat_op" 2
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "alu_sat"))
- + "is,a1,a2")
- +
- +
- +;;Mul instructions
- +(define_insn_reservation "mulhh_op" 2
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "mulhh,mulwh"))
- + "is,m1,m2")
- +
- +(define_insn_reservation "mulww_w_op" 3
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "mulww_w"))
- + "is,m1,m1+m2,m2")
- +
- +(define_insn_reservation "mulww_d_op" 5
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "mulww_d"))
- + "is,m1,m1+m2,m1+m2,m2,m2")
- +
- +(define_insn_reservation "div_op" 33
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "div"))
- + "is,m1,m1*31 + m2*31,m2")
- +
- +(define_insn_reservation "machh_w_op" 3
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "machh_w"))
- + "is*2,m1,m2")
- +
- +
- +(define_insn_reservation "macww_w_op" 4
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "macww_w"))
- + "is*2,m1,m1,m2")
- +
- +
- +(define_insn_reservation "macww_d_op" 6
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "macww_d"))
- + "is*2,m1,m1+m2,m1+m2,m2")
- +
- +;;Bypasses for Mac instructions, because of accumulator cache.
- +;;Set the latency as low as possible so that the compiler can place
- +;;mul -> mac and mac -> mac combinations which use the same
- +;;accumulator cache close together, avoiding any intervening
- +;;instructions which could ruin the accumulator cache.
- +(define_bypass 4 "machh_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
- +(define_bypass 5 "macww_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
- +(define_bypass 7 "macww_d_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
- +
- +(define_bypass 3 "mulhh_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
- +(define_bypass 4 "mulww_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
- +(define_bypass 6 "mulww_d_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
- +
- +
- +;;Bypasses for all mul/mac instructions followed by an instruction
- +;;which reads the output AND writes the result to the same register.
- +;;This will generate a Write After Write hazard which gives an
- +;;extra cycle before the result is ready.
- +(define_bypass 0 "machh_w_op" "machh_w_op" "avr32_valid_macmac_bypass")
- +(define_bypass 0 "macww_w_op" "macww_w_op" "avr32_valid_macmac_bypass")
- +(define_bypass 0 "macww_d_op" "macww_d_op" "avr32_valid_macmac_bypass")
- +
- +(define_bypass 0 "mulhh_op" "machh_w_op" "avr32_valid_mulmac_bypass")
- +(define_bypass 0 "mulww_w_op" "macww_w_op" "avr32_valid_mulmac_bypass")
- +(define_bypass 0 "mulww_d_op" "macww_d_op" "avr32_valid_mulmac_bypass")
- +
- +;;Branch and call instructions
- +;;We assume that all branches and rcalls are predicted correctly :-)
- +;;while calls use a lot of cycles.
- +(define_insn_reservation "branch_op" 0
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "branch"))
- + "nothing")
- +
- +(define_insn_reservation "call_op" 10
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "call"))
- + "nothing")
- +
- +
- +;;Load store instructions
- +(define_insn_reservation "load_op" 2
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "load"))
- + "is,da,d")
- +
- +(define_insn_reservation "load_rm_op" 3
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "load_rm"))
- + "is,da,d")
- +
- +
- +(define_insn_reservation "store_op" 0
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "store"))
- + "is,da,d")
- +
- +
- +(define_insn_reservation "load_double_op" 3
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "load2"))
- + "is,da,da+d,d")
- +
- +(define_insn_reservation "load_quad_op" 4
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "load4"))
- + "is,da,da+d,da+d,d")
- +
- +(define_insn_reservation "store_double_op" 0
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "store2"))
- + "is,da,da+d,d")
- +
- +
- +(define_insn_reservation "store_quad_op" 0
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "store4"))
- + "is,da,da+d,da+d,d")
- +
- +;;For stores the operand to write to memory is read in d, so
- +;;the real latency between any instruction and a store is
- +;;one less than for the instructions which read their operands in the first
- +;;execution stage.
- +(define_bypass 2 "load_double_op" "store_double_op" "avr32_store_bypass")
- +(define_bypass 3 "load_quad_op" "store_quad_op" "avr32_store_bypass")
- +(define_bypass 1 "load_op" "store_op" "avr32_store_bypass")
- +(define_bypass 2 "load_rm_op" "store_op" "avr32_store_bypass")
- +(define_bypass 1 "alu_sat_op" "store_op" "avr32_store_bypass")
- +(define_bypass 1 "alu2_op" "store_op" "avr32_store_bypass")
- +(define_bypass 1 "mulhh_op" "store_op" "avr32_store_bypass")
- +(define_bypass 2 "mulww_w_op" "store_op" "avr32_store_bypass")
- +(define_bypass 4 "mulww_d_op" "store_op" "avr32_store_bypass" )
- +(define_bypass 2 "machh_w_op" "store_op" "avr32_store_bypass")
- +(define_bypass 3 "macww_w_op" "store_op" "avr32_store_bypass")
- +(define_bypass 5 "macww_d_op" "store_op" "avr32_store_bypass")
- +
- +
- +; Bypass for load double operation. If only the first loaded word is needed
- +; then the latency is 2
- +(define_bypass 2 "load_double_op"
- + "load_op,load_rm_op,alu_sat_op, alu2_op, alu_op, mulhh_op, mulww_w_op,
- + mulww_d_op, machh_w_op, macww_w_op, macww_d_op"
- + "avr32_valid_load_double_bypass")
- +
- +; Bypass for load quad operation. If only the first or second loaded word is needed
- +; we set the latency to 2
- +(define_bypass 2 "load_quad_op"
- + "load_op,load_rm_op,alu_sat_op, alu2_op, alu_op, mulhh_op, mulww_w_op,
- + mulww_d_op, machh_w_op, macww_w_op, macww_d_op"
- + "avr32_valid_load_quad_bypass")
- +
- +
- +;;******************************************************************************
- +;; End of Automaton pipeline description for avr32
- +;;******************************************************************************
- +
- +(define_cond_exec
- + [(match_operator 0 "avr32_comparison_operator"
- + [(match_operand:CMP 1 "register_operand" "r")
- + (match_operand:CMP 2 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")])]
- + "TARGET_V2_INSNS"
- + "%!"
- +)
- +
- +(define_cond_exec
- + [(match_operator 0 "avr32_comparison_operator"
- + [(and:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "one_bit_set_operand" "i"))
- + (const_int 0)])]
- + "TARGET_V2_INSNS"
- + "%!"
- + )
- +
- +;;=============================================================================
- +;; move
- +;;-----------------------------------------------------------------------------
- +
- +
- +;;== char - 8 bits ============================================================
- +(define_expand "movqi"
- + [(set (match_operand:QI 0 "nonimmediate_operand" "")
- + (match_operand:QI 1 "general_operand" ""))]
- + ""
- + {
- + if ( can_create_pseudo_p () ){
- + if (GET_CODE (operands[1]) == MEM && optimize){
- + rtx reg = gen_reg_rtx (SImode);
- +
- + emit_insn (gen_zero_extendqisi2 (reg, operands[1]));
- + operands[1] = gen_lowpart (QImode, reg);
- + }
- +
- + /* One of the ops has to be in a register. */
- + if (GET_CODE (operands[0]) == MEM)
- + operands[1] = force_reg (QImode, operands[1]);
- + }
- +
- + })
- +
- +(define_insn "*movqi_internal"
- + [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m,r")
- + (match_operand:QI 1 "general_operand" "rKs08,m,r,i"))]
- + "register_operand (operands[0], QImode)
- + || register_operand (operands[1], QImode)"
- + "@
- + mov\t%0, %1
- + ld.ub\t%0, %1
- + st.b\t%0, %1
- + mov\t%0, %1"
- + [(set_attr "length" "2,4,4,4")
- + (set_attr "type" "alu,load_rm,store,alu")])
- +
- +
- +
- +;;== short - 16 bits ==========================================================
- +(define_expand "movhi"
- + [(set (match_operand:HI 0 "nonimmediate_operand" "")
- + (match_operand:HI 1 "general_operand" ""))]
- + ""
- + {
- + if ( can_create_pseudo_p () ){
- + if (GET_CODE (operands[1]) == MEM && optimize){
- + rtx reg = gen_reg_rtx (SImode);
- +
- + emit_insn (gen_extendhisi2 (reg, operands[1]));
- + operands[1] = gen_lowpart (HImode, reg);
- + }
- +
- + /* One of the ops has to be in a register. */
- + if (GET_CODE (operands[0]) == MEM)
- + operands[1] = force_reg (HImode, operands[1]);
- + }
- +
- + })
- +
- +
- +(define_insn "*movhi_internal"
- + [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r")
- + (match_operand:HI 1 "general_operand" "rKs08,m,r,i"))]
- + "register_operand (operands[0], HImode)
- + || register_operand (operands[1], HImode)"
- + "@
- + mov\t%0, %1
- + ld.sh\t%0, %1
- + st.h\t%0, %1
- + mov\t%0, %1"
- + [(set_attr "length" "2,4,4,4")
- + (set_attr "type" "alu,load_rm,store,alu")])
- +
- +
- +;;== int - 32 bits ============================================================
- +
- +(define_expand "movmisalignsi"
- + [(set (match_operand:SI 0 "nonimmediate_operand" "")
- + (match_operand:SI 1 "nonimmediate_operand" ""))]
- + "TARGET_UNALIGNED_WORD"
- + {
- + }
- +)
- +
- +(define_expand "mov<mode>"
- + [(set (match_operand:MOVM 0 "avr32_non_rmw_nonimmediate_operand" "")
- + (match_operand:MOVM 1 "avr32_non_rmw_general_operand" ""))]
- + ""
- + {
- +
- + /* One of the ops has to be in a register. */
- + if (GET_CODE (operands[0]) == MEM)
- + operands[1] = force_reg (<MODE>mode, operands[1]);
- +
- + /* Check for out of range immediate constants as these may
- + occur during reloading, since it seems like reload does
- + not check if the immediate is legitimate. Don't know if
- + this is a bug? */
- + if ( reload_in_progress
- + && avr32_imm_in_const_pool
- + && GET_CODE(operands[1]) == CONST_INT
- + && !avr32_const_ok_for_constraint_p(INTVAL(operands[1]), 'K', "Ks21") ){
- + operands[1] = force_const_mem(SImode, operands[1]);
- + }
- + /* Check for RMW memory operands. They are not allowed for mov operations
- + only the atomic memc/s/t operations */
- + if ( !reload_in_progress
- + && avr32_rmw_memory_operand (operands[0], <MODE>mode) ){
- + operands[0] = copy_rtx (operands[0]);
- + XEXP(operands[0], 0) = force_reg (<MODE>mode, XEXP(operands[0], 0));
- + }
- +
- + if ( !reload_in_progress
- + && avr32_rmw_memory_operand (operands[1], <MODE>mode) ){
- + operands[1] = copy_rtx (operands[1]);
- + XEXP(operands[1], 0) = force_reg (<MODE>mode, XEXP(operands[1], 0));
- + }
- + if ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
- + && !avr32_legitimate_pic_operand_p(operands[1]) )
- + operands[1] = legitimize_pic_address (operands[1], <MODE>mode,
- + (can_create_pseudo_p () ? 0: operands[0]));
- + else if ( flag_pic && avr32_address_operand(operands[1], GET_MODE(operands[1])) )
- + /* If we have an address operand then this function uses the pic register. */
- + crtl->uses_pic_offset_table = 1;
- + })
- +
- +
- +(define_insn "mov<mode>_internal"
- + [(set (match_operand:MOVM 0 "avr32_non_rmw_nonimmediate_operand" "=r, r, r,r,r,Q,r")
- + (match_operand:MOVM 1 "avr32_non_rmw_general_operand" "rKs08,Ks21,J,n,Q,r,W"))]
- + "(register_operand (operands[0], <MODE>mode)
- + || register_operand (operands[1], <MODE>mode))
- + && !avr32_rmw_memory_operand (operands[0], <MODE>mode)
- + && !avr32_rmw_memory_operand (operands[1], <MODE>mode)"
- + {
- + switch (which_alternative) {
- + case 0:
- + case 1: return "mov\t%0, %1";
- + case 2:
- + if ( TARGET_V2_INSNS )
- + return "movh\t%0, hi(%1)";
- + /* Fallthrough */
- + case 3: return "mov\t%0, lo(%1)\;orh\t%0,hi(%1)";
- + case 4:
- + if ( (REG_P(XEXP(operands[1], 0))
- + && REGNO(XEXP(operands[1], 0)) == SP_REGNUM)
- + || (GET_CODE(XEXP(operands[1], 0)) == PLUS
- + && REGNO(XEXP(XEXP(operands[1], 0), 0)) == SP_REGNUM
- + && GET_CODE(XEXP(XEXP(operands[1], 0), 1)) == CONST_INT
- + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) % 4 == 0
- + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) <= 0x1FC) )
- + return "lddsp\t%0, %1";
- + else if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])) )
- + return "lddpc\t%0, %1";
- + else
- + return "ld.w\t%0, %1";
- + case 5:
- + if ( (REG_P(XEXP(operands[0], 0))
- + && REGNO(XEXP(operands[0], 0)) == SP_REGNUM)
- + || (GET_CODE(XEXP(operands[0], 0)) == PLUS
- + && REGNO(XEXP(XEXP(operands[0], 0), 0)) == SP_REGNUM
- + && GET_CODE(XEXP(XEXP(operands[0], 0), 1)) == CONST_INT
- + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) % 4 == 0
- + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) <= 0x1FC) )
- + return "stdsp\t%0, %1";
- + else
- + return "st.w\t%0, %1";
- + case 6:
- + if ( TARGET_HAS_ASM_ADDR_PSEUDOS )
- + return "lda.w\t%0, %1";
- + else
- + return "ld.w\t%0, r6[%1@got]";
- + default:
- + abort();
- + }
- + }
- +
- + [(set_attr "length" "2,4,4,8,4,4,8")
- + (set_attr "type" "alu,alu,alu,alu2,load,store,load")
- + (set_attr "cc" "none,none,set_z_if_not_v2,set_z,none,none,clobber")])
- +
- +
- +(define_expand "reload_out_rmw_memory_operand"
- + [(set (match_operand:SI 2 "register_operand" "=r")
- + (match_operand:SI 0 "address_operand" ""))
- + (set (mem:SI (match_dup 2))
- + (match_operand:SI 1 "register_operand" ""))]
- + ""
- + {
- + operands[0] = XEXP(operands[0], 0);
- + }
- +)
- +
- +(define_expand "reload_in_rmw_memory_operand"
- + [(set (match_operand:SI 2 "register_operand" "=r")
- + (match_operand:SI 1 "address_operand" ""))
- + (set (match_operand:SI 0 "register_operand" "")
- + (mem:SI (match_dup 2)))]
- + ""
- + {
- + operands[1] = XEXP(operands[1], 0);
- + }
- +)
- +
- +
- +;; These instructions are for loading constants which cannot be loaded
- +;; directly from the constant pool because the offset is too large.
- +;; high and lo_sum are used even though in our case it should really be
- +;; low and high_sum :-)
- +(define_insn "mov_symbol_lo"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (high:SI (match_operand:SI 1 "immediate_operand" "i" )))]
- + ""
- + "mov\t%0, lo(%1)"
- + [(set_attr "type" "alu")
- + (set_attr "length" "4")]
- +)
- +
- +(define_insn "add_symbol_hi"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (lo_sum:SI (match_dup 0)
- + (match_operand:SI 1 "immediate_operand" "i" )))]
- + ""
- + "orh\t%0, hi(%1)"
- + [(set_attr "type" "alu")
- + (set_attr "length" "4")]
- +)
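- +
- +;; Taken together, the two patterns above materialize a full 32-bit symbol
- +;; address in two instructions, e.g. (illustrative only, register chosen
- +;; arbitrarily):
- +;;
- +;;     mov   r8, lo(some_symbol)
- +;;     orh   r8, hi(some_symbol)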
- +
- +
- +
- +;; When generating pic, we need to load the symbol offset into a register.
- +;; So that the optimizer does not confuse this with a normal symbol load
- +;; we use an unspec. The offset will be loaded from a constant pool entry,
- +;; since that is the only type of relocation we can use.
- +(define_insn "pic_load_addr"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (unspec:SI [(match_operand:SI 1 "" "")] UNSPEC_PIC_SYM))]
- + "flag_pic && CONSTANT_POOL_ADDRESS_P(XEXP(operands[1], 0))"
- + "lddpc\t%0, %1"
- + [(set_attr "type" "load")
- + (set_attr "length" "4")]
- +)
- +
- +(define_insn "pic_compute_got_from_pc"
- + [(set (match_operand:SI 0 "register_operand" "+r")
- + (unspec:SI [(minus:SI (pc)
- + (match_dup 0))] UNSPEC_PIC_BASE))
- + (use (label_ref (match_operand 1 "" "")))]
- + "flag_pic"
- + {
- + (*targetm.asm_out.internal_label) (asm_out_file, "L",
- + CODE_LABEL_NUMBER (operands[1]));
- + return \"rsub\t%0, pc\";
- + }
- + [(set_attr "cc" "clobber")
- + (set_attr "length" "2")]
- +)
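- +
- +;; Note that the output routine above emits the internal label referenced by
- +;; operand 1 immediately before the rsub, so "rsub %0, pc" computes pc minus
- +;; the previous contents of %0, matching the (minus (pc) (match_dup 0)) form
- +;; in the RTL.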
- +
- +;;== long long int - 64 bits ==================================================
- +
- +(define_expand "movdi"
- + [(set (match_operand:DI 0 "nonimmediate_operand" "")
- + (match_operand:DI 1 "general_operand" ""))]
- + ""
- + {
- +
- + /* One of the ops has to be in a register. */
- + if (GET_CODE (operands[0]) != REG)
- + operands[1] = force_reg (DImode, operands[1]);
- +
- + })
- +
- +
- +(define_insn_and_split "*movdi_internal"
- + [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r, r, r,r,r,m")
- + (match_operand:DI 1 "general_operand" "r, Ks08,Ks21,G,n,m,r"))]
- + "register_operand (operands[0], DImode)
- + || register_operand (operands[1], DImode)"
- + {
- + switch (which_alternative ){
- + case 0:
- + case 1:
- + case 2:
- + case 3:
- + case 4:
- + return "#";
- + case 5:
- + if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])))
- + return "ld.d\t%0, pc[%1 - .]";
- + else
- + return "ld.d\t%0, %1";
- + case 6:
- + return "st.d\t%0, %1";
- + default:
- + abort();
- + }
- + }
- +;; Let's split all reg->reg or imm->reg transfers into two SImode transfers.
- + "reload_completed &&
- + (REG_P (operands[0]) &&
- + (REG_P (operands[1])
- + || GET_CODE (operands[1]) == CONST_INT
- + || GET_CODE (operands[1]) == CONST_DOUBLE))"
- + [(set (match_dup 0) (match_dup 1))
- + (set (match_dup 2) (match_dup 3))]
- + {
- + operands[2] = gen_highpart (SImode, operands[0]);
- + operands[0] = gen_lowpart (SImode, operands[0]);
- + if ( REG_P(operands[1]) ){
- + operands[3] = gen_highpart(SImode, operands[1]);
- + operands[1] = gen_lowpart(SImode, operands[1]);
- + } else if ( GET_CODE(operands[1]) == CONST_DOUBLE
- + || GET_CODE(operands[1]) == CONST_INT ){
- + rtx split_const[2];
- + avr32_split_const_expr (DImode, SImode, operands[1], split_const);
- + operands[3] = split_const[1];
- + operands[1] = split_const[0];
- + } else {
- + internal_error("Illegal operand[1] for movdi split!");
- + }
- + }
- +
- + [(set_attr "length" "*,*,*,*,*,4,4")
- + (set_attr "type" "*,*,*,*,*,load2,store2")
- + (set_attr "cc" "*,*,*,*,*,none,none")])
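- +
- +;; For example (sketch only): after reload a reg->reg DImode copy is split by
- +;; the pattern above into two SImode moves, one for the low part and one for
- +;; the high part of the register pair, as computed by gen_lowpart/gen_highpart
- +;; in the split code.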
- +
- +
- +;;== 128 bits ==================================================
- +(define_expand "movti"
- + [(set (match_operand:TI 0 "nonimmediate_operand" "")
- + (match_operand:TI 1 "nonimmediate_operand" ""))]
- + "TARGET_ARCH_AP"
- + {
- +
- + /* One of the ops has to be in a register. */
- + if (GET_CODE (operands[0]) != REG)
- + operands[1] = force_reg (TImode, operands[1]);
- +
- +  /* We must fix up any pre_dec for loads and post_inc for stores. */
- + if ( GET_CODE (operands[0]) == MEM
- + && GET_CODE (XEXP(operands[0],0)) == POST_INC ){
- + emit_move_insn(gen_rtx_MEM(TImode, XEXP(XEXP(operands[0],0),0)), operands[1]);
- + emit_insn(gen_addsi3(XEXP(XEXP(operands[0],0),0), XEXP(XEXP(operands[0],0),0), GEN_INT(GET_MODE_SIZE(TImode))));
- + DONE;
- + }
- +
- + if ( GET_CODE (operands[1]) == MEM
- + && GET_CODE (XEXP(operands[1],0)) == PRE_DEC ){
- + emit_insn(gen_addsi3(XEXP(XEXP(operands[1],0),0), XEXP(XEXP(operands[1],0),0), GEN_INT(-GET_MODE_SIZE(TImode))));
- + emit_move_insn(operands[0], gen_rtx_MEM(TImode, XEXP(XEXP(operands[1],0),0)));
- + DONE;
- + }
- + })
- +
- +
- +(define_insn_and_split "*movti_internal"
- + [(set (match_operand:TI 0 "avr32_movti_dst_operand" "=r,&r, r, <RKu00,r,r")
- + (match_operand:TI 1 "avr32_movti_src_operand" " r,RKu00>,RKu00,r, n,T"))]
- + "(register_operand (operands[0], TImode)
- + || register_operand (operands[1], TImode))"
- + {
- + switch (which_alternative ){
- + case 0:
- + case 2:
- + case 4:
- + return "#";
- + case 1:
- + return "ldm\t%p1, %0";
- + case 3:
- + return "stm\t%p0, %1";
- + case 5:
- + return "ld.d\t%U0, pc[%1 - .]\;ld.d\t%B0, pc[%1 - . + 8]";
- + }
- + }
- +
- + "reload_completed &&
- + (REG_P (operands[0]) &&
- + (REG_P (operands[1])
- + /* If this is a load from the constant pool we split it into
- + two double loads. */
- + || (GET_CODE (operands[1]) == MEM
- + && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
- + && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
- + /* If this is a load where the pointer register is a part
- + of the register list, we must split it into two double
- + loads in order for it to be exception safe. */
- + || (GET_CODE (operands[1]) == MEM
- + && register_operand (XEXP (operands[1], 0), SImode)
- + && reg_overlap_mentioned_p (operands[0], XEXP (operands[1], 0)))
- + || GET_CODE (operands[1]) == CONST_INT
- + || GET_CODE (operands[1]) == CONST_DOUBLE))"
- + [(set (match_dup 0) (match_dup 1))
- + (set (match_dup 2) (match_dup 3))]
- + {
- + operands[2] = simplify_gen_subreg ( DImode, operands[0],
- + TImode, 0 );
- + operands[0] = simplify_gen_subreg ( DImode, operands[0],
- + TImode, 8 );
- + if ( REG_P(operands[1]) ){
- + operands[3] = simplify_gen_subreg ( DImode, operands[1],
- + TImode, 0 );
- + operands[1] = simplify_gen_subreg ( DImode, operands[1],
- + TImode, 8 );
- + } else if ( GET_CODE(operands[1]) == CONST_DOUBLE
- + || GET_CODE(operands[1]) == CONST_INT ){
- + rtx split_const[2];
- + avr32_split_const_expr (TImode, DImode, operands[1], split_const);
- + operands[3] = split_const[1];
- + operands[1] = split_const[0];
- + } else if (avr32_const_pool_ref_operand (operands[1], GET_MODE(operands[1]))){
- + rtx split_const[2];
- + rtx cop = avoid_constant_pool_reference (operands[1]);
- + if (operands[1] == cop)
- + cop = get_pool_constant (XEXP (operands[1], 0));
- + avr32_split_const_expr (TImode, DImode, cop, split_const);
- + operands[3] = force_const_mem (DImode, split_const[1]);
- + operands[1] = force_const_mem (DImode, split_const[0]);
- + } else {
- + rtx ptr_reg = XEXP (operands[1], 0);
- + operands[1] = gen_rtx_MEM (DImode,
- + gen_rtx_PLUS ( SImode,
- + ptr_reg,
- + GEN_INT (8) ));
- + operands[3] = gen_rtx_MEM (DImode,
- + ptr_reg);
- +
- + /* Check if the first load will clobber the pointer.
- + If so, we must switch the order of the operations. */
- + if ( reg_overlap_mentioned_p (operands[0], ptr_reg) )
- + {
- + /* We need to switch the order of the operations
- + so that the pointer register does not get clobbered
- + after the first double word load. */
- + rtx tmp;
- + tmp = operands[0];
- + operands[0] = operands[2];
- + operands[2] = tmp;
- + tmp = operands[1];
- + operands[1] = operands[3];
- + operands[3] = tmp;
- + }
- +
- +
- + }
- + }
- + [(set_attr "length" "*,*,4,4,*,8")
- + (set_attr "type" "*,*,load4,store4,*,load4")])
- +
- +
- +;;== float - 32 bits ==========================================================
- +(define_expand "movsf"
- + [(set (match_operand:SF 0 "nonimmediate_operand" "")
- + (match_operand:SF 1 "general_operand" ""))]
- + ""
- + {
- +
- +
- + /* One of the ops has to be in a register. */
- + if (GET_CODE (operands[0]) != REG)
- + operands[1] = force_reg (SFmode, operands[1]);
- +
- + })
- +
- +(define_insn "*movsf_internal"
- + [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,r,r,m")
- + (match_operand:SF 1 "general_operand" "r, G,F,m,r"))]
- + "(register_operand (operands[0], SFmode)
- + || register_operand (operands[1], SFmode))"
- + {
- + switch (which_alternative) {
- + case 0:
- + case 1: return "mov\t%0, %1";
- + case 2:
- + {
- + HOST_WIDE_INT target_float[2];
- + real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (operands[1]), SFmode);
- + if ( TARGET_V2_INSNS
- + && avr32_hi16_immediate_operand (GEN_INT (target_float[0]), VOIDmode) )
- + return "movh\t%0, hi(%1)";
- + else
- + return "mov\t%0, lo(%1)\;orh\t%0, hi(%1)";
- + }
- + case 3:
- + if ( (REG_P(XEXP(operands[1], 0))
- + && REGNO(XEXP(operands[1], 0)) == SP_REGNUM)
- + || (GET_CODE(XEXP(operands[1], 0)) == PLUS
- + && REGNO(XEXP(XEXP(operands[1], 0), 0)) == SP_REGNUM
- + && GET_CODE(XEXP(XEXP(operands[1], 0), 1)) == CONST_INT
- + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) % 4 == 0
- + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) <= 0x1FC) )
- + return "lddsp\t%0, %1";
- + else if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])) )
- + return "lddpc\t%0, %1";
- + else
- + return "ld.w\t%0, %1";
- + case 4:
- + if ( (REG_P(XEXP(operands[0], 0))
- + && REGNO(XEXP(operands[0], 0)) == SP_REGNUM)
- + || (GET_CODE(XEXP(operands[0], 0)) == PLUS
- + && REGNO(XEXP(XEXP(operands[0], 0), 0)) == SP_REGNUM
- + && GET_CODE(XEXP(XEXP(operands[0], 0), 1)) == CONST_INT
- + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) % 4 == 0
- + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) <= 0x1FC) )
- + return "stdsp\t%0, %1";
- + else
- + return "st.w\t%0, %1";
- + default:
- + abort();
- + }
- + }
- +
- + [(set_attr "length" "2,4,8,4,4")
- + (set_attr "type" "alu,alu,alu2,load,store")
- + (set_attr "cc" "none,none,clobber,none,none")])
- +
- +
- +
- +;;== double - 64 bits =========================================================
- +(define_expand "movdf"
- + [(set (match_operand:DF 0 "nonimmediate_operand" "")
- + (match_operand:DF 1 "general_operand" ""))]
- + ""
- + {
- + /* One of the ops has to be in a register. */
- + if (GET_CODE (operands[0]) != REG){
- + operands[1] = force_reg (DFmode, operands[1]);
- + }
- + })
- +
- +
- +(define_insn_and_split "*movdf_internal"
- + [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,r,r,m")
- + (match_operand:DF 1 "general_operand" " r,G,F,m,r"))]
- + "(register_operand (operands[0], DFmode)
- + || register_operand (operands[1], DFmode))"
- + {
- + switch (which_alternative ){
- + case 0:
- + case 1:
- + case 2:
- + return "#";
- + case 3:
- + if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])))
- + return "ld.d\t%0, pc[%1 - .]";
- + else
- + return "ld.d\t%0, %1";
- + case 4:
- + return "st.d\t%0, %1";
- + default:
- + abort();
- + }
- + }
- + "reload_completed
- + && (REG_P (operands[0])
- + && (REG_P (operands[1])
- + || GET_CODE (operands[1]) == CONST_DOUBLE))"
- + [(set (match_dup 0) (match_dup 1))
- + (set (match_dup 2) (match_dup 3))]
- + "
- + {
- + operands[2] = gen_highpart (SImode, operands[0]);
- + operands[0] = gen_lowpart (SImode, operands[0]);
- + operands[3] = gen_highpart(SImode, operands[1]);
- + operands[1] = gen_lowpart(SImode, operands[1]);
- + }
- + "
- +
- + [(set_attr "length" "*,*,*,4,4")
- + (set_attr "type" "*,*,*,load2,store2")
- + (set_attr "cc" "*,*,*,none,none")])
- +
- +
- +;;=============================================================================
- +;; Conditional Moves
- +;;=============================================================================
- +(define_insn "ld<mode>_predicable"
- + [(set (match_operand:MOVCC 0 "register_operand" "=r")
- + (match_operand:MOVCC 1 "avr32_non_rmw_memory_operand" "<MOVCC:pred_mem_constraint>"))]
- + "TARGET_V2_INSNS"
- + "ld<MOVCC:load_postfix>%?\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "type" "load")
- + (set_attr "predicable" "yes")]
- +)
- +
- +
- +(define_insn "st<mode>_predicable"
- + [(set (match_operand:MOVCC 0 "avr32_non_rmw_memory_operand" "=<MOVCC:pred_mem_constraint>")
- + (match_operand:MOVCC 1 "register_operand" "r"))]
- + "TARGET_V2_INSNS"
- + "st<MOVCC:store_postfix>%?\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "type" "store")
- + (set_attr "predicable" "yes")]
- +)
- +
- +(define_insn "mov<mode>_predicable"
- + [(set (match_operand:MOVCC 0 "register_operand" "=r")
- + (match_operand:MOVCC 1 "avr32_cond_register_immediate_operand" "rKs08"))]
- + ""
- + "mov%?\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "type" "alu")
- + (set_attr "predicable" "yes")]
- +)
- +
- +
- +;;=============================================================================
- +;; Move chunks of memory
- +;;=============================================================================
- +
- +(define_expand "movmemsi"
- + [(match_operand:BLK 0 "general_operand" "")
- + (match_operand:BLK 1 "general_operand" "")
- + (match_operand:SI 2 "const_int_operand" "")
- + (match_operand:SI 3 "const_int_operand" "")]
- + ""
- + "
- + if (avr32_gen_movmemsi (operands))
- + DONE;
- + FAIL;
- + "
- + )
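- +
- +;; If avr32_gen_movmemsi cannot expand the block copy inline, the expander
- +;; FAILs and GCC falls back to its generic block-move handling (typically a
- +;; call to memcpy).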
- +
- +
- +
- +
- +;;=============================================================================
- +;; Bit field instructions
- +;;-----------------------------------------------------------------------------
- +;; Instructions to insert or extract bit-fields
- +;;=============================================================================
- +
- +(define_insn "insv"
- + [ (set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r")
- + (match_operand:SI 1 "immediate_operand" "Ku05")
- + (match_operand:SI 2 "immediate_operand" "Ku05"))
- + (match_operand 3 "register_operand" "r"))]
- + ""
- + "bfins\t%0, %3, %2, %1"
- + [(set_attr "type" "alu")
- + (set_attr "length" "4")
- + (set_attr "cc" "set_ncz")])
- +
- +
- +
- +(define_expand "extv"
- + [ (set (match_operand:SI 0 "register_operand" "")
- + (sign_extract:SI (match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")
- + (match_operand:SI 3 "immediate_operand" "")))]
- + ""
- + {
- + if ( INTVAL(operands[2]) >= 32 )
- + FAIL;
- + }
- +)
- +
- +(define_expand "extzv"
- + [ (set (match_operand:SI 0 "register_operand" "")
- + (zero_extract:SI (match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")
- + (match_operand:SI 3 "immediate_operand" "")))]
- + ""
- + {
- + if ( INTVAL(operands[2]) >= 32 )
- + FAIL;
- + }
- +)
- +
- +(define_insn "extv_internal"
- + [ (set (match_operand:SI 0 "register_operand" "=r")
- + (sign_extract:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "immediate_operand" "Ku05")
- + (match_operand:SI 3 "immediate_operand" "Ku05")))]
- + "INTVAL(operands[2]) < 32"
- + "bfexts\t%0, %1, %3, %2"
- + [(set_attr "type" "alu")
- + (set_attr "length" "4")
- + (set_attr "cc" "set_ncz")])
- +
- +
- +(define_insn "extzv_internal"
- + [ (set (match_operand:SI 0 "register_operand" "=r")
- + (zero_extract:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "immediate_operand" "Ku05")
- + (match_operand:SI 3 "immediate_operand" "Ku05")))]
- + "INTVAL(operands[2]) < 32"
- + "bfextu\t%0, %1, %3, %2"
- + [(set_attr "type" "alu")
- + (set_attr "length" "4")
- + (set_attr "cc" "set_ncz")])
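- +
- +;; As an illustrative example (register names chosen arbitrarily), extracting
- +;; an unsigned 8-bit field starting at bit position 4 of rS into rD matches
- +;; extzv_internal and, per the template above (destination, source, position,
- +;; width), is emitted as:
- +;;
- +;;     bfextu  rD, rS, 4, 8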
- +
- +
- +
- +;;=============================================================================
- +;; Some peepholes for avoiding unnecessary cast instructions
- +;; followed by bfins.
- +;;-----------------------------------------------------------------------------
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (zero_extend:SI (match_operand:QI 1 "register_operand" "")))
- + (set (zero_extract:SI (match_operand 2 "register_operand" "")
- + (match_operand:SI 3 "immediate_operand" "")
- + (match_operand:SI 4 "immediate_operand" ""))
- + (match_dup 0))]
- + "((peep2_reg_dead_p(2, operands[0]) &&
- + (INTVAL(operands[3]) <= 8)))"
- + [(set (zero_extract:SI (match_dup 2)
- + (match_dup 3)
- + (match_dup 4))
- + (match_dup 1))]
- + )
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (zero_extend:SI (match_operand:HI 1 "register_operand" "")))
- + (set (zero_extract:SI (match_operand 2 "register_operand" "")
- + (match_operand:SI 3 "immediate_operand" "")
- + (match_operand:SI 4 "immediate_operand" ""))
- + (match_dup 0))]
- + "((peep2_reg_dead_p(2, operands[0]) &&
- + (INTVAL(operands[3]) <= 16)))"
- + [(set (zero_extract:SI (match_dup 2)
- + (match_dup 3)
- + (match_dup 4))
- + (match_dup 1))]
- + )
- +
- +;;=============================================================================
- +;; push bytes
- +;;-----------------------------------------------------------------------------
- +;; Implements the pushm (push multiple registers) instruction.
- +;;=============================================================================
- +(define_insn "pushm"
- + [(set (mem:BLK (pre_dec:BLK (reg:SI SP_REGNUM)))
- + (unspec:BLK [(match_operand 0 "const_int_operand" "")]
- + UNSPEC_PUSHM))]
- + ""
- + {
- + if (INTVAL(operands[0])) {
- + return "pushm\t%r0";
- + } else {
- + return "";
- + }
- + }
- + [(set_attr "type" "store")
- + (set_attr "length" "2")
- + (set_attr "cc" "none")])
- +
- +(define_insn "stm"
- + [(unspec [(match_operand 0 "register_operand" "r")
- + (match_operand 1 "const_int_operand" "")
- + (match_operand 2 "const_int_operand" "")]
- + UNSPEC_STM)]
- + ""
- + {
- + if (INTVAL(operands[1])) {
- + if (INTVAL(operands[2]) != 0)
- + return "stm\t--%0, %s1";
- + else
- + return "stm\t%0, %s1";
- + } else {
- + return "";
- + }
- + }
- + [(set_attr "type" "store")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +
- +
- +(define_insn "popm"
- + [(unspec [(match_operand 0 "const_int_operand" "")]
- + UNSPEC_POPM)]
- + ""
- + {
- + if (INTVAL(operands[0])) {
- + return "popm %r0";
- + } else {
- + return "";
- + }
- + }
- + [(set_attr "type" "load")
- + (set_attr "length" "2")])
- +
- +
- +
- +;;=============================================================================
- +;; add
- +;;-----------------------------------------------------------------------------
- +;; Add reg1 and reg2 and put the result in reg0.
- +;;=============================================================================
- +(define_insn "add<mode>3"
- + [(set (match_operand:INTM 0 "register_operand" "=r,r,r,r,r")
- + (plus:INTM (match_operand:INTM 1 "register_operand" "%0,r,0,r,0")
- + (match_operand:INTM 2 "avr32_add_operand" "r,r,Is08,Is16,Is21")))]
- + ""
- + "@
- + add %0, %2
- + add %0, %1, %2
- + sub %0, %n2
- + sub %0, %1, %n2
- + sub %0, %n2"
- +
- + [(set_attr "length" "2,4,2,4,4")
- + (set_attr "cc" "<INTM:alu_cc_attr>")])
- +
- +(define_insn "add<mode>3_lsl"
- + [(set (match_operand:INTM 0 "register_operand" "=r")
- + (plus:INTM (ashift:INTM (match_operand:INTM 1 "register_operand" "r")
- + (match_operand:INTM 3 "avr32_add_shift_immediate_operand" "Ku02"))
- + (match_operand:INTM 2 "register_operand" "r")))]
- + ""
- + "add %0, %2, %1 << %3"
- + [(set_attr "length" "4")
- + (set_attr "cc" "<INTM:alu_cc_attr>")])
- +
- +(define_insn "add<mode>3_lsl2"
- + [(set (match_operand:INTM 0 "register_operand" "=r")
- + (plus:INTM (match_operand:INTM 1 "register_operand" "r")
- + (ashift:INTM (match_operand:INTM 2 "register_operand" "r")
- + (match_operand:INTM 3 "avr32_add_shift_immediate_operand" "Ku02"))))]
- + ""
- + "add %0, %1, %2 << %3"
- + [(set_attr "length" "4")
- + (set_attr "cc" "<INTM:alu_cc_attr>")])
- +
- +
- +(define_insn "add<mode>3_mul"
- + [(set (match_operand:INTM 0 "register_operand" "=r")
- + (plus:INTM (mult:INTM (match_operand:INTM 1 "register_operand" "r")
- + (match_operand:INTM 3 "immediate_operand" "Ku04" ))
- + (match_operand:INTM 2 "register_operand" "r")))]
- + "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
- + (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
- + "add %0, %2, %1 << %p3"
- + [(set_attr "length" "4")
- + (set_attr "cc" "<INTM:alu_cc_attr>")])
- +
- +(define_insn "add<mode>3_mul2"
- + [(set (match_operand:INTM 0 "register_operand" "=r")
- + (plus:INTM (match_operand:INTM 1 "register_operand" "r")
- + (mult:INTM (match_operand:INTM 2 "register_operand" "r")
- + (match_operand:INTM 3 "immediate_operand" "Ku04" ))))]
- + "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
- + (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
- + "add %0, %1, %2 << %p3"
- + [(set_attr "length" "4")
- + (set_attr "cc" "<INTM:alu_cc_attr>")])
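- +
- +;; These patterns allow address-style arithmetic such as a + b*4 to be
- +;; emitted as a single shifted add, roughly add rD, rA, rB << 2 (a sketch,
- +;; assuming the %p modifier prints the log2 of the constant operand).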
- +
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (ashift:SI (match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")))
- + (set (match_operand:SI 3 "register_operand" "")
- + (plus:SI (match_dup 0)
- + (match_operand:SI 4 "register_operand" "")))]
- + "(peep2_reg_dead_p(2, operands[0]) &&
- + (INTVAL(operands[2]) < 4 && INTVAL(operands[2]) > 0))"
- + [(set (match_dup 3)
- + (plus:SI (ashift:SI (match_dup 1)
- + (match_dup 2))
- + (match_dup 4)))]
- + )
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (ashift:SI (match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")))
- + (set (match_operand:SI 3 "register_operand" "")
- + (plus:SI (match_operand:SI 4 "register_operand" "")
- + (match_dup 0)))]
- + "(peep2_reg_dead_p(2, operands[0]) &&
- + (INTVAL(operands[2]) < 4 && INTVAL(operands[2]) > 0))"
- + [(set (match_dup 3)
- + (plus:SI (ashift:SI (match_dup 1)
- + (match_dup 2))
- + (match_dup 4)))]
- + )
- +
- +(define_insn "adddi3"
- + [(set (match_operand:DI 0 "register_operand" "=r,r")
- + (plus:DI (match_operand:DI 1 "register_operand" "%0,r")
- + (match_operand:DI 2 "register_operand" "r,r")))]
- + ""
- + "@
- + add %0, %2\;adc %m0, %m0, %m2
- + add %0, %1, %2\;adc %m0, %m1, %m2"
- + [(set_attr "length" "6,8")
- + (set_attr "type" "alu2")
- + (set_attr "cc" "set_vncz")])
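- +
- +;; A DImode add is thus emitted as an add of the low words (which sets the
- +;; carry) followed by an add-with-carry of the high words, e.g. for the
- +;; tied-operand alternative (illustrative register names only):
- +;;
- +;;     add   rLO, rLO2
- +;;     adc   rHI, rHI, rHI2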
- +
- +
- +(define_insn "add<mode>_imm_predicable"
- + [(set (match_operand:INTM 0 "register_operand" "+r")
- + (plus:INTM (match_dup 0)
- + (match_operand:INTM 1 "avr32_cond_immediate_operand" "%Is08")))]
- + ""
- + "sub%?\t%0, -%1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")]
- +)
- +
- +;;=============================================================================
- +;; subtract
- +;;-----------------------------------------------------------------------------
- +;; Subtract reg2 or an immediate value from reg0 and put the result in reg0.
- +;;=============================================================================
- +
- +(define_insn "sub<mode>3"
- + [(set (match_operand:INTM 0 "general_operand" "=r,r,r,r,r,r,r")
- + (minus:INTM (match_operand:INTM 1 "register_const_int_operand" "0,r,0,r,0,r,Ks08")
- + (match_operand:INTM 2 "register_const_int_operand" "r,r,Ks08,Ks16,Ks21,0,r")))]
- + ""
- + "@
- + sub %0, %2
- + sub %0, %1, %2
- + sub %0, %2
- + sub %0, %1, %2
- + sub %0, %2
- + rsub %0, %1
- + rsub %0, %2, %1"
- + [(set_attr "length" "2,4,2,4,4,2,4")
- + (set_attr "cc" "<INTM:alu_cc_attr>")])
- +
- +(define_insn "*sub<mode>3_mul"
- + [(set (match_operand:INTM 0 "register_operand" "=r")
- + (minus:INTM (match_operand:INTM 1 "register_operand" "r")
- + (mult:INTM (match_operand:INTM 2 "register_operand" "r")
- + (match_operand:SI 3 "immediate_operand" "Ku04" ))))]
- + "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
- + (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
- + "sub %0, %1, %2 << %p3"
- + [(set_attr "length" "4")
- + (set_attr "cc" "<INTM:alu_cc_attr>")])
- +
- +(define_insn "*sub<mode>3_lsl"
- + [(set (match_operand:INTM 0 "register_operand" "=r")
- + (minus:INTM (match_operand:INTM 1 "register_operand" "r")
- + (ashift:INTM (match_operand:INTM 2 "register_operand" "r")
- + (match_operand:SI 3 "avr32_add_shift_immediate_operand" "Ku02"))))]
- + ""
- + "sub %0, %1, %2 << %3"
- + [(set_attr "length" "4")
- + (set_attr "cc" "<INTM:alu_cc_attr>")])
- +
- +
- +(define_insn "subdi3"
- + [(set (match_operand:DI 0 "register_operand" "=r,r")
- + (minus:DI (match_operand:DI 1 "register_operand" "%0,r")
- + (match_operand:DI 2 "register_operand" "r,r")))]
- + ""
- + "@
- + sub %0, %2\;sbc %m0, %m0, %m2
- + sub %0, %1, %2\;sbc %m0, %m1, %m2"
- + [(set_attr "length" "6,8")
- + (set_attr "type" "alu2")
- + (set_attr "cc" "set_vncz")])
- +
- +
- +(define_insn "sub<mode>_imm_predicable"
- + [(set (match_operand:INTM 0 "register_operand" "+r")
- + (minus:INTM (match_dup 0)
- + (match_operand:INTM 1 "avr32_cond_immediate_operand" "Ks08")))]
- + ""
- + "sub%?\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")])
- +
- +(define_insn "rsub<mode>_imm_predicable"
- + [(set (match_operand:INTM 0 "register_operand" "+r")
- + (minus:INTM (match_operand:INTM 1 "avr32_cond_immediate_operand" "Ks08")
- + (match_dup 0)))]
- + ""
- + "rsub%?\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")])
- +
- +;;=============================================================================
- +;; multiply
- +;;-----------------------------------------------------------------------------
- +;; Multiply op1 and op2 and put the value in op0.
- +;;=============================================================================
- +
- +
- +(define_insn "mulqi3"
- + [(set (match_operand:QI 0 "register_operand" "=r,r,r")
- + (mult:QI (match_operand:QI 1 "register_operand" "%0,r,r")
- + (match_operand:QI 2 "avr32_mul_operand" "r,r,Ks08")))]
- + "!TARGET_NO_MUL_INSNS"
- + {
- + switch (which_alternative){
- + case 0:
- + return "mul %0, %2";
- + case 1:
- + return "mul %0, %1, %2";
- + case 2:
- + return "mul %0, %1, %2";
- + default:
- + gcc_unreachable();
- + }
- + }
- + [(set_attr "type" "mulww_w,mulww_w,mulwh")
- + (set_attr "length" "2,4,4")
- + (set_attr "cc" "none")])
- +
- +(define_insn "mulsi3"
- + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
- + (mult:SI (match_operand:SI 1 "register_operand" "%0,r,r")
- + (match_operand:SI 2 "avr32_mul_operand" "r,r,Ks08")))]
- + "!TARGET_NO_MUL_INSNS"
- + {
- + switch (which_alternative){
- + case 0:
- + return "mul %0, %2";
- + case 1:
- + return "mul %0, %1, %2";
- + case 2:
- + return "mul %0, %1, %2";
- + default:
- + gcc_unreachable();
- + }
- + }
- + [(set_attr "type" "mulww_w,mulww_w,mulwh")
- + (set_attr "length" "2,4,4")
- + (set_attr "cc" "none")])
- +
- +
- +(define_insn "mulhisi3"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (mult:SI
- + (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
- + (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "mulhh.w %0, %1:b, %2:b"
- + [(set_attr "type" "mulhh")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +(define_peephole2
- + [(match_scratch:DI 6 "r")
- + (set (match_operand:SI 0 "register_operand" "")
- + (mult:SI
- + (sign_extend:SI (match_operand:HI 1 "register_operand" ""))
- + (sign_extend:SI (match_operand:HI 2 "register_operand" ""))))
- + (set (match_operand:SI 3 "register_operand" "")
- + (ashiftrt:SI (match_dup 0)
- + (const_int 16)))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP
- + && (peep2_reg_dead_p(1, operands[0]) || (REGNO(operands[0]) == REGNO(operands[3])))"
- + [(set (match_dup 4) (sign_extend:SI (match_dup 1)))
- + (set (match_dup 6)
- + (ashift:DI (mult:DI (sign_extend:DI (match_dup 4))
- + (sign_extend:DI (match_dup 2)))
- + (const_int 16)))
- + (set (match_dup 3) (match_dup 5))]
- +
- + "{
- + operands[4] = gen_rtx_REG(SImode, REGNO(operands[1]));
- + operands[5] = gen_highpart (SImode, operands[4]);
- + }"
- + )
- +
- +(define_insn "mulnhisi3"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (mult:SI
- + (sign_extend:SI (neg:HI (match_operand:HI 1 "register_operand" "r")))
- + (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "mulnhh.w %0, %1:b, %2:b"
- + [(set_attr "type" "mulhh")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +(define_insn "machisi3"
- + [(set (match_operand:SI 0 "register_operand" "+r")
- + (plus:SI (mult:SI
- + (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
- + (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
- + (match_dup 0)))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "machh.w %0, %1:b, %2:b"
- + [(set_attr "type" "machh_w")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +
- +
- +(define_insn "mulsidi3"
- + [(set (match_operand:DI 0 "register_operand" "=r")
- + (mult:DI
- + (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
- + (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
- + "!TARGET_NO_MUL_INSNS"
- + "muls.d %0, %1, %2"
- + [(set_attr "type" "mulww_d")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +(define_insn "umulsidi3"
- + [(set (match_operand:DI 0 "register_operand" "=r")
- + (mult:DI
- + (zero_extend:DI (match_operand:SI 1 "register_operand" "%r"))
- + (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
- + "!TARGET_NO_MUL_INSNS"
- + "mulu.d %0, %1, %2"
- + [(set_attr "type" "mulww_d")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +(define_insn "*mulaccsi3"
- + [(set (match_operand:SI 0 "register_operand" "+r")
- + (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "%r")
- + (match_operand:SI 2 "register_operand" "r"))
- + (match_dup 0)))]
- + "!TARGET_NO_MUL_INSNS"
- + "mac %0, %1, %2"
- + [(set_attr "type" "macww_w")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +(define_insn "*mulaccsidi3"
- + [(set (match_operand:DI 0 "register_operand" "+r")
- + (plus:DI (mult:DI
- + (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
- + (sign_extend:DI (match_operand:SI 2 "register_operand" "r")))
- + (match_dup 0)))]
- + "!TARGET_NO_MUL_INSNS"
- + "macs.d %0, %1, %2"
- + [(set_attr "type" "macww_d")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +(define_insn "*umulaccsidi3"
- + [(set (match_operand:DI 0 "register_operand" "+r")
- + (plus:DI (mult:DI
- + (zero_extend:DI (match_operand:SI 1 "register_operand" "%r"))
- + (zero_extend:DI (match_operand:SI 2 "register_operand" "r")))
- + (match_dup 0)))]
- + "!TARGET_NO_MUL_INSNS"
- + "macu.d %0, %1, %2"
- + [(set_attr "type" "macww_d")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +
- +
- +;; Try to avoid Write-After-Write hazards for mul operations
- +;; where possible.
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (mult:SI
- + (sign_extend:SI (match_operand 1 "general_operand" ""))
- + (sign_extend:SI (match_operand 2 "general_operand" ""))))
- + (set (match_dup 0)
- + (match_operator:SI 3 "alu_operator" [(match_dup 0)
- + (match_operand 4 "general_operand" "")]))]
- + "peep2_reg_dead_p(1, operands[2])"
- + [(set (match_dup 5)
- + (mult:SI
- + (sign_extend:SI (match_dup 1))
- + (sign_extend:SI (match_dup 2))))
- + (set (match_dup 0)
- + (match_op_dup 3 [(match_dup 5)
- + (match_dup 4)]))]
- + "{operands[5] = gen_rtx_REG(SImode, REGNO(operands[2]));}"
- + )
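- +
- +;; The peephole above redirects the intermediate mul result into the register
- +;; that held (the now dead) operand 2, so the multiply and the following ALU
- +;; instruction no longer write the same destination register back-to-back.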
- +
- +
- +
- +;;=============================================================================
- +;; DSP instructions
- +;;=============================================================================
- +(define_insn "mulsathh_h"
- + [(set (match_operand:HI 0 "register_operand" "=r")
- + (ss_truncate:HI (ashiftrt:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
- + (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
- + (const_int 15))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "mulsathh.h\t%0, %1:b, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulhh")])
- +
- +(define_insn "mulsatrndhh_h"
- + [(set (match_operand:HI 0 "register_operand" "=r")
- + (ss_truncate:HI (ashiftrt:SI
- + (plus:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
- + (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
- + (const_int 1073741824))
- + (const_int 15))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "mulsatrndhh.h\t%0, %1:b, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulhh")])
- +
- +(define_insn "mulsathh_w"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
- + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
- + (const_int 1))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "mulsathh.w\t%0, %1:b, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulhh")])
- +
- +(define_insn "mulsatwh_w"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (ss_truncate:SI (ashiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
- + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
- + (const_int 15))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "mulsatwh.w\t%0, %1, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulwh")])
- +
- +(define_insn "mulsatrndwh_w"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (ss_truncate:SI (ashiftrt:DI (plus:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
- + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
- + (const_int 1073741824))
- + (const_int 15))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "mulsatrndwh.w\t%0, %1, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulwh")])
- +
- +(define_insn "macsathh_w"
- + [(set (match_operand:SI 0 "register_operand" "+r")
- + (plus:SI (match_dup 0)
- + (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
- + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
- + (const_int 1)))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "macsathh.w\t%0, %1:b, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulhh")])
- +
- +
- +(define_insn "mulwh_d"
- + [(set (match_operand:DI 0 "register_operand" "=r")
- + (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
- + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
- + (const_int 16)))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "mulwh.d\t%0, %1, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulwh")])
- +
- +
- +(define_insn "mulnwh_d"
- + [(set (match_operand:DI 0 "register_operand" "=r")
- + (ashift:DI (mult:DI (not:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")))
- + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
- + (const_int 16)))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "mulnwh.d\t%0, %1, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulwh")])
- +
- +(define_insn "macwh_d"
- + [(set (match_operand:DI 0 "register_operand" "+r")
- + (plus:DI (match_dup 0)
- + (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
- + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
- + (const_int 16))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "macwh.d\t%0, %1, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulwh")])
- +
- +(define_insn "machh_d"
- + [(set (match_operand:DI 0 "register_operand" "+r")
- + (plus:DI (match_dup 0)
- + (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
- + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "machh.d\t%0, %1:b, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulwh")])
- +
- +(define_insn "satadd_w"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (ss_plus:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "register_operand" "r")))]
- + "TARGET_DSP"
- + "satadd.w\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "alu_sat")])
- +
- +(define_insn "satsub_w"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (ss_minus:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "register_operand" "r")))]
- + "TARGET_DSP"
- + "satsub.w\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "alu_sat")])
- +
- +(define_insn "satadd_h"
- + [(set (match_operand:HI 0 "register_operand" "=r")
- + (ss_plus:HI (match_operand:HI 1 "register_operand" "r")
- + (match_operand:HI 2 "register_operand" "r")))]
- + "TARGET_DSP"
- + "satadd.h\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "alu_sat")])
- +
- +(define_insn "satsub_h"
- + [(set (match_operand:HI 0 "register_operand" "=r")
- + (ss_minus:HI (match_operand:HI 1 "register_operand" "r")
- + (match_operand:HI 2 "register_operand" "r")))]
- + "TARGET_DSP"
- + "satsub.h\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "alu_sat")])
- +
- +
- +;;=============================================================================
- +;; smin
- +;;-----------------------------------------------------------------------------
- +;; Set reg0 to the smaller of reg1 and reg2, treating the register values
- +;; as signed.
- +;;=============================================================================
- +(define_insn "sminsi3"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (smin:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "register_operand" "r")))]
- + ""
- + "min %0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +;;=============================================================================
- +;; smax
- +;;-----------------------------------------------------------------------------
- +;; Set reg0 to the larger of reg1 and reg2, treating the register values
- +;; as signed.
- +;;=============================================================================
- +(define_insn "smaxsi3"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (smax:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "register_operand" "r")))]
- + ""
- + "max %0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +
- +
- +;;=============================================================================
- +;; Logical operations
- +;;-----------------------------------------------------------------------------
- +
- +
- +;; Split up simple DImode logical operations. Simply perform the logical
- +;; operation on the upper and lower halves of the registers.
- +(define_split
- + [(set (match_operand:DI 0 "register_operand" "")
- + (match_operator:DI 6 "logical_binary_operator"
- + [(match_operand:DI 1 "register_operand" "")
- + (match_operand:DI 2 "register_operand" "")]))]
- + "reload_completed"
- + [(set (match_dup 0) (match_op_dup:SI 6 [(match_dup 1) (match_dup 2)]))
- + (set (match_dup 3) (match_op_dup:SI 6 [(match_dup 4) (match_dup 5)]))]
- + "
- + {
- + operands[3] = gen_highpart (SImode, operands[0]);
- + operands[0] = gen_lowpart (SImode, operands[0]);
- + operands[4] = gen_highpart (SImode, operands[1]);
- + operands[1] = gen_lowpart (SImode, operands[1]);
- + operands[5] = gen_highpart (SImode, operands[2]);
- + operands[2] = gen_lowpart (SImode, operands[2]);
- + }"
- +)
- +
- +;;=============================================================================
- +;; Logical operations with shifted operand
- +;;=============================================================================
- +(define_insn "<code>si_lshift"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (logical:SI (match_operator:SI 4 "logical_shift_operator"
- + [(match_operand:SI 2 "register_operand" "r")
- + (match_operand:SI 3 "immediate_operand" "Ku05")])
- + (match_operand:SI 1 "register_operand" "r")))]
- + ""
- + {
- + if ( GET_CODE(operands[4]) == ASHIFT )
- + return "<logical_insn>\t%0, %1, %2 << %3";
- + else
- + return "<logical_insn>\t%0, %1, %2 >> %3";
- + }
- +
- + [(set_attr "cc" "set_z")]
- +)
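- +
- +;; For instance, an expression like x | (y << 3) can be emitted as a single
- +;; shifted logical operation, roughly or rD, rX, rY << 3 (a sketch, assuming
- +;; the logical_insn code attribute expands to or for ior).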
- +
- +
- +;;************************************************
- +;; Peepholes for detecting logical operations
- +;; with shifted operands
- +;;************************************************
- +
- +(define_peephole
- + [(set (match_operand:SI 3 "register_operand" "")
- + (match_operator:SI 5 "logical_shift_operator"
- + [(match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")]))
- + (set (match_operand:SI 0 "register_operand" "")
- + (logical:SI (match_operand:SI 4 "register_operand" "")
- + (match_dup 3)))]
- + "(dead_or_set_p(insn, operands[3])) || (REGNO(operands[3]) == REGNO(operands[0]))"
- + {
- + if ( GET_CODE(operands[5]) == ASHIFT )
- + return "<logical_insn>\t%0, %4, %1 << %2";
- + else
- + return "<logical_insn>\t%0, %4, %1 >> %2";
- + }
- + [(set_attr "cc" "set_z")]
- + )
- +
- +(define_peephole
- + [(set (match_operand:SI 3 "register_operand" "")
- + (match_operator:SI 5 "logical_shift_operator"
- + [(match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")]))
- + (set (match_operand:SI 0 "register_operand" "")
- + (logical:SI (match_dup 3)
- + (match_operand:SI 4 "register_operand" "")))]
- + "(dead_or_set_p(insn, operands[3])) || (REGNO(operands[3]) == REGNO(operands[0]))"
- + {
- + if ( GET_CODE(operands[5]) == ASHIFT )
- + return "<logical_insn>\t%0, %4, %1 << %2";
- + else
- + return "<logical_insn>\t%0, %4, %1 >> %2";
- + }
- + [(set_attr "cc" "set_z")]
- + )
- +
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (match_operator:SI 5 "logical_shift_operator"
- + [(match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")]))
- + (set (match_operand:SI 3 "register_operand" "")
- + (logical:SI (match_operand:SI 4 "register_operand" "")
- + (match_dup 0)))]
- + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[3]) == REGNO(operands[0]))"
- +
- + [(set (match_dup 3)
- + (logical:SI (match_op_dup:SI 5 [(match_dup 1) (match_dup 2)])
- + (match_dup 4)))]
- +
- + ""
- +)
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (match_operator:SI 5 "logical_shift_operator"
- + [(match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")]))
- + (set (match_operand:SI 3 "register_operand" "")
- + (logical:SI (match_dup 0)
- + (match_operand:SI 4 "register_operand" "")))]
- + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[3]) == REGNO(operands[0]))"
- +
- + [(set (match_dup 3)
- + (logical:SI (match_op_dup:SI 5 [(match_dup 1) (match_dup 2)])
- + (match_dup 4)))]
- +
- + ""
- +)
- +
- +
- +;;=============================================================================
- +;; and
- +;;-----------------------------------------------------------------------------
- +;; Store the result of a bitwise AND between reg0 and reg2 in reg0.
- +;;=============================================================================
- +
- +(define_insn "andnsi"
- + [(set (match_operand:SI 0 "register_operand" "+r")
- + (and:SI (match_dup 0)
- + (not:SI (match_operand:SI 1 "register_operand" "r"))))]
- + ""
- + "andn %0, %1"
- + [(set_attr "cc" "set_z")
- + (set_attr "length" "2")]
- +)
- +
- +
- +(define_insn "andsi3"
- + [(set (match_operand:SI 0 "avr32_rmw_memory_or_register_operand" "=Y,r,r,r, r, r,r,r,r,r")
- + (and:SI (match_operand:SI 1 "avr32_rmw_memory_or_register_operand" "%0,r,0,0, 0, 0,0,0,0,r" )
- + (match_operand:SI 2 "nonmemory_operand" " N,M,N,Ku16,Ks17,J,L,r,i,r")))]
- + ""
- + "@
- + memc\t%0, %z2
- + bfextu\t%0, %1, 0, %z2
- + cbr\t%0, %z2
- + andl\t%0, %2, COH
- + andl\t%0, lo(%2)
- + andh\t%0, hi(%2), COH
- + andh\t%0, hi(%2)
- + and\t%0, %2
- + andh\t%0, hi(%2)\;andl\t%0, lo(%2)
- + and\t%0, %1, %2"
- +
- + [(set_attr "length" "4,4,2,4,4,4,4,2,8,4")
- + (set_attr "cc" "none,set_z,set_z,set_z,set_z,set_z,set_z,set_z,set_z,set_z")])
- +
- +
- +
- +(define_insn "anddi3"
- + [(set (match_operand:DI 0 "register_operand" "=&r,&r")
- + (and:DI (match_operand:DI 1 "register_operand" "%0,r")
- + (match_operand:DI 2 "register_operand" "r,r")))]
- + ""
- + "#"
- + [(set_attr "length" "8")
- + (set_attr "cc" "clobber")]
- +)
- +
- +;;=============================================================================
- +;; or
- +;;-----------------------------------------------------------------------------
- +;; Store the result of a bitwise inclusive-or between reg0 and reg2 in reg0.
- +;;=============================================================================
- +
- +(define_insn "iorsi3"
- + [(set (match_operand:SI 0 "avr32_rmw_memory_or_register_operand" "=Y,r,r, r,r,r,r")
- + (ior:SI (match_operand:SI 1 "avr32_rmw_memory_or_register_operand" "%0,0,0, 0,0,0,r" )
- + (match_operand:SI 2 "nonmemory_operand" " O,O,Ku16,J,r,i,r")))]
- + ""
- + "@
- + mems\t%0, %p2
- + sbr\t%0, %p2
- + orl\t%0, %2
- + orh\t%0, hi(%2)
- + or\t%0, %2
- + orh\t%0, hi(%2)\;orl\t%0, lo(%2)
- + or\t%0, %1, %2"
- +
- + [(set_attr "length" "4,2,4,4,2,8,4")
- + (set_attr "cc" "none,set_z,set_z,set_z,set_z,set_z,set_z")])
- +
- +
- +(define_insn "iordi3"
- + [(set (match_operand:DI 0 "register_operand" "=&r,&r")
- + (ior:DI (match_operand:DI 1 "register_operand" "%0,r")
- + (match_operand:DI 2 "register_operand" "r,r")))]
- + ""
- + "#"
- + [(set_attr "length" "8")
- + (set_attr "cc" "clobber")]
- +)
- +
- +;;=============================================================================
- +;; xor bytes
- +;;-----------------------------------------------------------------------------
- +;; Store the result of a bitwise exclusive-or between reg0 and reg2 in reg0.
- +;;=============================================================================
- +
- +(define_insn "xorsi3"
- + [(set (match_operand:SI 0 "avr32_rmw_memory_or_register_operand" "=Y,r, r,r,r,r")
- + (xor:SI (match_operand:SI 1 "avr32_rmw_memory_or_register_operand" "%0,0, 0,0,0,r" )
- + (match_operand:SI 2 "nonmemory_operand" " O,Ku16,J,r,i,r")))]
- + ""
- + "@
- + memt\t%0, %p2
- + eorl\t%0, %2
- + eorh\t%0, hi(%2)
- + eor\t%0, %2
- + eorh\t%0, hi(%2)\;eorl\t%0, lo(%2)
- + eor\t%0, %1, %2"
- +
- + [(set_attr "length" "4,4,4,2,8,4")
- + (set_attr "cc" "none,set_z,set_z,set_z,set_z,set_z")])
- +
- +(define_insn "xordi3"
- + [(set (match_operand:DI 0 "register_operand" "=&r,&r")
- + (xor:DI (match_operand:DI 1 "register_operand" "%0,r")
- + (match_operand:DI 2 "register_operand" "r,r")))]
- + ""
- + "#"
- + [(set_attr "length" "8")
- + (set_attr "cc" "clobber")]
- +)
- +
- +;;=============================================================================
- +;; Three operand predicable insns
- +;;=============================================================================
- +
- +(define_insn "<predicable_insn3><mode>_predicable"
- + [(set (match_operand:INTM 0 "register_operand" "=r")
- + (predicable_op3:INTM (match_operand:INTM 1 "register_operand" "<predicable_commutative3>r")
- + (match_operand:INTM 2 "register_operand" "r")))]
- + "TARGET_V2_INSNS"
- + "<predicable_insn3>%?\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")]
- +)
- +
- +(define_insn_and_split "<predicable_insn3><mode>_imm_clobber_predicable"
- + [(parallel
- + [(set (match_operand:INTM 0 "register_operand" "=r")
- + (predicable_op3:INTM (match_operand:INTM 1 "register_operand" "<predicable_commutative3>r")
- + (match_operand:INTM 2 "avr32_mov_immediate_operand" "JKs21")))
- + (clobber (match_operand:INTM 3 "register_operand" "=&r"))])]
- + "TARGET_V2_INSNS"
- + {
- + if ( current_insn_predicate != NULL_RTX )
- + {
- + if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08") )
- + return "%! mov%?\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
- + else if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks21") )
- + return "%! mov\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
- + else
- + return "%! movh\t%3, hi(%2)\;<predicable_insn3>%?\t%0, %1, %3";
- + }
- + else
- + {
- + if ( !avr32_cond_imm_clobber_splittable (insn, operands) )
- + {
- + if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08") )
- + return "mov%?\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
- + else if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks21") )
- + return "mov\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
- + else
- + return "movh\t%3, hi(%2)\;<predicable_insn3>%?\t%0, %1, %3";
- + }
- + return "#";
- + }
- +
- + }
- + ;; If we find out that we could not actually do if-conversion on the block
- + ;; containing this insn, we convert it back to normal immediate format
- + ;; to avoid outputting a redundant move insn.
- + ;; Do not split until after we have checked if we can make the insn
- + ;; conditional.
- + "(GET_CODE (PATTERN (insn)) != COND_EXEC
- + && cfun->machine->ifcvt_after_reload
- + && avr32_cond_imm_clobber_splittable (insn, operands))"
- + [(set (match_dup 0)
- + (predicable_op3:INTM (match_dup 1)
- + (match_dup 2)))]
- + ""
- + [(set_attr "length" "8")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")]
- + )
- +
- +
- +;;=============================================================================
- +;; Zero extend predicable insns
- +;;=============================================================================
- +(define_insn_and_split "zero_extendhisi_clobber_predicable"
- + [(parallel
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (zero_extend:SI (match_operand:HI 1 "register_operand" "r")))
- + (clobber (match_operand:SI 2 "register_operand" "=&r"))])]
- + "TARGET_V2_INSNS"
- + {
- + if ( current_insn_predicate != NULL_RTX )
- + {
- + return "%! mov\t%2, 0xffff\;and%?\t%0, %1, %2";
- + }
- + else
- + {
- + return "#";
- + }
- +
- + }
- + ;; If we find out that we could not actually do if-conversion on the block
- + ;; containing this insn, we convert it back to normal immediate format
- + ;; to avoid outputting a redundant move insn.
- + ;; Do not split until after we have checked if we can make the insn
- + ;; conditional.
- + "(GET_CODE (PATTERN (insn)) != COND_EXEC
- + && cfun->machine->ifcvt_after_reload)"
- + [(set (match_dup 0)
- + (zero_extend:SI (match_dup 1)))]
- + ""
- + [(set_attr "length" "8")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")]
- + )
- +
- +(define_insn_and_split "zero_extendqisi_clobber_predicable"
- + [(parallel
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (zero_extend:SI (match_operand:QI 1 "register_operand" "r")))
- + (clobber (match_operand:SI 2 "register_operand" "=&r"))])]
- + "TARGET_V2_INSNS"
- + {
- + if ( current_insn_predicate != NULL_RTX )
- + {
- + return "%! mov\t%2, 0xff\;and%?\t%0, %1, %2";
- + }
- + else
- + {
- + return "#";
- + }
- +
- + }
- + ;; If we find out that we could not actually do if-conversion on the block
- + ;; containing this insn, we convert it back to normal immediate format
- + ;; to avoid outputting a redundant move insn.
- + ;; Do not split until after we have checked if we can make the insn
- + ;; conditional.
- + "(GET_CODE (PATTERN (insn)) != COND_EXEC
- + && cfun->machine->ifcvt_after_reload)"
- + [(set (match_dup 0)
- + (zero_extend:SI (match_dup 1)))]
- + ""
- + [(set_attr "length" "8")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")]
- + )
- +
- +(define_insn_and_split "zero_extendqihi_clobber_predicable"
- + [(parallel
- + [(set (match_operand:HI 0 "register_operand" "=r")
- + (zero_extend:HI (match_operand:QI 1 "register_operand" "r")))
- + (clobber (match_operand:SI 2 "register_operand" "=&r"))])]
- + "TARGET_V2_INSNS"
- + {
- + if ( current_insn_predicate != NULL_RTX )
- + {
- + return "%! mov\t%2, 0xff\;and%?\t%0, %1, %2";
- + }
- + else
- + {
- + return "#";
- + }
- +
- + }
- + ;; If we find out that we could not actually do if-conversion on the block
- + ;; containing this insn, we convert it back to normal immediate format
- + ;; to avoid outputting a redundant move insn.
- + ;; Do not split until after we have checked if we can make the insn
- + ;; conditional.
- + "(GET_CODE (PATTERN (insn)) != COND_EXEC
- + && cfun->machine->ifcvt_after_reload)"
- + [(set (match_dup 0)
- + (zero_extend:HI (match_dup 1)))]
- + ""
- + [(set_attr "length" "8")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")]
- + )
- +;;=============================================================================
- +;; divmod
- +;;-----------------------------------------------------------------------------
- +;; Signed division that produces both a quotient and a remainder.
- +;;=============================================================================
- +
- +(define_expand "divmodsi4"
- + [(parallel [
- + (parallel [
- + (set (match_operand:SI 0 "register_operand" "=r")
- + (div:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "register_operand" "r")))
- + (set (match_operand:SI 3 "register_operand" "=r")
- + (mod:SI (match_dup 1)
- + (match_dup 2)))])
- + (use (match_dup 4))])]
- + ""
- + {
- + if (can_create_pseudo_p ()) {
- + operands[4] = gen_reg_rtx (DImode);
- + emit_insn(gen_divmodsi4_internal(operands[4],operands[1],operands[2]));
- + emit_move_insn(operands[0], gen_rtx_SUBREG( SImode, operands[4], 4));
- + emit_move_insn(operands[3], gen_rtx_SUBREG( SImode, operands[4], 0));
- + DONE;
- + } else {
- + FAIL;
- + }
- + })
- +
- +
- +(define_insn "divmodsi4_internal"
- + [(set (match_operand:DI 0 "register_operand" "=r")
- + (unspec:DI [(match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "register_operand" "r")]
- + UNSPEC_DIVMODSI4_INTERNAL))]
- + ""
- + "divs %0, %1, %2"
- + [(set_attr "type" "div")
- + (set_attr "cc" "none")])
- +
- +
- +;;=============================================================================
- +;; udivmod
- +;;-----------------------------------------------------------------------------
- +;; Unsigned division that produces both a quotient and a remainder.
- +;;=============================================================================
- +(define_expand "udivmodsi4"
- + [(parallel [
- + (parallel [
- + (set (match_operand:SI 0 "register_operand" "=r")
- + (udiv:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "register_operand" "r")))
- + (set (match_operand:SI 3 "register_operand" "=r")
- + (umod:SI (match_dup 1)
- + (match_dup 2)))])
- + (use (match_dup 4))])]
- + ""
- + {
- + if (can_create_pseudo_p ()) {
- + operands[4] = gen_reg_rtx (DImode);
- +
- + emit_insn(gen_udivmodsi4_internal(operands[4],operands[1],operands[2]));
- + emit_move_insn(operands[0], gen_rtx_SUBREG( SImode, operands[4], 4));
- + emit_move_insn(operands[3], gen_rtx_SUBREG( SImode, operands[4], 0));
- +
- + DONE;
- + } else {
- + FAIL;
- + }
- + })
- +
- +(define_insn "udivmodsi4_internal"
- + [(set (match_operand:DI 0 "register_operand" "=r")
- + (unspec:DI [(match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "register_operand" "r")]
- + UNSPEC_UDIVMODSI4_INTERNAL))]
- + ""
- + "divu %0, %1, %2"
- + [(set_attr "type" "div")
- + (set_attr "cc" "none")])
- +
- +
- +;;=============================================================================
- +;; Arithmetic-shift left
- +;;-----------------------------------------------------------------------------
- +;; Arithmetic-shift reg0 left by reg2 or an immediate value.
- +;;=============================================================================
- +
- +(define_insn "ashlsi3"
- + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
- + (ashift:SI (match_operand:SI 1 "register_operand" "r,0,r")
- + (match_operand:SI 2 "register_const_int_operand" "r,Ku05,Ku05")))]
- + ""
- + "@
- + lsl %0, %1, %2
- + lsl %0, %2
- + lsl %0, %1, %2"
- + [(set_attr "length" "4,2,4")
- + (set_attr "cc" "set_ncz")])
- +
- +;;=============================================================================
- +;; Arithmetic-shift right
- +;;-----------------------------------------------------------------------------
- +;; Arithmetic-shift reg0 right by reg2 or an immediate value.
- +;;=============================================================================
- +
- +(define_insn "ashrsi3"
- + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
- + (ashiftrt:SI (match_operand:SI 1 "register_operand" "r,0,r")
- + (match_operand:SI 2 "register_const_int_operand" "r,Ku05,Ku05")))]
- + ""
- + "@
- + asr %0, %1, %2
- + asr %0, %2
- + asr %0, %1, %2"
- + [(set_attr "length" "4,2,4")
- + (set_attr "cc" "set_ncz")])
- +
- +;;=============================================================================
- +;; Logical shift right
- +;;-----------------------------------------------------------------------------
- +;; Logical shift reg0 right by reg2 or an immediate value.
- +;;=============================================================================
- +
- +(define_insn "lshrsi3"
- + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
- + (lshiftrt:SI (match_operand:SI 1 "register_operand" "r,0,r")
- + (match_operand:SI 2 "register_const_int_operand" "r,Ku05,Ku05")))]
- + ""
- + "@
- + lsr %0, %1, %2
- + lsr %0, %2
- + lsr %0, %1, %2"
- + [(set_attr "length" "4,2,4")
- + (set_attr "cc" "set_ncz")])
- +
- +
- +;;=============================================================================
- +;; neg
- +;;-----------------------------------------------------------------------------
- +;; Negate operand 1 and store the result in operand 0.
- +;;=============================================================================
- +(define_insn "negsi2"
- + [(set (match_operand:SI 0 "register_operand" "=r,r")
- + (neg:SI (match_operand:SI 1 "register_operand" "0,r")))]
- + ""
- + "@
- + neg\t%0
- + rsub\t%0, %1, 0"
- + [(set_attr "length" "2,4")
- + (set_attr "cc" "set_vncz")])
- +
- +(define_insn "negsi2_predicable"
- + [(set (match_operand:SI 0 "register_operand" "+r")
- + (neg:SI (match_dup 0)))]
- + "TARGET_V2_INSNS"
- + "rsub%?\t%0, 0"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")])
- +
- +;;=============================================================================
- +;; abs
- +;;-----------------------------------------------------------------------------
- +;; Store the absolute value of operand 1 into operand 0.
- +;;=============================================================================
- +(define_insn "abssi2"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (abs:SI (match_operand:SI 1 "register_operand" "0")))]
- + ""
- + "abs\t%0"
- + [(set_attr "length" "2")
- + (set_attr "cc" "set_z")])
- +
- +
- +;;=============================================================================
- +;; one_cmpl
- +;;-----------------------------------------------------------------------------
- +;; Store the bitwise-complement of operand 1 into operand 0.
- +;;=============================================================================
- +
- +(define_insn "one_cmplsi2"
- + [(set (match_operand:SI 0 "register_operand" "=r,r")
- + (not:SI (match_operand:SI 1 "register_operand" "0,r")))]
- + ""
- + "@
- + com\t%0
- + rsub\t%0, %1, -1"
- + [(set_attr "length" "2,4")
- + (set_attr "cc" "set_z")])
- +
- +
- +(define_insn "one_cmplsi2_predicable"
- + [(set (match_operand:SI 0 "register_operand" "+r")
- + (not:SI (match_dup 0)))]
- + "TARGET_V2_INSNS"
- + "rsub%?\t%0, -1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")])
- +
- +
- +;;=============================================================================
- +;; Bit load
- +;;-----------------------------------------------------------------------------
- +;; Load a bit into the Z and C flags.
- +;;=============================================================================
- +(define_insn "bldsi"
- + [(set (cc0)
- + (and:SI (match_operand:SI 0 "register_operand" "r")
- + (match_operand:SI 1 "one_bit_set_operand" "i")))]
- + ""
- + "bld\t%0, %p1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "bld")]
- + )
- +
- +
- +;;=============================================================================
- +;; Compare
- +;;-----------------------------------------------------------------------------
- +;; Compare reg0 with reg1 or an immediate value.
- +;;=============================================================================
- +
- +(define_expand "cmp<mode>"
- + [(set (cc0)
- + (compare:CMP
- + (match_operand:CMP 0 "register_operand" "")
- + (match_operand:CMP 1 "<CMP:cmp_predicate>" "")))]
- + ""
- + "{
- + avr32_compare_op0 = operands[0];
- + avr32_compare_op1 = operands[1];
- + }"
- +)
- +
- +(define_insn "cmp<mode>_internal"
- + [(set (cc0)
- + (compare:CMP
- + (match_operand:CMP 0 "register_operand" "r")
- + (match_operand:CMP 1 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")))]
- + ""
- + {
- +switch(GET_MODE(operands[0]))
- + {
- + case QImode:
- + avr32_branch_type = CMP_QI;
- + break;
- + case HImode:
- + avr32_branch_type = CMP_HI;
- + break;
- + case SImode:
- + avr32_branch_type = CMP_SI;
- + break;
- + case DImode:
- + avr32_branch_type = CMP_DI;
- + break;
- + default:
- + abort();
- + }
- + /* Check whether the next insn will output the compare itself. */
- + if (!next_insn_emits_cmp (insn))
- + set_next_insn_cond(insn,
- + avr32_output_cmp(get_next_insn_cond(insn), GET_MODE (operands[0]), operands[0], operands[1]));
- + return "";
- + }
- + [(set_attr "length" "4")
- + (set_attr "cc" "compare")])
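- +
- +;; Note on the scheme above: the template itself returns an empty string.
- +;; When a compare really has to be printed, avr32_output_cmp is expected to
- +;; emit the appropriate cp/cp.w here and to hand a possibly adjusted
- +;; condition to the following branch, scc or conditional insn; when the
- +;; next insn is one of the combined compare-and-conditional patterns
- +;; (next_insn_emits_cmp), that insn prints the compare itself and nothing
- +;; is emitted here.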
- +
- +(define_expand "cmpsf"
- + [(set (cc0)
- + (compare:SF
- + (match_operand:SF 0 "general_operand" "")
- + (match_operand:SF 1 "general_operand" "")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "{
- + if ( !REG_P(operands[0]) )
- + operands[0] = force_reg(SFmode, operands[0]);
- +
- + if ( !REG_P(operands[1]) )
- + operands[1] = force_reg(SFmode, operands[1]);
- +
- + avr32_compare_op0 = operands[0];
- + avr32_compare_op1 = operands[1];
- + emit_insn(gen_cmpsf_internal_uc3fp(operands[0], operands[1]));
- + DONE;
- + }"
- +)
- +
- +;;=============================================================================
- +;; Test if zero
- +;;-----------------------------------------------------------------------------
- +;; Compare reg against zero and set the condition codes.
- +;;=============================================================================
- +
- +
- +(define_expand "tstsi"
- + [(set (cc0)
- + (match_operand:SI 0 "register_operand" ""))]
- + ""
- + {
- + avr32_compare_op0 = operands[0];
- + avr32_compare_op1 = const0_rtx;
- + }
- +)
- +
- +(define_insn "tstsi_internal"
- + [(set (cc0)
- + (match_operand:SI 0 "register_operand" "r"))]
- + ""
- + {
- + /* Check whether the next insn will output the compare itself. */
- + if (!next_insn_emits_cmp (insn))
- + set_next_insn_cond(insn,
- + avr32_output_cmp(get_next_insn_cond(insn), SImode, operands[0], const0_rtx));
- +
- + return "";
- + }
- + [(set_attr "length" "2")
- + (set_attr "cc" "compare")])
- +
- +
- +(define_expand "tstdi"
- + [(set (cc0)
- + (match_operand:DI 0 "register_operand" ""))]
- + ""
- + {
- + avr32_compare_op0 = operands[0];
- + avr32_compare_op1 = const0_rtx;
- + }
- +)
- +
- +(define_insn "tstdi_internal"
- + [(set (cc0)
- + (match_operand:DI 0 "register_operand" "r"))]
- + ""
- + {
- + /* Check whether the next insn will output the compare itself. */
- + if (!next_insn_emits_cmp (insn))
- + set_next_insn_cond(insn,
- + avr32_output_cmp(get_next_insn_cond(insn), DImode, operands[0], const0_rtx));
- + return "";
- + }
- + [(set_attr "length" "4")
- + (set_attr "type" "alu2")
- + (set_attr "cc" "compare")])
- +
- +
- +
- +;;=============================================================================
- +;; Convert operands
- +;;-----------------------------------------------------------------------------
- +;;
- +;;=============================================================================
- +(define_insn "truncdisi2"
- + [(set (match_operand:SI 0 "general_operand" "")
- + (truncate:SI (match_operand:DI 1 "general_operand" "")))]
- + ""
- + "truncdisi2")
- +
- +;;=============================================================================
- +;; Extend
- +;;-----------------------------------------------------------------------------
- +;;
- +;;=============================================================================
- +
- +
- +(define_insn "extendhisi2"
- + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
- + (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
- + ""
- + {
- + switch ( which_alternative ){
- + case 0:
- + return "casts.h\t%0";
- + case 1:
- + return "bfexts\t%0, %1, 0, 16";
- + case 2:
- + case 3:
- + return "ld.sh\t%0, %1";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "length" "2,4,2,4")
- + (set_attr "cc" "set_ncz,set_ncz,none,none")
- + (set_attr "type" "alu,alu,load_rm,load_rm")])
- +
- +(define_insn "extendqisi2"
- + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
- + (sign_extend:SI (match_operand:QI 1 "extendqi_operand" "0,r,RKu00,m")))]
- + ""
- + {
- + switch ( which_alternative ){
- + case 0:
- + return "casts.b\t%0";
- + case 1:
- + return "bfexts\t%0, %1, 0, 8";
- + case 2:
- + case 3:
- + return "ld.sb\t%0, %1";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "length" "2,4,2,4")
- + (set_attr "cc" "set_ncz,set_ncz,none,none")
- + (set_attr "type" "alu,alu,load_rm,load_rm")])
- +
- +(define_insn "extendqihi2"
- + [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
- + (sign_extend:HI (match_operand:QI 1 "extendqi_operand" "0,r,RKu00,m")))]
- + ""
- + {
- + switch ( which_alternative ){
- + case 0:
- + return "casts.b\t%0";
- + case 1:
- + return "bfexts\t%0, %1, 0, 8";
- + case 2:
- + case 3:
- + return "ld.sb\t%0, %1";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "length" "2,4,2,4")
- + (set_attr "cc" "set_ncz,set_ncz,none,none")
- + (set_attr "type" "alu,alu,load_rm,load_rm")])
- +
- +
- +;;=============================================================================
- +;; Zero-extend
- +;;-----------------------------------------------------------------------------
- +;;
- +;;=============================================================================
- +
- +(define_insn "zero_extendhisi2"
- + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
- + (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
- + ""
- + {
- + switch ( which_alternative ){
- + case 0:
- + return "castu.h\t%0";
- + case 1:
- + return "bfextu\t%0, %1, 0, 16";
- + case 2:
- + case 3:
- + return "ld.uh\t%0, %1";
- + default:
- + abort();
- + }
- + }
- +
- + [(set_attr "length" "2,4,2,4")
- + (set_attr "cc" "set_ncz,set_ncz,none,none")
- + (set_attr "type" "alu,alu,load_rm,load_rm")])
- +
- +(define_insn "zero_extendqisi2"
- + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
- + (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
- + ""
- + {
- + switch ( which_alternative ){
- + case 0:
- + return "castu.b\t%0";
- + case 1:
- + return "bfextu\t%0, %1, 0, 8";
- + case 2:
- + case 3:
- + return "ld.ub\t%0, %1";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "length" "2,4,2,4")
- + (set_attr "cc" "set_ncz, set_ncz, none, none")
- + (set_attr "type" "alu, alu, load_rm, load_rm")])
- +
- +(define_insn "zero_extendqihi2"
- + [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
- + (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
- + ""
- + {
- + switch ( which_alternative ){
- + case 0:
- + return "castu.b\t%0";
- + case 1:
- + return "bfextu\t%0, %1, 0, 8";
- + case 2:
- + case 3:
- + return "ld.ub\t%0, %1";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "length" "2,4,2,4")
- + (set_attr "cc" "set_ncz, set_ncz, none, none")
- + (set_attr "type" "alu, alu, load_rm, load_rm")])
- +
- +
- +;;=============================================================================
- +;; Conditional load and extend insns
- +;;=============================================================================
- +(define_insn "ldsi<mode>_predicable_se"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (sign_extend:SI
- + (match_operand:INTM 1 "memory_operand" "<INTM:pred_mem_constraint>")))]
- + "TARGET_V2_INSNS"
- + "ld<INTM:load_postfix_s>%?\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "type" "load")
- + (set_attr "predicable" "yes")]
- +)
- +
- +(define_insn "ldsi<mode>_predicable_ze"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (zero_extend:SI
- + (match_operand:INTM 1 "memory_operand" "<INTM:pred_mem_constraint>")))]
- + "TARGET_V2_INSNS"
- + "ld<INTM:load_postfix_u>%?\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "type" "load")
- + (set_attr "predicable" "yes")]
- +)
- +
- +(define_insn "ldhi_predicable_ze"
- + [(set (match_operand:HI 0 "register_operand" "=r")
- + (zero_extend:HI
- + (match_operand:QI 1 "memory_operand" "RKs10")))]
- + "TARGET_V2_INSNS"
- + "ld.ub%?\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "type" "load")
- + (set_attr "predicable" "yes")]
- +)
- +
- +(define_insn "ldhi_predicable_se"
- + [(set (match_operand:HI 0 "register_operand" "=r")
- + (sign_extend:HI
- + (match_operand:QI 1 "memory_operand" "RKs10")))]
- + "TARGET_V2_INSNS"
- + "ld.sb%?\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "type" "load")
- + (set_attr "predicable" "yes")]
- +)
- +
- +;;=============================================================================
- +;; Conditional set register
- +;; sr{cond4} rd
- +;;-----------------------------------------------------------------------------
- +
- +;;Because of the same issue as with conditional moves and adds, we must
- +;;not separate the compare instruction from the scc instruction, as
- +;;they might be scheduled "badly".
- +
- +(define_expand "s<code>"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (any_cond:SI (cc0)
- + (const_int 0)))]
- +""
- +{
- + if(TARGET_HARD_FLOAT && TARGET_ARCH_FPU)
- + FAIL;
- +})
- +
- +(define_insn "*s<code>"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (any_cond:SI (cc0)
- + (const_int 0)))]
- + ""
- +{
- + return "sr<cond>\t%0";
- +}
- +[(set_attr "length" "2")
- +(set_attr "cc" "none")])
- +
- +(define_insn "seq"
- +[(set (match_operand:SI 0 "register_operand" "=r")
- +(eq:SI (cc0)
- + (const_int 0)))]
- + ""
- +"sreq\t%0"
- +[(set_attr "length" "2")
- +(set_attr "cc" "none")])
- +
- +(define_insn "sne"
- +[(set (match_operand:SI 0 "register_operand" "=r")
- +(ne:SI (cc0)
- + (const_int 0)))]
- + ""
- +"srne\t%0"
- + [(set_attr "length" "2")
- + (set_attr "cc" "none")])
- +
- +(define_insn "smi"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (unspec:SI [(cc0)
- + (const_int 0)] UNSPEC_COND_MI))]
- + ""
- + "srmi\t%0"
- + [(set_attr "length" "2")
- + (set_attr "cc" "none")])
- +
- +(define_insn "spl"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (unspec:SI [(cc0)
- + (const_int 0)] UNSPEC_COND_PL))]
- + ""
- + "srpl\t%0"
- + [(set_attr "length" "2")
- + (set_attr "cc" "none")])
- +
- +
- +;;=============================================================================
- +;; Conditional branch
- +;;-----------------------------------------------------------------------------
- +;; Branch to label if the specified condition codes are set.
- +;;=============================================================================
- +; branch if negative
- +(define_insn "bmi"
- + [(set (pc)
- + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_MI)
- + (label_ref (match_operand 0 "" ""))
- + (pc)))]
- + ""
- + "brmi %0"
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)] ; use compact branch
- + (const_int 4))) ; use extended branch
- + (set_attr "cc" "none")])
- +
- +(define_insn "*bmi-reverse"
- + [(set (pc)
- + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_MI)
- + (pc)
- + (label_ref (match_operand 0 "" ""))))]
- + ""
- + "brpl %0"
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)] ; use compact branch
- + (const_int 4))) ; use extended branch
- + (set_attr "cc" "none")])
- +
- +; branch if positive
- +(define_insn "bpl"
- + [(set (pc)
- + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_PL)
- + (label_ref (match_operand 0 "" ""))
- + (pc)))]
- + ""
- + "brpl %0"
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)] ; use compact branch
- + (const_int 4))) ; use extended branch
- + (set_attr "cc" "none")])
- +
- +(define_insn "*bpl-reverse"
- + [(set (pc)
- + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_PL)
- + (pc)
- + (label_ref (match_operand 0 "" ""))))]
- + ""
- + "brmi %0"
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)] ; use compact branch
- + (const_int 4))) ; use extended branch
- + (set_attr "cc" "none")])
- +
- +; generic conditional branch
- +(define_insn "b<code>"
- + [(set (pc)
- + (if_then_else (any_cond_b:CC (cc0)
- + (const_int 0))
- + (label_ref (match_operand 0 "" ""))
- + (pc)))]
- + ""
- + {
- + if (TARGET_HARD_FLOAT && TARGET_ARCH_FPU && (avr32_branch_type == CMP_SF))
- + return get_attr_length(insn) == 6 ? "brvs .+6\;br<cond> %0" : "brvs .+8\;br<cond> %0";
- + else
- + return "br<cond> %0";
- + }
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (if_then_else (eq (const_int 1)(symbol_ref "TARGET_HARD_FLOAT && TARGET_ARCH_FPU"))
- + (if_then_else
- + (and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 6)
- + (const_int 8))
- + (if_then_else
- + (and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)
- + (const_int 4))))
- + (set_attr "cc" "none")])
- +
- +(define_insn "beq"
- + [(set (pc)
- + (if_then_else (eq:CC (cc0)
- + (const_int 0))
- + (label_ref (match_operand 0 "" ""))
- + (pc)))]
- + ""
- + "breq %0";
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)] ; use compact branch
- + (const_int 4))) ; use extended branch
- + (set_attr "cc" "none")])
- +
- +(define_insn "bne"
- + [(set (pc)
- + (if_then_else (ne:CC (cc0)
- + (const_int 0))
- + (label_ref (match_operand 0 "" ""))
- + (pc)))]
- + ""
- + "brne %0";
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)] ; use compact branch
- + (const_int 4))) ; use extended branch
- + (set_attr "cc" "none")])
- +
- +(define_insn "b<code>"
- + [(set (pc)
- + (if_then_else (any_cond4:CC (cc0)
- + (const_int 0))
- + (label_ref (match_operand 0 "" ""))
- + (pc)))]
- + ""
- + {
- + if(TARGET_HARD_FLOAT && TARGET_ARCH_FPU && (avr32_branch_type == CMP_SF))
- + return "brvs .+8\;br<cond> %l0";
- + else
- + return "br<cond> %l0";
- + }
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(eq (const_int 1)(symbol_ref "TARGET_HARD_FLOAT && TARGET_ARCH_FPU"))
- + (const_int 8)]
- + (const_int 4)))
- + (set_attr "cc" "none")])
- +
- +(define_insn "*b<code>-reverse"
- + [(set (pc)
- + (if_then_else (any_cond_b:CC (cc0)
- + (const_int 0))
- + (pc)
- + (label_ref (match_operand 0 "" ""))))]
- + ""
- + {
- + if (TARGET_HARD_FLOAT && TARGET_ARCH_FPU && (avr32_branch_type == CMP_SF))
- + return "brvs %0\;br<invcond> %0";
- + else
- + return "br<invcond> %0";
- + }
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (if_then_else (eq (const_int 1)(symbol_ref "TARGET_HARD_FLOAT && TARGET_ARCH_FPU"))
- + (if_then_else
- + (and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 6)
- + (const_int 8))
- + (if_then_else
- + (and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)
- + (const_int 4))))
- + (set_attr "cc" "none")])
- +
- +(define_insn "*beq-reverse"
- + [(set (pc)
- + (if_then_else (eq:CC (cc0)
- + (const_int 0))
- + (pc)
- + (label_ref (match_operand 0 "" ""))))]
- + ""
- + "brne %0";
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)] ; use compact branch
- + (const_int 4))) ; use extended branch
- + (set_attr "cc" "none")])
- +
- +(define_insn "*bne-reverse"
- + [(set (pc)
- + (if_then_else (ne:CC (cc0)
- + (const_int 0))
- + (pc)
- + (label_ref (match_operand 0 "" ""))))]
- + ""
- + "breq %0";
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)] ; use compact branch
- + (const_int 4))) ; use extended branch
- + (set_attr "cc" "none")])
- +
- +(define_insn "*b<code>-reverse"
- + [(set (pc)
- + (if_then_else (any_cond4:CC (cc0)
- + (const_int 0))
- + (pc)
- + (label_ref (match_operand 0 "" ""))))]
- + ""
- + {
- + if (TARGET_HARD_FLOAT && TARGET_ARCH_FPU && (avr32_branch_type == CMP_SF))
- + return "brvs %l0\;br<invcond> %l0";
- + else
- + return "br<invcond> %0";
- + }
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(eq (const_int 1)(symbol_ref "TARGET_HARD_FLOAT && TARGET_ARCH_FPU"))
- + (const_int 8)]
- + (const_int 4)))
- + (set_attr "cc" "none")])
- +
- +;;=============================================================================
- +;; Conditional Add/Subtract
- +;;-----------------------------------------------------------------------------
- +;; sub{cond4} Rd, imm
- +;;=============================================================================
- +
- +
- +(define_expand "add<mode>cc"
- + [(set (match_operand:ADDCC 0 "register_operand" "")
- + (if_then_else:ADDCC (match_operator 1 "avr32_comparison_operator"
- + [(match_dup 4)
- + (match_dup 5)])
- + (match_operand:ADDCC 2 "register_operand" "")
- + (plus:ADDCC
- + (match_dup 2)
- + (match_operand:ADDCC 3 "" ""))))]
- + ""
- + {
- + if ( !(GET_CODE (operands[3]) == CONST_INT
- + || (TARGET_V2_INSNS && REG_P(operands[3]))) ){
- + FAIL;
- + }
- +
- + /* Delete compare instruction as it is merged into this instruction */
- + remove_insn (get_last_insn_anywhere ());
- +
- + operands[4] = avr32_compare_op0;
- + operands[5] = avr32_compare_op1;
- +
- + if ( TARGET_V2_INSNS
- + && REG_P(operands[3])
- + && REGNO(operands[0]) != REGNO(operands[2]) ){
- + emit_move_insn (operands[0], operands[2]);
- + operands[2] = operands[0];
- + }
- + }
- + )
- +
- +(define_insn "add<ADDCC:mode>cc_cmp<CMP:mode>_reg"
- + [(set (match_operand:ADDCC 0 "register_operand" "=r")
- + (if_then_else:ADDCC (match_operator 1 "avr32_comparison_operator"
- + [(match_operand:CMP 4 "register_operand" "r")
- + (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")])
- + (match_dup 0)
- + (plus:ADDCC
- + (match_operand:ADDCC 2 "register_operand" "r")
- + (match_operand:ADDCC 3 "register_operand" "r"))))]
- + "TARGET_V2_INSNS"
- + {
- + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
- + return "add%i1\t%0, %2, %3";
- + }
- + [(set_attr "length" "8")
- + (set_attr "cc" "cmp_cond_insn")])
- +
- +(define_insn "add<ADDCC:mode>cc_cmp<CMP:mode>"
- + [(set (match_operand:ADDCC 0 "register_operand" "=r")
- + (if_then_else:ADDCC (match_operator 1 "avr32_comparison_operator"
- + [(match_operand:CMP 4 "register_operand" "r")
- + (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")])
- + (match_operand:ADDCC 2 "register_operand" "0")
- + (plus:ADDCC
- + (match_dup 2)
- + (match_operand:ADDCC 3 "avr32_cond_immediate_operand" "Is08"))))]
- + ""
- + {
- + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
- + return "sub%i1\t%0, -%3";
- + }
- + [(set_attr "length" "8")
- + (set_attr "cc" "cmp_cond_insn")])
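- +
- +;; Illustration (placeholder registers): for something like
- +;;
- +;;   x = (a < b) ? x : x + 4;
- +;;
- +;; the addition is only wanted when the condition is false, so the pattern
- +;; above prints the compare followed by a conditional subtract of the
- +;; negated immediate, roughly "cp.w rA, rB" + "subge rX, -4".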
- +
- +;;=============================================================================
- +;; Conditional Move
- +;;-----------------------------------------------------------------------------
- +;; mov{cond4} Rd, (Rs/imm)
- +;;=============================================================================
- +(define_expand "mov<mode>cc"
- + [(set (match_operand:MOVCC 0 "register_operand" "")
- + (if_then_else:MOVCC (match_operator 1 "avr32_comparison_operator"
- + [(match_dup 4)
- + (match_dup 5)])
- + (match_operand:MOVCC 2 "avr32_cond_register_immediate_operand" "")
- + (match_operand:MOVCC 3 "avr32_cond_register_immediate_operand" "")))]
- + ""
- + {
- + /* Delete compare instruction as it is merged into this instruction */
- + remove_insn (get_last_insn_anywhere ());
- +
- + operands[4] = avr32_compare_op0;
- + operands[5] = avr32_compare_op1;
- + }
- + )
- +
- +
- +(define_insn "mov<MOVCC:mode>cc_cmp<CMP:mode>"
- + [(set (match_operand:MOVCC 0 "register_operand" "=r,r,r")
- + (if_then_else:MOVCC (match_operator 1 "avr32_comparison_operator"
- + [(match_operand:CMP 4 "register_operand" "r,r,r")
- + (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>,<CMP:cmp_constraint>,<CMP:cmp_constraint>")])
- + (match_operand:MOVCC 2 "avr32_cond_register_immediate_operand" "0, rKs08,rKs08")
- + (match_operand:MOVCC 3 "avr32_cond_register_immediate_operand" "rKs08,0,rKs08")))]
- + ""
- + {
- + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
- +
- + switch( which_alternative ){
- + case 0:
- + return "mov%i1 %0, %3";
- + case 1:
- + return "mov%1 %0, %2";
- + case 2:
- + return "mov%1 %0, %2\;mov%i1 %0, %3";
- + default:
- + abort();
- + }
- +
- + }
- + [(set_attr "length" "8,8,12")
- + (set_attr "cc" "cmp_cond_insn")])
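- +
- +;; Illustration (placeholder registers): for
- +;;
- +;;   x = (a == b) ? 5 : x;
- +;;
- +;; the "else" value already lives in the destination (second alternative),
- +;; so the expected output is roughly "cp.w rA, rB" + "moveq rX, 5".  The
- +;; third alternative emits two conditional moves, one per arm, when neither
- +;; source is already in the destination register.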
- +
- +
- +
- +
- +;;=============================================================================
- +;; jump
- +;;-----------------------------------------------------------------------------
- +;; Jump inside a function; an unconditional branch to a label.
- +;;=============================================================================
- +(define_insn "jump"
- + [(set (pc)
- + (label_ref (match_operand 0 "" "")))]
- + ""
- + {
- + if (get_attr_length(insn) > 4)
- + return "Can't jump this far";
- + return (get_attr_length(insn) == 2 ?
- + "rjmp %0" : "bral %0");
- + }
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 1022))
- + (le (minus (pc) (match_dup 0)) (const_int 1024)))
- + (const_int 2) ; use rjmp
- + (le (match_dup 0) (const_int 1048575))
- + (const_int 4)] ; use bral
- + (const_int 8))) ; do something else
- + (set_attr "cc" "none")])
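- +
- +;; Note on the length computation above: targets within roughly +/-1KB of
- +;; the jump get the 2-byte "rjmp", targets passing the second test (against
- +;; 1048575) get the 4-byte "bral", and the 8-byte fallback is only a
- +;; placeholder; the output code deliberately rejects it with the
- +;; "Can't jump this far" string.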
- +
- +;;=============================================================================
- +;; call
- +;;-----------------------------------------------------------------------------
- +;; Subroutine call instruction returning no value.
- +;;=============================================================================
- +(define_insn "call_internal"
- + [(parallel [(call (mem:SI (match_operand:SI 0 "avr32_call_operand" "r,U,T,W"))
- + (match_operand 1 "" ""))
- + (clobber (reg:SI LR_REGNUM))])]
- + ""
- + {
- +
- + /* Check for a flashvault call. */
- + if (avr32_flashvault_call (SYMBOL_REF_DECL (operands[0])))
- + {
- + /* Assembly is already emitted. */
- + return "";
- + }
- +
- + switch (which_alternative) {
- + case 0:
- + return "icall\t%0";
- + case 1:
- + return "rcall\t%0";
- + case 2:
- + return "mcall\t%0";
- + case 3:
- + if (TARGET_HAS_ASM_ADDR_PSEUDOS)
- + return "call\t%0";
- + else
- + return "mcall\tr6[%0@got]";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "type" "call")
- + (set_attr "length" "2,4,4,10")
- + (set_attr "cc" "clobber")])
- +
- +
- +(define_expand "call"
- + [(parallel [(call (match_operand:SI 0 "" "")
- + (match_operand 1 "" ""))
- + (clobber (reg:SI LR_REGNUM))])]
- + ""
- + {
- + rtx call_address;
- + if ( GET_CODE(operands[0]) != MEM )
- + FAIL;
- +
- + call_address = XEXP(operands[0], 0);
- +
- + /* If assembler supports call pseudo insn and the call address is a symbol then nothing special needs to be done. */
- + if (TARGET_HAS_ASM_ADDR_PSEUDOS && (GET_CODE(call_address) == SYMBOL_REF) )
- + {
- + /* We must, however, mark the function as using the GOT if flag_pic is set, since the call insn might turn into an mcall using the GOT ptr register. */
- + if (flag_pic)
- + {
- + crtl->uses_pic_offset_table = 1;
- + emit_call_insn(gen_call_internal(call_address, operands[1]));
- + DONE;
- + }
- + }
- + else
- + {
- + if (flag_pic && GET_CODE(call_address) == SYMBOL_REF )
- + {
- + crtl->uses_pic_offset_table = 1;
- + emit_call_insn(gen_call_internal(call_address, operands[1]));
- + DONE;
- + }
- +
- + if (!SYMBOL_REF_RCALL_FUNCTION_P(operands[0]) )
- + {
- + if (optimize_size && GET_CODE(call_address) == SYMBOL_REF )
- + {
- + call_address = force_const_mem(SImode, call_address);
- + }
- + else
- + {
- + call_address = force_reg(SImode, call_address);
- + }
- + }
- + }
- + emit_call_insn(gen_call_internal(call_address, operands[1]));
- + DONE;
- +
- + }
- +)
- +
- +;;=============================================================================
- +;; call_value
- +;;-----------------------------------------------------------------------------
- +;; Subroutine call instruction returning a value.
- +;;=============================================================================
- +(define_expand "call_value"
- + [(parallel [(set (match_operand:SI 0 "" "")
- + (call (match_operand:SI 1 "" "")
- + (match_operand 2 "" "")))
- + (clobber (reg:SI LR_REGNUM))])]
- + ""
- + {
- + rtx call_address;
- + if ( GET_CODE(operands[1]) != MEM )
- + FAIL;
- +
- + call_address = XEXP(operands[1], 0);
- +
- + /* Check for a flashvault call.
- + if (GET_CODE (call_address) == SYMBOL_REF
- + && avr32_flashvault_call (SYMBOL_REF_DECL (call_address)))
- + DONE;
- +
- + */
- +
- + /* If assembler supports call pseudo insn and the call
- + address is a symbol then nothing special needs to be done. */
- + if ( TARGET_HAS_ASM_ADDR_PSEUDOS
- + && (GET_CODE(call_address) == SYMBOL_REF) ){
- + /* We must however mark the function as using the GOT if
- + flag_pic is set, since the call insn might turn into
- + an mcall using the GOT ptr register. */
- + if ( flag_pic ) {
- + crtl->uses_pic_offset_table = 1;
- + emit_call_insn(gen_call_value_internal(operands[0], call_address, operands[2]));
- + DONE;
- + }
- + } else {
- + if ( flag_pic &&
- + GET_CODE(call_address) == SYMBOL_REF ){
- + crtl->uses_pic_offset_table = 1;
- + emit_call_insn(gen_call_value_internal(operands[0], call_address, operands[2]));
- + DONE;
- + }
- +
- + if ( !SYMBOL_REF_RCALL_FUNCTION_P(operands[1]) ){
- + if ( optimize_size &&
- + GET_CODE(call_address) == SYMBOL_REF){
- + call_address = force_const_mem(SImode, call_address);
- + } else {
- + call_address = force_reg(SImode, call_address);
- + }
- + }
- + }
- + emit_call_insn(gen_call_value_internal(operands[0], call_address,
- + operands[2]));
- + DONE;
- +
- + })
- +
- +(define_insn "call_value_internal"
- + [(parallel [(set (match_operand 0 "register_operand" "=r,r,r,r")
- + (call (mem:SI (match_operand:SI 1 "avr32_call_operand" "r,U,T,W"))
- + (match_operand 2 "" "")))
- + (clobber (reg:SI LR_REGNUM))])]
- + ;; Operand 2 not used on the AVR32.
- + ""
- + {
- + /* Check for a flashvault call. */
- + if (avr32_flashvault_call (SYMBOL_REF_DECL (operands[1])))
- + {
- + /* Assembly is already emitted. */
- + return "";
- + }
- +
- +
- + switch (which_alternative) {
- + case 0:
- + return "icall\t%1";
- + case 1:
- + return "rcall\t%1";
- + case 2:
- + return "mcall\t%1";
- + case 3:
- + if ( TARGET_HAS_ASM_ADDR_PSEUDOS )
- + return "call\t%1";
- + else
- + return "mcall\tr6[%1@got]";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "type" "call")
- + (set_attr "length" "2,4,4,10")
- + (set_attr "cc" "call_set")])
- +
- +
- +;;=============================================================================
- +;; untyped_call
- +;;-----------------------------------------------------------------------------
- +;; Subroutine call instruction returning a value of any type.
- +;; The code is adapted from m68k.md.
- +;; Fixme!
- +;;=============================================================================
- +(define_expand "untyped_call"
- + [(parallel [(call (match_operand 0 "avr32_call_operand" "")
- + (const_int 0))
- + (match_operand 1 "" "")
- + (match_operand 2 "" "")])]
- + ""
- + {
- + int i;
- +
- + emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
- +
- + for (i = 0; i < XVECLEN (operands[2], 0); i++) {
- + rtx set = XVECEXP (operands[2], 0, i);
- + emit_move_insn (SET_DEST (set), SET_SRC (set));
- + }
- +
- + /* The optimizer does not know that the call sets the function value
- + registers we stored in the result block. We avoid problems by
- + claiming that all hard registers are used and clobbered at this
- + point. */
- + emit_insn (gen_blockage ());
- +
- + DONE;
- + })
- +
- +
- +;;=============================================================================
- +;; return
- +;;=============================================================================
- +
- +(define_insn "return"
- + [(return)]
- + "USE_RETURN_INSN (FALSE)"
- + {
- + avr32_output_return_instruction(TRUE, FALSE, NULL, NULL);
- + return "";
- + }
- + [(set_attr "length" "4")
- + (set_attr "type" "call")]
- + )
- +
- +
- +(define_insn "return_cond"
- + [(set (pc)
- + (if_then_else (match_operand 0 "avr32_comparison_operand" "")
- + (return)
- + (pc)))]
- + "USE_RETURN_INSN (TRUE)"
- + "ret%0\tr12";
- + [(set_attr "type" "call")])
- +
- +(define_insn "return_cond_predicable"
- + [(return)]
- + "USE_RETURN_INSN (TRUE)"
- + "ret%?\tr12";
- + [(set_attr "type" "call")
- + (set_attr "predicable" "yes")])
- +
- +
- +(define_insn "return_imm"
- + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
- + (use (reg RETVAL_REGNUM))
- + (return)])]
- + "USE_RETURN_INSN (FALSE) &&
- + ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
- + {
- + avr32_output_return_instruction(TRUE, FALSE, NULL, operands[0]);
- + return "";
- + }
- + [(set_attr "length" "4")
- + (set_attr "type" "call")]
- + )
- +
- +(define_insn "return_imm_cond"
- + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
- + (use (reg RETVAL_REGNUM))
- + (set (pc)
- + (if_then_else (match_operand 1 "avr32_comparison_operand" "")
- + (return)
- + (pc)))])]
- + "USE_RETURN_INSN (TRUE) &&
- + ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
- + "ret%1\t%0";
- + [(set_attr "type" "call")]
- + )
- +
- +(define_insn "return_imm_predicable"
- + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
- + (use (reg RETVAL_REGNUM))
- + (return)])]
- + "USE_RETURN_INSN (TRUE) &&
- + ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
- + "ret%?\t%0";
- + [(set_attr "type" "call")
- + (set_attr "predicable" "yes")])
- +
- +(define_insn "return_<mode>reg"
- + [(set (reg RETVAL_REGNUM) (match_operand:MOVM 0 "register_operand" "r"))
- + (use (reg RETVAL_REGNUM))
- + (return)]
- + "USE_RETURN_INSN (TRUE)"
- + "ret%?\t%0";
- + [(set_attr "type" "call")
- + (set_attr "predicable" "yes")])
- +
- +(define_insn "return_<mode>reg_cond"
- + [(set (reg RETVAL_REGNUM) (match_operand:MOVM 0 "register_operand" "r"))
- + (use (reg RETVAL_REGNUM))
- + (set (pc)
- + (if_then_else (match_operator 1 "avr32_comparison_operator"
- + [(cc0) (const_int 0)])
- + (return)
- + (pc)))]
- + "USE_RETURN_INSN (TRUE)"
- + "ret%1\t%0";
- + [(set_attr "type" "call")])
- +
- +;;=============================================================================
- +;; nonlocal_goto_receiver
- +;;-----------------------------------------------------------------------------
- +;; For targets with a return stack we must make sure to flush the return stack
- +;; since it will be corrupt after a nonlocal goto.
- +;;=============================================================================
- +(define_expand "nonlocal_goto_receiver"
- + [(const_int 0)]
- + "TARGET_RETURN_STACK"
- + "
- + {
- + emit_insn ( gen_frs() );
- + DONE;
- + }
- + "
- + )
- +
- +
- +;;=============================================================================
- +;; builtin_setjmp_receiver
- +;;-----------------------------------------------------------------------------
- +;; For pic code we need to reload the pic register.
- +;; For targets with a return stack we must make sure to flush the return stack
- +;; since it will probably be corrupted.
- +;;=============================================================================
- +(define_expand "builtin_setjmp_receiver"
- + [(label_ref (match_operand 0 "" ""))]
- + "flag_pic"
- + "
- + {
- + if ( TARGET_RETURN_STACK )
- + emit_insn ( gen_frs() );
- +
- + avr32_load_pic_register ();
- + DONE;
- + }
- + "
- +)
- +
- +
- +;;=============================================================================
- +;; indirect_jump
- +;;-----------------------------------------------------------------------------
- +;; Jump to an address in reg or memory.
- +;;=============================================================================
- +(define_expand "indirect_jump"
- + [(set (pc)
- + (match_operand:SI 0 "general_operand" ""))]
- + ""
- + {
- + /* One of the ops has to be in a register. */
- + if ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS )
- + && !avr32_legitimate_pic_operand_p(operands[0]) )
- + operands[0] = legitimize_pic_address (operands[0], SImode, 0);
- + else if ( flag_pic && avr32_address_operand(operands[0], GET_MODE(operands[0])) )
- + /* If we have an address operand then this function uses the pic register. */
- + crtl->uses_pic_offset_table = 1;
- + })
- +
- +
- +(define_insn "indirect_jump_internal"
- + [(set (pc)
- + (match_operand:SI 0 "avr32_non_rmw_general_operand" "r,m,W"))]
- + ""
- + {
- + switch( which_alternative ){
- + case 0:
- + return "mov\tpc, %0";
- + case 1:
- + if ( avr32_const_pool_ref_operand(operands[0], GET_MODE(operands[0])) )
- + return "lddpc\tpc, %0";
- + else
- + return "ld.w\tpc, %0";
- + case 2:
- + if ( flag_pic )
- + return "ld.w\tpc, r6[%0@got]";
- + else
- + return "lda.w\tpc, %0";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "length" "2,4,8")
- + (set_attr "type" "call,call,call")
- + (set_attr "cc" "none,none,clobber")])
- +
- +
- +
- +;;=============================================================================
- +;; casesi and tablejump
- +;;=============================================================================
- +(define_insn "tablejump_add"
- + [(set (pc)
- + (plus:SI (match_operand:SI 0 "register_operand" "r")
- + (mult:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "immediate_operand" "Ku04" ))))
- + (use (label_ref (match_operand 3 "" "")))]
- + "flag_pic &&
- + ((INTVAL(operands[2]) == 0) || (INTVAL(operands[2]) == 2) ||
- + (INTVAL(operands[2]) == 4) || (INTVAL(operands[2]) == 8))"
- + "add\tpc, %0, %1 << %p2"
- + [(set_attr "length" "4")
- + (set_attr "cc" "clobber")])
- +
- +(define_insn "tablejump_insn"
- + [(set (pc) (match_operand:SI 0 "memory_operand" "m"))
- + (use (label_ref (match_operand 1 "" "")))]
- + "!flag_pic"
- + "ld.w\tpc, %0"
- + [(set_attr "length" "4")
- + (set_attr "type" "call")
- + (set_attr "cc" "none")])
- +
- +(define_expand "casesi"
- + [(match_operand:SI 0 "register_operand" "") ; index to jump on
- + (match_operand:SI 1 "const_int_operand" "") ; lower bound
- + (match_operand:SI 2 "const_int_operand" "") ; total range
- + (match_operand:SI 3 "" "") ; table label
- + (match_operand:SI 4 "" "")] ; Out of range label
- + ""
- + "
- + {
- + rtx reg;
- + rtx index = operands[0];
- + rtx low_bound = operands[1];
- + rtx range = operands[2];
- + rtx table_label = operands[3];
- + rtx oor_label = operands[4];
- +
- + index = force_reg ( SImode, index );
- + if (low_bound != const0_rtx)
- + {
- + if (!avr32_const_ok_for_constraint_p(INTVAL (low_bound), 'I', \"Is21\")){
- + reg = force_reg(SImode, GEN_INT (INTVAL (low_bound)));
- + emit_insn (gen_subsi3 (reg, index,
- + reg));
- + } else {
- + reg = gen_reg_rtx (SImode);
- + emit_insn (gen_addsi3 (reg, index,
- + GEN_INT (-INTVAL (low_bound))));
- + }
- + index = reg;
- + }
- +
- + if (!avr32_const_ok_for_constraint_p (INTVAL (range), 'K', \"Ks21\"))
- + range = force_reg (SImode, range);
- +
- + emit_cmp_and_jump_insns ( index, range, GTU, NULL_RTX, SImode, 1, oor_label );
- + reg = gen_reg_rtx (SImode);
- + emit_move_insn ( reg, gen_rtx_LABEL_REF (VOIDmode, table_label));
- +
- + if ( flag_pic )
- + emit_jump_insn ( gen_tablejump_add ( reg, index, GEN_INT(4), table_label));
- + else
- + emit_jump_insn (
- + gen_tablejump_insn ( gen_rtx_MEM ( SImode,
- + gen_rtx_PLUS ( SImode,
- + reg,
- + gen_rtx_MULT ( SImode,
- + index,
- + GEN_INT(4)))),
- + table_label));
- + DONE;
- + }"
- +)
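- +
- +;; Illustration of the expansion above (non-PIC, placeholder names): for a
- +;; switch with lower bound 10 and range 3 the generated RTL amounts to
- +;;
- +;;   rI = index - 10                     ; bias by the lower bound
- +;;   if (rI >u 3) goto default_label     ; emit_cmp_and_jump_insns, GTU
- +;;   rT = &table
- +;;   pc = *(rT + rI * 4)                 ; tablejump_insn ("ld.w pc, ...")
- +;;
- +;; while with flag_pic the last step becomes tablejump_add,
- +;; i.e. pc = rT + rI * 4 ("add pc, rT, rI << 2").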
- +
- +
- +
- +(define_insn "prefetch"
- + [(prefetch (match_operand:SI 0 "avr32_ks16_address_operand" "p")
- + (match_operand 1 "const_int_operand" "")
- + (match_operand 2 "const_int_operand" ""))]
- + ""
- + {
- + return "pref\t%0";
- + }
- +
- + [(set_attr "length" "4")
- + (set_attr "type" "load")
- + (set_attr "cc" "none")])
- +
- +
- +
- +;;=============================================================================
- +;; prologue
- +;;-----------------------------------------------------------------------------
- +;; This pattern, if defined, emits RTL for entry to a function. The function
- +;; entry is responsible for setting up the stack frame, initializing the frame
- +;; pointer register, saving callee-saved registers, etc.
- +;;=============================================================================
- +(define_expand "prologue"
- + [(clobber (const_int 0))]
- + ""
- + "
- + avr32_expand_prologue();
- + DONE;
- + "
- + )
- +
- +;;=============================================================================
- +;; eh_return
- +;;-----------------------------------------------------------------------------
- +;; This pattern, if defined, affects the way __builtin_eh_return, and
- +;; thence the call frame exception handling library routines, are
- +;; built. It is intended to handle non-trivial actions needed along
- +;; the abnormal return path.
- +;;
- +;; The address of the exception handler to which the function should
- +;; return is passed as operand to this pattern. It will normally need
- +;; to copied by the pattern to some special register or memory
- +;; location. If the pattern needs to determine the location of the
- +;; target call frame in order to do so, it may use
- +;; EH_RETURN_STACKADJ_RTX, if defined; it will have already been
- +;; assigned.
- +;;
- +;; If this pattern is not defined, the default action will be to
- +;; simply copy the return address to EH_RETURN_HANDLER_RTX. Either
- +;; that macro or this pattern needs to be defined if call frame
- +;; exception handling is to be used.
- +
- +;; We can't expand this before we know where the link register is stored.
- +(define_insn_and_split "eh_return"
- + [(unspec_volatile [(match_operand:SI 0 "register_operand" "r")]
- + VUNSPEC_EH_RETURN)
- + (clobber (match_scratch:SI 1 "=&r"))]
- + ""
- + "#"
- + "reload_completed"
- + [(const_int 0)]
- + "
- + {
- + avr32_set_return_address (operands[0], operands[1]);
- + DONE;
- + }"
- + )
- +
- +
- +;;=============================================================================
- +;; ffssi2
- +;;-----------------------------------------------------------------------------
- +(define_insn "ffssi2"
- + [ (set (match_operand:SI 0 "register_operand" "=r")
- + (ffs:SI (match_operand:SI 1 "register_operand" "r"))) ]
- + ""
- + "mov %0, %1
- + brev %0
- + clz %0, %0
- + sub %0, -1
- + cp %0, 33
- + moveq %0, 0"
- + [(set_attr "length" "18")
- + (set_attr "cc" "clobber")]
- + )
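- +
- +;; How the sequence above computes ffs(x): brev moves the lowest set bit of
- +;; the copied value to the top, clz then gives its bit index, and
- +;; "sub %0, -1" turns the 0-based index into the 1-based ffs() result.
- +;; E.g. x = 0x8: brev -> 0x10000000, clz = 3, +1 = 4 = ffs(8).  For x = 0,
- +;; clz yields 32, the +1 makes it 33, and the cp/moveq pair maps that back
- +;; to 0.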
- +
- +
- +
- +;;=============================================================================
- +;; swap_h
- +;;-----------------------------------------------------------------------------
- +(define_insn "*swap_h"
- + [ (set (match_operand:SI 0 "register_operand" "=r")
- + (ior:SI (ashift:SI (match_dup 0) (const_int 16))
- + (lshiftrt:SI (match_dup 0) (const_int 16))))]
- + ""
- + "swap.h %0"
- + [(set_attr "length" "2")]
- + )
- +
- +(define_insn_and_split "bswap_16"
- + [ (set (match_operand:HI 0 "avr32_bswap_operand" "=r,RKs13,r")
- + (ior:HI (and:HI (lshiftrt:HI (match_operand:HI 1 "avr32_bswap_operand" "r,r,RKs13")
- + (const_int 8))
- + (const_int 255))
- + (ashift:HI (and:HI (match_dup 1)
- + (const_int 255))
- + (const_int 8))))]
- + ""
- + {
- + switch ( which_alternative ){
- + case 0:
- + if ( REGNO(operands[0]) == REGNO(operands[1]))
- + return "swap.bh\t%0";
- + else
- + return "mov\t%0, %1\;swap.bh\t%0";
- + case 1:
- + return "stswp.h\t%0, %1";
- + case 2:
- + return "ldswp.sh\t%0, %1";
- + default:
- + abort();
- + }
- + }
- +
- + "(reload_completed &&
- + REG_P(operands[0]) && REG_P(operands[1])
- + && (REGNO(operands[0]) != REGNO(operands[1])))"
- + [(set (match_dup 0) (match_dup 1))
- + (set (match_dup 0)
- + (ior:HI (and:HI (lshiftrt:HI (match_dup 0)
- + (const_int 8))
- + (const_int 255))
- + (ashift:HI (and:HI (match_dup 0)
- + (const_int 255))
- + (const_int 8))))]
- + ""
- +
- + [(set_attr "length" "4,4,4")
- + (set_attr "type" "alu,store,load_rm")]
- + )
- +
- +(define_insn_and_split "bswap_32"
- + [ (set (match_operand:SI 0 "avr32_bswap_operand" "=r,RKs14,r")
- + (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_operand:SI 1 "avr32_bswap_operand" "r,r,RKs14")
- + (const_int -16777216))
- + (const_int 24))
- + (lshiftrt:SI (and:SI (match_dup 1)
- + (const_int 16711680))
- + (const_int 8)))
- + (ior:SI (ashift:SI (and:SI (match_dup 1)
- + (const_int 65280))
- + (const_int 8))
- + (ashift:SI (and:SI (match_dup 1)
- + (const_int 255))
- + (const_int 24)))))]
- + ""
- + {
- + switch ( which_alternative ){
- + case 0:
- + if ( REGNO(operands[0]) == REGNO(operands[1]))
- + return "swap.b\t%0";
- + else
- + return "#";
- + case 1:
- + return "stswp.w\t%0, %1";
- + case 2:
- + return "ldswp.w\t%0, %1";
- + default:
- + abort();
- + }
- + }
- + "(reload_completed &&
- + REG_P(operands[0]) && REG_P(operands[1])
- + && (REGNO(operands[0]) != REGNO(operands[1])))"
- + [(set (match_dup 0) (match_dup 1))
- + (set (match_dup 0)
- + (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_dup 0)
- + (const_int -16777216))
- + (const_int 24))
- + (lshiftrt:SI (and:SI (match_dup 0)
- + (const_int 16711680))
- + (const_int 8)))
- + (ior:SI (ashift:SI (and:SI (match_dup 0)
- + (const_int 65280))
- + (const_int 8))
- + (ashift:SI (and:SI (match_dup 0)
- + (const_int 255))
- + (const_int 24)))))]
- + ""
- +
- + [(set_attr "length" "4,4,4")
- + (set_attr "type" "alu,store,load_rm")]
- + )
- +
- +
- +;;=============================================================================
- +;; blockage
- +;;-----------------------------------------------------------------------------
- +;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
- +;; all of memory. This blocks insns from being moved across this point.
- +
- +(define_insn "blockage"
- + [(unspec_volatile [(const_int 0)] VUNSPEC_BLOCKAGE)]
- + ""
- + ""
- + [(set_attr "length" "0")]
- +)
- +
- +;;=============================================================================
- +;; clzsi2
- +;;-----------------------------------------------------------------------------
- +(define_insn "clzsi2"
- + [ (set (match_operand:SI 0 "register_operand" "=r")
- + (clz:SI (match_operand:SI 1 "register_operand" "r"))) ]
- + ""
- + "clz %0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "set_z")]
- + )
- +
- +;;=============================================================================
- +;; ctzsi2
- +;;-----------------------------------------------------------------------------
- +(define_insn "ctzsi2"
- + [ (set (match_operand:SI 0 "register_operand" "=r,r")
- + (ctz:SI (match_operand:SI 1 "register_operand" "0,r"))) ]
- + ""
- + "@
- + brev\t%0\;clz\t%0, %0
- + mov\t%0, %1\;brev\t%0\;clz\t%0, %0"
- + [(set_attr "length" "8")
- + (set_attr "cc" "set_z")]
- + )
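- +
- +;; Same bit-reversal trick as in ffssi2 above, without the +1 and the zero
- +;; fixup: ctz(x) = clz(brev(x)), e.g. ctz(0x8) = clz(0x10000000) = 3.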
- +
- +;;=============================================================================
- +;; cache instructions
- +;;-----------------------------------------------------------------------------
- +(define_insn "cache"
- + [ (unspec_volatile [(match_operand:SI 0 "avr32_ks11_address_operand" "p")
- + (match_operand:SI 1 "immediate_operand" "Ku05")] VUNSPEC_CACHE)]
- + ""
- + "cache %0, %1"
- + [(set_attr "length" "4")]
- + )
- +
- +(define_insn "sync"
- + [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku08")] VUNSPEC_SYNC)]
- + ""
- + "sync %0"
- + [(set_attr "length" "4")]
- + )
- +
- +;;=============================================================================
- +;; TLB instructions
- +;;-----------------------------------------------------------------------------
- +(define_insn "tlbr"
- + [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBR)]
- + ""
- + "tlbr"
- + [(set_attr "length" "2")]
- + )
- +
- +(define_insn "tlbw"
- + [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBW)]
- + ""
- + "tlbw"
- + [(set_attr "length" "2")]
- + )
- +
- +(define_insn "tlbs"
- + [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBS)]
- + ""
- + "tlbs"
- + [(set_attr "length" "2")]
- + )
- +
- +;;=============================================================================
- +;; Breakpoint instruction
- +;;-----------------------------------------------------------------------------
- +(define_insn "breakpoint"
- + [ (unspec_volatile [(const_int 0)] VUNSPEC_BREAKPOINT)]
- + ""
- + "breakpoint"
- + [(set_attr "length" "2")]
- + )
- +
- +
- +;;=============================================================================
- +;; mtsr/mfsr instruction
- +;;-----------------------------------------------------------------------------
- +(define_insn "mtsr"
- + [ (unspec_volatile [(match_operand 0 "immediate_operand" "i")
- + (match_operand:SI 1 "register_operand" "r")] VUNSPEC_MTSR)]
- + ""
- + "mtsr\t%0, %1"
- + [(set_attr "length" "4")]
- + )
- +
- +(define_insn "mfsr"
- + [ (set (match_operand:SI 0 "register_operand" "=r")
- + (unspec_volatile:SI [(match_operand 1 "immediate_operand" "i")] VUNSPEC_MFSR)) ]
- + ""
- + "mfsr\t%0, %1"
- + [(set_attr "length" "4")]
- + )
- +
- +;;=============================================================================
- +;; mtdr/mfdr instruction
- +;;-----------------------------------------------------------------------------
- +(define_insn "mtdr"
- + [ (unspec_volatile [(match_operand 0 "immediate_operand" "i")
- + (match_operand:SI 1 "register_operand" "r")] VUNSPEC_MTDR)]
- + ""
- + "mtdr\t%0, %1"
- + [(set_attr "length" "4")]
- + )
- +
- +(define_insn "mfdr"
- + [ (set (match_operand:SI 0 "register_operand" "=r")
- + (unspec_volatile:SI [(match_operand 1 "immediate_operand" "i")] VUNSPEC_MFDR)) ]
- + ""
- + "mfdr\t%0, %1"
- + [(set_attr "length" "4")]
- + )
- +
- +;;=============================================================================
- +;; musfr
- +;;-----------------------------------------------------------------------------
- +(define_insn "musfr"
- + [ (unspec_volatile [(match_operand:SI 0 "register_operand" "r")] VUNSPEC_MUSFR)]
- + ""
- + "musfr\t%0"
- + [(set_attr "length" "2")
- + (set_attr "cc" "clobber")]
- + )
- +
- +(define_insn "mustr"
- + [ (set (match_operand:SI 0 "register_operand" "=r")
- + (unspec_volatile:SI [(const_int 0)] VUNSPEC_MUSTR)) ]
- + ""
- + "mustr\t%0"
- + [(set_attr "length" "2")]
- + )
- +
- +(define_insn "ssrf"
- + [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku05")] VUNSPEC_SSRF)]
- + ""
- + "ssrf %0"
- + [(set_attr "length" "2")
- + (set_attr "cc" "clobber")]
- + )
- +
- +(define_insn "csrf"
- + [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku05")] VUNSPEC_CSRF)]
- + ""
- + "csrf %0"
- + [(set_attr "length" "2")
- + (set_attr "cc" "clobber")]
- + )
- +
- +;;=============================================================================
- +;; Flush Return Stack instruction
- +;;-----------------------------------------------------------------------------
- +(define_insn "frs"
- + [ (unspec_volatile [(const_int 0)] VUNSPEC_FRS)]
- + ""
- + "frs"
- + [(set_attr "length" "2")
- + (set_attr "cc" "none")]
- + )
- +
- +
- +;;=============================================================================
- +;; Saturation Round Scale instruction
- +;;-----------------------------------------------------------------------------
- +(define_insn "sats"
- + [ (set (match_operand:SI 0 "register_operand" "+r")
- + (unspec:SI [(match_dup 0)
- + (match_operand 1 "immediate_operand" "Ku05")
- + (match_operand 2 "immediate_operand" "Ku05")]
- + UNSPEC_SATS)) ]
- + "TARGET_DSP"
- + "sats\t%0 >> %1, %2"
- + [(set_attr "type" "alu_sat")
- + (set_attr "length" "4")]
- + )
- +
- +(define_insn "satu"
- + [ (set (match_operand:SI 0 "register_operand" "+r")
- + (unspec:SI [(match_dup 0)
- + (match_operand 1 "immediate_operand" "Ku05")
- + (match_operand 2 "immediate_operand" "Ku05")]
- + UNSPEC_SATU)) ]
- + "TARGET_DSP"
- + "satu\t%0 >> %1, %2"
- + [(set_attr "type" "alu_sat")
- + (set_attr "length" "4")]
- + )
- +
- +(define_insn "satrnds"
- + [ (set (match_operand:SI 0 "register_operand" "+r")
- + (unspec:SI [(match_dup 0)
- + (match_operand 1 "immediate_operand" "Ku05")
- + (match_operand 2 "immediate_operand" "Ku05")]
- + UNSPEC_SATRNDS)) ]
- + "TARGET_DSP"
- + "satrnds\t%0 >> %1, %2"
- + [(set_attr "type" "alu_sat")
- + (set_attr "length" "4")]
- + )
- +
- +(define_insn "satrndu"
- + [ (set (match_operand:SI 0 "register_operand" "+r")
- + (unspec:SI [(match_dup 0)
- + (match_operand 1 "immediate_operand" "Ku05")
- + (match_operand 2 "immediate_operand" "Ku05")]
- + UNSPEC_SATRNDU)) ]
- + "TARGET_DSP"
- + "sats\t%0 >> %1, %2"
- + [(set_attr "type" "alu_sat")
- + (set_attr "length" "4")]
- + )
- +
- +(define_insn "sleep"
- + [(unspec_volatile [(const_int 0)] VUNSPEC_SLEEP)
- + (match_operand:SI 0 "const_int_operand" "")]
- + ""
- + "sleep %0"
- + [(set_attr "length" "1")
- + (set_attr "cc" "none")
- + ])
- +
- +(define_expand "delay_cycles"
- + [(unspec_volatile [(match_operand:SI 0 "const_int_operand" "i")]
- + VUNSPEC_DELAY_CYCLES)]
- + ""
- + "
- + unsigned int cycles = UINTVAL (operands[0]);
- + if (IN_RANGE (cycles, 0x10000, 0xFFFFFFFF))
- + {
- + unsigned int msb = (cycles & 0xFFFF0000);
- + unsigned int shift = 16;
- + msb = (msb >> shift);
- + unsigned int cycles_used = (msb*0x10000);
- + emit_insn (gen_delay_cycles_2 (gen_int_mode (msb, SImode)));
- + cycles -= cycles_used;
- + }
- + if (IN_RANGE(cycles, 4, 0xFFFF))
- + {
- + unsigned int loop_count = (cycles / 4);
- + unsigned int cycles_used = (loop_count*4);
- + emit_insn (gen_delay_cycles_1 (gen_int_mode (loop_count, SImode)));
- + cycles -= cycles_used;
- + }
- + while (cycles >= 3)
- + {
- + emit_insn (gen_nop3 ());
- + cycles -= 3;
- + }
- + if (cycles == 1 || cycles == 2)
- + {
- + while (cycles--)
- + emit_insn (gen_nop ());
- + }
- + DONE;
- + ")
- +
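- +;; Worked example of the decomposition above (illustrative; cycle counts are
- +;; as the expander accounts them): a request for 74565 cycles (0x12345)
- +;; first emits delay_cycles_2 with an operand of 1, accounting for 65536
- +;; cycles; the remaining 9029 cycles give delay_cycles_1 with a loop count
- +;; of 2257 (9028 cycles); the last remaining cycle is padded with one nop.
- +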
- +(define_insn "delay_cycles_1"
- +[(unspec_volatile [(const_int 0)] VUNSPEC_DELAY_CYCLES_1)
- + (match_operand:SI 0 "immediate_operand" "")
- + (clobber (match_scratch:SI 1 "=&r"))]
- + ""
- + "mov\t%1, %0
- + 1: sub\t%1, 1
- + brne\t1b
- + nop"
- +)
- +
- +(define_insn "delay_cycles_2"
- +[(unspec_volatile [(const_int 0)] VUNSPEC_DELAY_CYCLES_2)
- + (match_operand:SI 0 "immediate_operand" "")
- + (clobber (match_scratch:SI 1 "=&r"))
- + (clobber (match_scratch:SI 2 "=&r"))]
- + ""
- + "mov\t%1, %0
- + 1: mov\t%2, 16383
- + 2: sub\t%2, 1
- + brne\t2b
- + nop
- + sub\t%1, 1
- + brne\t1b
- + nop"
- +)
- +
- +;; CPU instructions
- +
- +;;=============================================================================
- +;; nop
- +;;-----------------------------------------------------------------------------
- +;; No-op instruction.
- +;;=============================================================================
- +(define_insn "nop"
- + [(unspec_volatile [(const_int 0)] VUNSPEC_NOP)]
- + ""
- + "nop"
- + [(set_attr "length" "1")
- + (set_attr "type" "alu")
- + (set_attr "cc" "none")])
- +
- +;; NOP3
- +(define_insn "nop3"
- + [(unspec_volatile [(const_int 0)] VUNSPEC_NOP3)]
- + ""
- + "rjmp\t2"
- + [(set_attr "length" "3")
- + (set_attr "type" "alu")
- + (set_attr "cc" "none")])
- +
- +;; Special patterns for dealing with the constant pool
- +
- +(define_insn "align_4"
- + [(unspec_volatile [(const_int 0)] VUNSPEC_ALIGN)]
- + ""
- + {
- + assemble_align (32);
- + return "";
- + }
- + [(set_attr "length" "2")]
- +)
- +
- +
- +(define_insn "consttable_start"
- + [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_START)]
- + ""
- + {
- + return ".cpool";
- + }
- + [(set_attr "length" "0")]
- + )
- +
- +(define_insn "consttable_end"
- + [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_END)]
- + ""
- + {
- + making_const_table = FALSE;
- + return "";
- + }
- + [(set_attr "length" "0")]
- +)
- +
- +
- +(define_insn "consttable_4"
- + [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_4)]
- + ""
- + {
- + making_const_table = TRUE;
- + switch (GET_MODE_CLASS (GET_MODE (operands[0])))
- + {
- + case MODE_FLOAT:
- + {
- + REAL_VALUE_TYPE r;
- + char real_string[1024];
- + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]);
- + real_to_decimal(real_string, &r, 1024, 0, 1);
- + asm_fprintf (asm_out_file, "\t.float\t%s\n", real_string);
- + break;
- + }
- + default:
- + assemble_integer (operands[0], 4, 0, 1);
- + break;
- + }
- + return "";
- + }
- + [(set_attr "length" "4")]
- +)
- +
- +(define_insn "consttable_8"
- + [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_8)]
- + ""
- + {
- + making_const_table = TRUE;
- + switch (GET_MODE_CLASS (GET_MODE (operands[0])))
- + {
- + case MODE_FLOAT:
- + {
- + REAL_VALUE_TYPE r;
- + char real_string[1024];
- + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]);
- + real_to_decimal(real_string, &r, 1024, 0, 1);
- + asm_fprintf (asm_out_file, "\t.double\t%s\n", real_string);
- + break;
- + }
- + default:
- + assemble_integer(operands[0], 8, 0, 1);
- + break;
- + }
- + return "";
- + }
- + [(set_attr "length" "8")]
- +)
- +
- +(define_insn "consttable_16"
- + [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_16)]
- + ""
- + {
- + making_const_table = TRUE;
- + assemble_integer(operands[0], 16, 0, 1);
- + return "";
- + }
- + [(set_attr "length" "16")]
- +)
- +
- +;;=============================================================================
- +;; coprocessor instructions
- +;;-----------------------------------------------------------------------------
- +(define_insn "cop"
- + [ (unspec_volatile [(match_operand 0 "immediate_operand" "Ku03")
- + (match_operand 1 "immediate_operand" "Ku04")
- + (match_operand 2 "immediate_operand" "Ku04")
- + (match_operand 3 "immediate_operand" "Ku04")
- + (match_operand 4 "immediate_operand" "Ku07")] VUNSPEC_COP)]
- + ""
- + "cop\tcp%0, cr%1, cr%2, cr%3, %4"
- + [(set_attr "length" "4")]
- + )
- +
- +(define_insn "mvcrsi"
- + [ (set (match_operand:SI 0 "avr32_cop_move_operand" "=r,<,Z")
- + (unspec_volatile:SI [(match_operand 1 "immediate_operand" "Ku03,Ku03,Ku03")
- + (match_operand 2 "immediate_operand" "Ku04,Ku04,Ku04")]
- + VUNSPEC_MVCR)) ]
- + ""
- + "@
- + mvcr.w\tcp%1, %0, cr%2
- + stcm.w\tcp%1, %0, cr%2
- + stc.w\tcp%1, %0, cr%2"
- + [(set_attr "length" "4")]
- + )
- +
- +(define_insn "mvcrdi"
- + [ (set (match_operand:DI 0 "avr32_cop_move_operand" "=r,<,Z")
- + (unspec_volatile:DI [(match_operand 1 "immediate_operand" "Ku03,Ku03,Ku03")
- + (match_operand 2 "immediate_operand" "Ku04,Ku04,Ku04")]
- + VUNSPEC_MVCR)) ]
- + ""
- + "@
- + mvcr.d\tcp%1, %0, cr%2
- + stcm.d\tcp%1, %0, cr%2-cr%i2
- + stc.d\tcp%1, %0, cr%2"
- + [(set_attr "length" "4")]
- + )
- +
- +(define_insn "mvrcsi"
- + [ (unspec_volatile:SI [(match_operand 0 "immediate_operand" "Ku03,Ku03,Ku03")
- + (match_operand 1 "immediate_operand" "Ku04,Ku04,Ku04")
- + (match_operand:SI 2 "avr32_cop_move_operand" "r,>,Z")]
- + VUNSPEC_MVRC)]
- + ""
- + {
- + switch (which_alternative){
- + case 0:
- + return "mvrc.w\tcp%0, cr%1, %2";
- + case 1:
- + return "ldcm.w\tcp%0, %2, cr%1";
- + case 2:
- + return "ldc.w\tcp%0, cr%1, %2";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "length" "4")]
- + )
- +
- +(define_insn "mvrcdi"
- + [ (unspec_volatile:DI [(match_operand 0 "immediate_operand" "Ku03,Ku03,Ku03")
- + (match_operand 1 "immediate_operand" "Ku04,Ku04,Ku04")
- + (match_operand:DI 2 "avr32_cop_move_operand" "r,>,Z")]
- + VUNSPEC_MVRC)]
- + ""
- + {
- + switch (which_alternative){
- + case 0:
- + return "mvrc.d\tcp%0, cr%1, %2";
- + case 1:
- + return "ldcm.d\tcp%0, %2, cr%1-cr%i1";
- + case 2:
- + return "ldc.d\tcp%0, cr%1, %2";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "length" "4")]
- + )
- +
- +;;=============================================================================
- +;; epilogue
- +;;-----------------------------------------------------------------------------
- +;; This pattern emits RTL for exit from a function. The function exit is
- +;; responsible for deallocating the stack frame, restoring callee saved
- +;; registers and emitting the return instruction.
- +;; ToDo: consider using TARGET_ASM_FUNCTION_EPILOGUE instead.
- +;;=============================================================================
- +(define_expand "epilogue"
- + [(unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
- + ""
- + "
- + if (USE_RETURN_INSN (FALSE)){
- + emit_jump_insn (gen_return ());
- + DONE;
- + }
- + emit_jump_insn (gen_rtx_UNSPEC_VOLATILE (VOIDmode,
- + gen_rtvec (1,
- + gen_rtx_RETURN (VOIDmode)),
- + VUNSPEC_EPILOGUE));
- + DONE;
- + "
- + )
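- +
- +;; For illustration only (the real sequence comes from
- +;; avr32_output_return_instruction): a trivial epilogue can be as small as a
- +;; single "retal" (see the return peepholes further down), while a function
- +;; that saved registers typically restores them and returns in one
- +;; "ldm sp++, ..., pc" (compare the epilogue in crtn.asm later in this patch).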
- +
- +(define_insn "*epilogue_insns"
- + [(unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
- + ""
- + {
- + avr32_output_return_instruction (FALSE, FALSE, NULL, NULL);
- + return "";
- + }
- + ; Length is absolute worst case
- + [(set_attr "type" "branch")
- + (set_attr "length" "12")]
- + )
- +
- +(define_insn "*epilogue_insns_ret_imm"
- + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
- + (use (reg RETVAL_REGNUM))
- + (unspec_volatile [(return)] VUNSPEC_EPILOGUE)])]
- + "((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
- + {
- + avr32_output_return_instruction (FALSE, FALSE, NULL, operands[0]);
- + return "";
- + }
- + ; Length is absolute worst case
- + [(set_attr "type" "branch")
- + (set_attr "length" "12")]
- + )
- +
- +(define_insn "sibcall_epilogue"
- + [(unspec_volatile [(const_int 0)] VUNSPEC_EPILOGUE)]
- + ""
- + {
- + avr32_output_return_instruction (FALSE, FALSE, NULL, NULL);
- + return "";
- + }
- +;; Length is absolute worst case
- + [(set_attr "type" "branch")
- + (set_attr "length" "12")]
- + )
- +
- +(define_insn "*sibcall_epilogue_insns_ret_imm"
- + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
- + (use (reg RETVAL_REGNUM))
- + (unspec_volatile [(const_int 0)] VUNSPEC_EPILOGUE)])]
- + "((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
- + {
- + avr32_output_return_instruction (FALSE, FALSE, NULL, operands[0]);
- + return "";
- + }
- + ; Length is absolute worst case
- + [(set_attr "type" "branch")
- + (set_attr "length" "12")]
- + )
- +
- +(define_insn "ldxi"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (mem:SI (plus:SI
- + (match_operand:SI 1 "register_operand" "r")
- + (mult:SI (zero_extract:SI (match_operand:SI 2 "register_operand" "r")
- + (const_int 8)
- + (match_operand:SI 3 "immediate_operand" "Ku05"))
- + (const_int 4)))))]
- + "(INTVAL(operands[3]) == 24 || INTVAL(operands[3]) == 16 || INTVAL(operands[3]) == 8
- + || INTVAL(operands[3]) == 0)"
- + {
- + switch ( INTVAL(operands[3]) ){
- + case 0:
- + return "ld.w %0, %1[%2:b << 2]";
- + case 8:
- + return "ld.w %0, %1[%2:l << 2]";
- + case 16:
- + return "ld.w %0, %1[%2:u << 2]";
- + case 24:
- + return "ld.w %0, %1[%2:t << 2]";
- + default:
- + internal_error("illegal operand for ldxi");
- + }
- + }
- + [(set_attr "type" "load")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +
- +
- +
- +
- +
- +;;=============================================================================
- +;; Peephole optimizing
- +;;-----------------------------------------------------------------------------
- +;; Changing
- +;; sub r8, r7, 8
- +;; st.w r8[0x0], r12
- +;; to
- +;; sub r8, r7, 8
- +;; st.w r7[-0x8], r12
- +;;=============================================================================
- +; (set (reg:SI 9 r8)
- +; (plus:SI (reg/f:SI 6 r7)
- +; (const_int ...)))
- +; (set (mem:SI (reg:SI 9 r8))
- +; (reg:SI 12 r12))
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (plus:SI (match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")))
- + (set (mem:SI (match_dup 0))
- + (match_operand:SI 3 "register_operand" ""))]
- + "REGNO(operands[0]) != REGNO(operands[1]) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'K', \"Ks16\")"
- + [(set (match_dup 0)
- + (plus:SI (match_dup 1)
- + (match_dup 2)))
- + (set (mem:SI (plus:SI (match_dup 1)
- + (match_dup 2)))
- + (match_dup 3))]
- + "")
- +
- +;;=============================================================================
- +;; Peephole optimizing
- +;;-----------------------------------------------------------------------------
- +;; Changing
- +;; sub r6, r7, 4
- +;; ld.w r6, r6[0x0]
- +;; to
- +;; sub r6, r7, 4
- +;; ld.w r6, r7[-0x4]
- +;;=============================================================================
- +; (set (reg:SI 7 r6)
- +; (plus:SI (reg/f:SI 6 r7)
- +; (const_int -4 [0xfffffffc])))
- +; (set (reg:SI 7 r6)
- +; (mem:SI (reg:SI 7 r6)))
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (plus:SI (match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")))
- + (set (match_operand:SI 3 "register_operand" "")
- + (mem:SI (match_dup 0)))]
- + "REGNO(operands[0]) != REGNO(operands[1]) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'K', \"Ks16\")"
- + [(set (match_dup 0)
- + (plus:SI (match_dup 1)
- + (match_dup 2)))
- + (set (match_dup 3)
- + (mem:SI (plus:SI (match_dup 1)
- + (match_dup 2))))]
- + "")
- +
- +;;=============================================================================
- +;; Peephole optimizing
- +;;-----------------------------------------------------------------------------
- +;; Changing
- +;; ld.sb r0, r7[-0x6]
- +;; cashs.b r0
- +;; to
- +;; ld.sb r0, r7[-0x6]
- +;;=============================================================================
- +(define_peephole2
- + [(set (match_operand:QI 0 "register_operand" "")
- + (match_operand:QI 1 "load_sb_memory_operand" ""))
- + (set (match_operand:SI 2 "register_operand" "")
- + (sign_extend:SI (match_dup 0)))]
- + "(REGNO(operands[0]) == REGNO(operands[2]) || peep2_reg_dead_p(2, operands[0]))"
- + [(set (match_dup 2)
- + (sign_extend:SI (match_dup 1)))]
- + "")
- +
- +;;=============================================================================
- +;; Peephole optimizing
- +;;-----------------------------------------------------------------------------
- +;; Changing
- +;; ld.ub r0, r7[-0x6]
- +;; cashu.b r0
- +;; to
- +;; ld.ub r0, r7[-0x6]
- +;;=============================================================================
- +(define_peephole2
- + [(set (match_operand:QI 0 "register_operand" "")
- + (match_operand:QI 1 "memory_operand" ""))
- + (set (match_operand:SI 2 "register_operand" "")
- + (zero_extend:SI (match_dup 0)))]
- + "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
- + [(set (match_dup 2)
- + (zero_extend:SI (match_dup 1)))]
- + "")
- +
- +;;=============================================================================
- +;; Peephole optimizing
- +;;-----------------------------------------------------------------------------
- +;; Changing
- +;; ld.sh r0, r7[-0x6]
- +;; casts.h r0
- +;; to
- +;; ld.sh r0, r7[-0x6]
- +;;=============================================================================
- +(define_peephole2
- + [(set (match_operand:HI 0 "register_operand" "")
- + (match_operand:HI 1 "memory_operand" ""))
- + (set (match_operand:SI 2 "register_operand" "")
- + (sign_extend:SI (match_dup 0)))]
- + "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
- + [(set (match_dup 2)
- + (sign_extend:SI (match_dup 1)))]
- + "")
- +
- +;;=============================================================================
- +;; Peephole optimizing
- +;;-----------------------------------------------------------------------------
- +;; Changing
- +;; ld.uh r0, r7[-0x6]
- +;; castu.h r0
- +;; to
- +;; ld.uh r0, r7[-0x6]
- +;;=============================================================================
- +(define_peephole2
- + [(set (match_operand:HI 0 "register_operand" "")
- + (match_operand:HI 1 "memory_operand" ""))
- + (set (match_operand:SI 2 "register_operand" "")
- + (zero_extend:SI (match_dup 0)))]
- + "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
- + [(set (match_dup 2)
- + (zero_extend:SI (match_dup 1)))]
- + "")
- +
- +;;=============================================================================
- +;; Peephole optimizing
- +;;-----------------------------------------------------------------------------
- +;; Changing
- +;; mul rd, rx, ry
- +;; add rd2, rd
- +;; or
- +;; add rd2, rd, rd2
- +;; to
- +;; mac rd2, rx, ry
- +;;=============================================================================
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (mult:SI (match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "register_operand" "")))
- + (set (match_operand:SI 3 "register_operand" "")
- + (plus:SI (match_dup 3)
- + (match_dup 0)))]
- + "peep2_reg_dead_p(2, operands[0])"
- + [(set (match_dup 3)
- + (plus:SI (mult:SI (match_dup 1)
- + (match_dup 2))
- + (match_dup 3)))]
- + "")
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (mult:SI (match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "register_operand" "")))
- + (set (match_operand:SI 3 "register_operand" "")
- + (plus:SI (match_dup 0)
- + (match_dup 3)))]
- + "peep2_reg_dead_p(2, operands[0])"
- + [(set (match_dup 3)
- + (plus:SI (mult:SI (match_dup 1)
- + (match_dup 2))
- + (match_dup 3)))]
- + "")
- +
- +
- +;;=============================================================================
- +;; Peephole optimizing
- +;;-----------------------------------------------------------------------------
- +;; Changing
- +;; bfextu rd, rs, k5, 1 or and(h/l) rd, one_bit_set_mask
- +;; to
- +;; bld rs, k5
- +;;
- +;; If rd is dead after the operation.
- +;;=============================================================================
- +(define_peephole2
- + [ (set (match_operand:SI 0 "register_operand" "")
- + (zero_extract:SI (match_operand:SI 1 "register_operand" "")
- + (const_int 1)
- + (match_operand:SI 2 "immediate_operand" "")))
- + (set (cc0)
- + (match_dup 0))]
- + "peep2_reg_dead_p(2, operands[0])"
- + [(set (cc0)
- + (and:SI (match_dup 1)
- + (match_dup 2)))]
- + "operands[2] = GEN_INT(1 << INTVAL(operands[2]));")
- +
- +(define_peephole2
- + [ (set (match_operand:SI 0 "register_operand" "")
- + (and:SI (match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "one_bit_set_operand" "")))
- + (set (cc0)
- + (match_dup 0))]
- + "peep2_reg_dead_p(2, operands[0])"
- + [(set (cc0)
- + (and:SI (match_dup 1)
- + (match_dup 2)))]
- + "")
- +
- +;;=============================================================================
- +;; Peephole optimizing
- +;;-----------------------------------------------------------------------------
- +;; Load with extracted index: ld.w Rd, Rb[Ri:{t/u/b/l} << 2]
- +;;
- +;;=============================================================================
- +
- +
- +(define_peephole
- + [(set (match_operand:SI 0 "register_operand" "")
- + (zero_extract:SI (match_operand:SI 1 "register_operand" "")
- + (const_int 8)
- + (match_operand:SI 2 "avr32_extract_shift_operand" "")))
- + (set (match_operand:SI 3 "register_operand" "")
- + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
- + (match_operand:SI 4 "register_operand" ""))))]
- +
- + "(dead_or_set_p(insn, operands[0]))"
- + {
- + switch ( INTVAL(operands[2]) ){
- + case 0:
- + return "ld.w %3, %4[%1:b << 2]";
- + case 8:
- + return "ld.w %3, %4[%1:l << 2]";
- + case 16:
- + return "ld.w %3, %4[%1:u << 2]";
- + case 24:
- + return "ld.w %3, %4[%1:t << 2]";
- + default:
- + internal_error("illegal operand for ldxi");
- + }
- + }
- + [(set_attr "type" "load")
- + (set_attr "length" "4")
- + (set_attr "cc" "clobber")]
- + )
- +
- +
- +
- +(define_peephole
- + [(set (match_operand:SI 0 "register_operand" "")
- + (and:SI (match_operand:SI 1 "register_operand" "") (const_int 255)))
- + (set (match_operand:SI 2 "register_operand" "")
- + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
- + (match_operand:SI 3 "register_operand" ""))))]
- +
- + "(dead_or_set_p(insn, operands[0]))"
- +
- + "ld.w %2, %3[%1:b << 2]"
- + [(set_attr "type" "load")
- + (set_attr "length" "4")
- + (set_attr "cc" "clobber")]
- + )
- +
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (zero_extract:SI (match_operand:SI 1 "register_operand" "")
- + (const_int 8)
- + (match_operand:SI 2 "avr32_extract_shift_operand" "")))
- + (set (match_operand:SI 3 "register_operand" "")
- + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
- + (match_operand:SI 4 "register_operand" ""))))]
- +
- + "(peep2_reg_dead_p(2, operands[0]))
- + || (REGNO(operands[0]) == REGNO(operands[3]))"
- + [(set (match_dup 3)
- + (mem:SI (plus:SI
- + (match_dup 4)
- + (mult:SI (zero_extract:SI (match_dup 1)
- + (const_int 8)
- + (match_dup 2))
- + (const_int 4)))))]
- + )
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (zero_extend:SI (match_operand:QI 1 "register_operand" "")))
- + (set (match_operand:SI 2 "register_operand" "")
- + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
- + (match_operand:SI 3 "register_operand" ""))))]
- +
- + "(peep2_reg_dead_p(2, operands[0]))
- + || (REGNO(operands[0]) == REGNO(operands[2]))"
- + [(set (match_dup 2)
- + (mem:SI (plus:SI
- + (match_dup 3)
- + (mult:SI (zero_extract:SI (match_dup 1)
- + (const_int 8)
- + (const_int 0))
- + (const_int 4)))))]
- + "operands[1] = gen_rtx_REG(SImode, REGNO(operands[1]));"
- + )
- +
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (and:SI (match_operand:SI 1 "register_operand" "")
- + (const_int 255)))
- + (set (match_operand:SI 2 "register_operand" "")
- + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
- + (match_operand:SI 3 "register_operand" ""))))]
- +
- + "(peep2_reg_dead_p(2, operands[0]))
- + || (REGNO(operands[0]) == REGNO(operands[2]))"
- + [(set (match_dup 2)
- + (mem:SI (plus:SI
- + (match_dup 3)
- + (mult:SI (zero_extract:SI (match_dup 1)
- + (const_int 8)
- + (const_int 0))
- + (const_int 4)))))]
- + ""
- + )
- +
- +
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (lshiftrt:SI (match_operand:SI 1 "register_operand" "")
- + (const_int 24)))
- + (set (match_operand:SI 2 "register_operand" "")
- + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
- + (match_operand:SI 3 "register_operand" ""))))]
- +
- + "(peep2_reg_dead_p(2, operands[0]))
- + || (REGNO(operands[0]) == REGNO(operands[2]))"
- + [(set (match_dup 2)
- + (mem:SI (plus:SI
- + (match_dup 3)
- + (mult:SI (zero_extract:SI (match_dup 1)
- + (const_int 8)
- + (const_int 24))
- + (const_int 4)))))]
- + ""
- + )
- +
- +
- +;;************************************************
- +;; ANDN
- +;;
- +;;************************************************
- +
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (not:SI (match_operand:SI 1 "register_operand" "")))
- + (set (match_operand:SI 2 "register_operand" "")
- + (and:SI (match_dup 2)
- + (match_dup 0)))]
- + "peep2_reg_dead_p(2, operands[0])"
- +
- + [(set (match_dup 2)
- + (and:SI (match_dup 2)
- + (not:SI (match_dup 1))
- + ))]
- + ""
- +)
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (not:SI (match_operand:SI 1 "register_operand" "")))
- + (set (match_operand:SI 2 "register_operand" "")
- + (and:SI (match_dup 0)
- + (match_dup 2)
- + ))]
- + "peep2_reg_dead_p(2, operands[0])"
- +
- + [(set (match_dup 2)
- + (and:SI (match_dup 2)
- + (not:SI (match_dup 1))
- + ))]
- +
- + ""
- +)
- +
- +
- +;;=================================================================
- +;; Addabs peephole
- +;;=================================================================
- +
- +(define_peephole
- + [(set (match_operand:SI 2 "register_operand" "=r")
- + (abs:SI (match_operand:SI 1 "register_operand" "r")))
- + (set (match_operand:SI 0 "register_operand" "=r")
- + (plus:SI (match_operand:SI 3 "register_operand" "r")
- + (match_dup 2)))]
- + "dead_or_set_p(insn, operands[2])"
- + "addabs %0, %3, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "set_z")])
- +
- +(define_peephole
- + [(set (match_operand:SI 2 "register_operand" "=r")
- + (abs:SI (match_operand:SI 1 "register_operand" "r")))
- + (set (match_operand:SI 0 "register_operand" "=r")
- + (plus:SI (match_dup 2)
- + (match_operand:SI 3 "register_operand" "r")))]
- + "dead_or_set_p(insn, operands[2])"
- + "addabs %0, %3, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "set_z")])
- +
- +
- +;;=================================================================
- +;; Detect roundings
- +;;=================================================================
- +
- +(define_insn "*round"
- + [(set (match_operand:SI 0 "register_operand" "+r")
- + (ashiftrt:SI (plus:SI (match_dup 0)
- + (match_operand:SI 1 "immediate_operand" "i"))
- + (match_operand:SI 2 "immediate_operand" "i")))]
- + "avr32_rnd_operands(operands[1], operands[2])"
- +
- + "satrnds %0 >> %2, 31"
- +
- + [(set_attr "type" "alu_sat")
- + (set_attr "length" "4")]
- +
- + )
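- +
- +;; For illustration (assuming avr32_rnd_operands accepts the usual rounding
- +;; pair, i.e. operands[1] == 1 << (operands[2] - 1)): a fixed-point rounding
- +;; such as (x + 0x8000) >> 16 matches the pattern above and is emitted as a
- +;; single "satrnds rx >> 16, 31" instead of an add followed by a shift.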
- +
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (plus:SI (match_dup 0)
- + (match_operand:SI 1 "immediate_operand" "")))
- + (set (match_dup 0)
- + (ashiftrt:SI (match_dup 0)
- + (match_operand:SI 2 "immediate_operand" "")))]
- + "avr32_rnd_operands(operands[1], operands[2])"
- +
- + [(set (match_dup 0)
- + (ashiftrt:SI (plus:SI (match_dup 0)
- + (match_dup 1))
- + (match_dup 2)))]
- + )
- +
- +(define_peephole
- + [(set (match_operand:SI 0 "register_operand" "r")
- + (plus:SI (match_dup 0)
- + (match_operand:SI 1 "immediate_operand" "i")))
- + (set (match_dup 0)
- + (ashiftrt:SI (match_dup 0)
- + (match_operand:SI 2 "immediate_operand" "i")))]
- + "avr32_rnd_operands(operands[1], operands[2])"
- +
- + "satrnds %0 >> %2, 31"
- +
- + [(set_attr "type" "alu_sat")
- + (set_attr "length" "4")
- + (set_attr "cc" "clobber")]
- +
- + )
- +
- +
- +;;=================================================================
- +;; mcall
- +;;=================================================================
- +(define_peephole
- + [(set (match_operand:SI 0 "register_operand" "")
- + (match_operand 1 "avr32_const_pool_ref_operand" ""))
- + (parallel [(call (mem:SI (match_dup 0))
- + (match_operand 2 "" ""))
- + (clobber (reg:SI LR_REGNUM))])]
- + "dead_or_set_p(insn, operands[0])"
- + "mcall %1"
- + [(set_attr "type" "call")
- + (set_attr "length" "4")
- + (set_attr "cc" "clobber")]
- +)
- +
- +(define_peephole
- + [(set (match_operand:SI 2 "register_operand" "")
- + (match_operand 1 "avr32_const_pool_ref_operand" ""))
- + (parallel [(set (match_operand 0 "register_operand" "")
- + (call (mem:SI (match_dup 2))
- + (match_operand 3 "" "")))
- + (clobber (reg:SI LR_REGNUM))])]
- + "dead_or_set_p(insn, operands[2])"
- + "mcall %1"
- + [(set_attr "type" "call")
- + (set_attr "length" "4")
- + (set_attr "cc" "call_set")]
- +)
- +
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (match_operand 1 "avr32_const_pool_ref_operand" ""))
- + (parallel [(call (mem:SI (match_dup 0))
- + (match_operand 2 "" ""))
- + (clobber (reg:SI LR_REGNUM))])]
- + "peep2_reg_dead_p(2, operands[0])"
- + [(parallel [(call (mem:SI (match_dup 1))
- + (match_dup 2))
- + (clobber (reg:SI LR_REGNUM))])]
- + ""
- +)
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (match_operand 1 "avr32_const_pool_ref_operand" ""))
- + (parallel [(set (match_operand 2 "register_operand" "")
- + (call (mem:SI (match_dup 0))
- + (match_operand 3 "" "")))
- + (clobber (reg:SI LR_REGNUM))])]
- + "(peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[2]) == REGNO(operands[0])))"
- + [(parallel [(set (match_dup 2)
- + (call (mem:SI (match_dup 1))
- + (match_dup 3)))
- + (clobber (reg:SI LR_REGNUM))])]
- + ""
- +)
- +
- +;;=================================================================
- +;; Returning a value
- +;;=================================================================
- +
- +
- +(define_peephole
- + [(set (match_operand 0 "register_operand" "")
- + (match_operand 1 "register_operand" ""))
- + (return)]
- + "USE_RETURN_INSN (TRUE) && (REGNO(operands[0]) == RETVAL_REGNUM)
- + && (REGNO(operands[1]) != LR_REGNUM)
- + && (REGNO_REG_CLASS(REGNO(operands[1])) == GENERAL_REGS)"
- + "retal %1"
- + [(set_attr "type" "call")
- + (set_attr "length" "2")]
- + )
- +
- +
- +(define_peephole
- + [(set (match_operand 0 "register_operand" "r")
- + (match_operand 1 "immediate_operand" "i"))
- + (return)]
- + "(USE_RETURN_INSN (FALSE) && (REGNO(operands[0]) == RETVAL_REGNUM) &&
- + ((INTVAL(operands[1]) == -1) || (INTVAL(operands[1]) == 0) || (INTVAL(operands[1]) == 1)))"
- + {
- + avr32_output_return_instruction (TRUE, FALSE, NULL, operands[1]);
- + return "";
- + }
- + [(set_attr "type" "call")
- + (set_attr "length" "4")]
- + )
- +
- +(define_peephole
- + [(set (match_operand 0 "register_operand" "r")
- + (match_operand 1 "immediate_operand" "i"))
- + (unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
- + "(REGNO(operands[0]) == RETVAL_REGNUM) &&
- + ((INTVAL(operands[1]) == -1) || (INTVAL(operands[1]) == 0) || (INTVAL(operands[1]) == 1))"
- + {
- + avr32_output_return_instruction (FALSE, FALSE, NULL, operands[1]);
- + return "";
- + }
- + ; Length is absolute worst case
- + [(set_attr "type" "branch")
- + (set_attr "length" "12")]
- + )
- +
- +(define_peephole
- + [(set (match_operand 0 "register_operand" "=r")
- + (if_then_else (match_operator 1 "avr32_comparison_operator"
- + [(match_operand 4 "register_operand" "r")
- + (match_operand 5 "register_immediate_operand" "rKs21")])
- + (match_operand 2 "avr32_cond_register_immediate_operand" "rKs08")
- + (match_operand 3 "avr32_cond_register_immediate_operand" "rKs08")))
- + (return)]
- + "USE_RETURN_INSN (TRUE) && (REGNO(operands[0]) == RETVAL_REGNUM)"
- + {
- + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
- +
- + if ( GET_CODE(operands[2]) == REG
- + && GET_CODE(operands[3]) == REG
- + && REGNO(operands[2]) != LR_REGNUM
- + && REGNO(operands[3]) != LR_REGNUM ){
- + return "ret%1 %2\;ret%i1 %3";
- + } else if ( GET_CODE(operands[2]) == REG
- + && GET_CODE(operands[3]) == CONST_INT ){
- + if ( INTVAL(operands[3]) == -1
- + || INTVAL(operands[3]) == 0
- + || INTVAL(operands[3]) == 1 ){
- + return "ret%1 %2\;ret%i1 %d3";
- + } else {
- + return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12";
- + }
- + } else if ( GET_CODE(operands[2]) == CONST_INT
- + && GET_CODE(operands[3]) == REG ){
- + if ( INTVAL(operands[2]) == -1
- + || INTVAL(operands[2]) == 0
- + || INTVAL(operands[2]) == 1 ){
- + return "ret%1 %d2\;ret%i1 %3";
- + } else {
- + return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12";
- + }
- + } else {
- + if ( (INTVAL(operands[2]) == -1
- + || INTVAL(operands[2]) == 0
- + || INTVAL(operands[2]) == 1 )
- + && (INTVAL(operands[3]) == -1
- + || INTVAL(operands[3]) == 0
- + || INTVAL(operands[3]) == 1 )){
- + return "ret%1 %d2\;ret%i1 %d3";
- + } else {
- + return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12";
- + }
- + }
- + }
- +
- + [(set_attr "length" "10")
- + (set_attr "cc" "none")
- + (set_attr "type" "call")])
- +
- +
- +
- +;;=================================================================
- +;; mulnhh.w
- +;;=================================================================
- +
- +(define_peephole2
- + [(set (match_operand:HI 0 "register_operand" "")
- + (neg:HI (match_operand:HI 1 "register_operand" "")))
- + (set (match_operand:SI 2 "register_operand" "")
- + (mult:SI
- + (sign_extend:SI (match_dup 0))
- + (sign_extend:SI (match_operand:HI 3 "register_operand" ""))))]
- + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[2]) == REGNO(operands[0]))"
- + [ (set (match_dup 2)
- + (mult:SI
- + (sign_extend:SI (neg:HI (match_dup 1)))
- + (sign_extend:SI (match_dup 3))))]
- + ""
- + )
- +
- +(define_peephole2
- + [(set (match_operand:HI 0 "register_operand" "")
- + (neg:HI (match_operand:HI 1 "register_operand" "")))
- + (set (match_operand:SI 2 "register_operand" "")
- + (mult:SI
- + (sign_extend:SI (match_operand:HI 3 "register_operand" ""))
- + (sign_extend:SI (match_dup 0))))]
- + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[2]) == REGNO(operands[0]))"
- + [ (set (match_dup 2)
- + (mult:SI
- + (sign_extend:SI (neg:HI (match_dup 1)))
- + (sign_extend:SI (match_dup 3))))]
- + ""
- + )
- +
- +
- +
- +;;=================================================================
- +;; Vector set and extract operations
- +;;=================================================================
- +(define_insn "vec_setv2hi_hi"
- + [(set (match_operand:V2HI 0 "register_operand" "=r")
- + (vec_merge:V2HI
- + (match_dup 0)
- + (vec_duplicate:V2HI
- + (match_operand:HI 1 "register_operand" "r"))
- + (const_int 1)))]
- + ""
- + "bfins\t%0, %1, 16, 16"
- + [(set_attr "type" "alu")
- + (set_attr "length" "4")
- + (set_attr "cc" "clobber")])
- +
- +(define_insn "vec_setv2hi_lo"
- + [(set (match_operand:V2HI 0 "register_operand" "+r")
- + (vec_merge:V2HI
- + (match_dup 0)
- + (vec_duplicate:V2HI
- + (match_operand:HI 1 "register_operand" "r"))
- + (const_int 2)))]
- + ""
- + "bfins\t%0, %1, 0, 16"
- + [(set_attr "type" "alu")
- + (set_attr "length" "4")
- + (set_attr "cc" "clobber")])
- +
- +(define_expand "vec_setv2hi"
- + [(set (match_operand:V2HI 0 "register_operand" "")
- + (vec_merge:V2HI
- + (match_dup 0)
- + (vec_duplicate:V2HI
- + (match_operand:HI 1 "register_operand" ""))
- + (match_operand 2 "immediate_operand" "")))]
- + ""
- + { operands[2] = GEN_INT(INTVAL(operands[2]) + 1); }
- + )
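- +
- +;; The expander above simply maps the element index to the vec_merge mask
- +;; used by the insns above: index 0 becomes mask 1, handled by
- +;; vec_setv2hi_hi (bfins into bits 31:16), and index 1 becomes mask 2,
- +;; handled by vec_setv2hi_lo (bfins into bits 15:0).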
- +
- +(define_insn "vec_extractv2hi"
- + [(set (match_operand:HI 0 "register_operand" "=r")
- + (vec_select:HI
- + (match_operand:V2HI 1 "register_operand" "r")
- + (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
- + ""
- + {
- + if ( INTVAL(operands[2]) == 0 )
- + return "bfextu\t%0, %1, 16, 16";
- + else
- + return "bfextu\t%0, %1, 0, 16";
- + }
- + [(set_attr "type" "alu")
- + (set_attr "length" "4")
- + (set_attr "cc" "clobber")])
- +
- +(define_insn "vec_extractv4qi"
- + [(set (match_operand:QI 0 "register_operand" "=r")
- + (vec_select:QI
- + (match_operand:V4QI 1 "register_operand" "r")
- + (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
- + ""
- + {
- + switch ( INTVAL(operands[2]) ){
- + case 0:
- + return "bfextu\t%0, %1, 24, 8";
- + case 1:
- + return "bfextu\t%0, %1, 16, 8";
- + case 2:
- + return "bfextu\t%0, %1, 8, 8";
- + case 3:
- + return "bfextu\t%0, %1, 0, 8";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "type" "alu")
- + (set_attr "length" "4")
- + (set_attr "cc" "clobber")])
- +
- +
- +(define_insn "concatv2hi"
- + [(set (match_operand:V2HI 0 "register_operand" "=r, r, r")
- + (vec_concat:V2HI
- + (match_operand:HI 1 "register_operand" "r, r, 0")
- + (match_operand:HI 2 "register_operand" "r, 0, r")))]
- + ""
- + "@
- + mov\t%0, %1\;bfins\t%0, %2, 0, 16
- + bfins\t%0, %2, 0, 16
- + bfins\t%0, %1, 16, 16"
- + [(set_attr "length" "6, 4, 4")
- + (set_attr "type" "alu")])
- +
- +
- +;; Load the atomic operation description
- +(include "sync.md")
- +
- +;; Load the SIMD description
- +(include "simd.md")
- +
- +;; Include the FPU for uc3
- +(include "uc3fpu.md")
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/avr32.opt gcc-4.4.6/gcc/config/avr32/avr32.opt
- --- gcc-4.4.6.orig/gcc/config/avr32/avr32.opt 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/avr32.opt 2011-10-22 19:23:08.524581303 +0200
- @@ -0,0 +1,93 @@
- +; Options for the ATMEL AVR32 port of the compiler.
- +
- +; Copyright 2007 Atmel Corporation.
- +;
- +; This file is part of GCC.
- +;
- +; GCC is free software; you can redistribute it and/or modify it under
- +; the terms of the GNU General Public License as published by the Free
- +; Software Foundation; either version 2, or (at your option) any later
- +; version.
- +;
- +; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
- +; WARRANTY; without even the implied warranty of MERCHANTABILITY or
- +; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- +; for more details.
- +;
- +; You should have received a copy of the GNU General Public License
- +; along with GCC; see the file COPYING. If not, write to the Free
- +; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
- +; 02110-1301, USA.
- +
- +muse-rodata-section
- +Target Report Mask(USE_RODATA_SECTION)
- +Use section .rodata for read-only data instead of .text.
- +
- +mhard-float
- +Target Report Mask(HARD_FLOAT)
- +Use FPU instructions instead of floating point emulation.
- +
- +msoft-float
- +Target Report InverseMask(HARD_FLOAT, SOFT_FLOAT)
- +Use floating point emulation for floating point operations.
- +
- +mforce-double-align
- +Target Report RejectNegative Mask(FORCE_DOUBLE_ALIGN)
- +Force double-word alignment for double-word memory accesses.
- +
- +mno-init-got
- +Target Report RejectNegative Mask(NO_INIT_GOT)
- +Do not initialize GOT register before using it when compiling PIC code.
- +
- +mrelax
- +Target Report Mask(RELAX)
- +Let the invoked assembler and linker do relaxing (enabled by default when the optimization level is >1).
- +
- +mmd-reorg-opt
- +Target Report Undocumented Mask(MD_REORG_OPTIMIZATION)
- +Perform machine dependent optimizations in reorg stage.
- +
- +masm-addr-pseudos
- +Target Report Mask(HAS_ASM_ADDR_PSEUDOS)
- +Use assembler pseudo-instructions lda.w and call for handling direct addresses. (Enabled by default)
- +
- +mpart=
- +Target Report RejectNegative Joined Var(avr32_part_name)
- +Specify the AVR32 part name.
- +
- +mcpu=
- +Target Report RejectNegative Joined Undocumented Var(avr32_part_name)
- +Specify the AVR32 part name (deprecated).
- +
- +march=
- +Target Report RejectNegative Joined Var(avr32_arch_name)
- +Specify the AVR32 architecture name.
- +
- +mfast-float
- +Target Report Mask(FAST_FLOAT)
- +Enable fast floating-point library. Enabled by default if the -funsafe-math-optimizations switch is specified.
- +
- +mimm-in-const-pool
- +Target Report Var(avr32_imm_in_const_pool) Init(-1)
- +Put large immediates in the constant pool. This is enabled by default for architectures with an instruction cache.
- +
- +mno-pic
- +Target Report RejectNegative Mask(NO_PIC)
- +Do not generate position-independent code. (deprecated, use -fno-pic instead)
- +
- +mcond-exec-before-reload
- +Target Report Undocumented Mask(COND_EXEC_BEFORE_RELOAD)
- +Enable experimental conditional execution preparation before the reload stage.
- +
- +mrmw-addressable-data
- +Target Report Mask(RMW_ADDRESSABLE_DATA)
- +Signal that all data is in range for the Atomic Read-Modify-Write memory instructions, and that
- +gcc can safely generate these whenever possible.
- +
- +mflashvault
- +Target Var(TARGET_FLASHVAULT)
- +Generate code for FlashVault.
- +
- +mlist-devices
- +Target RejectNegative Var(avr32_list_supported_parts)
- +Print the list of supported parts when printing --target-help.
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/crti.asm gcc-4.4.6/gcc/config/avr32/crti.asm
- --- gcc-4.4.6.orig/gcc/config/avr32/crti.asm 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/crti.asm 2011-10-22 19:23:08.524581303 +0200
- @@ -0,0 +1,64 @@
- +/*
- + Init/fini stuff for AVR32.
- + Copyright 2003-2006 Atmel Corporation.
- +
- + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
- +
- + This file is part of GCC.
- +
- + This program is free software; you can redistribute it and/or modify
- + it under the terms of the GNU General Public License as published by
- + the Free Software Foundation; either version 2 of the License, or
- + (at your option) any later version.
- +
- + This program is distributed in the hope that it will be useful,
- + but WITHOUT ANY WARRANTY; without even the implied warranty of
- + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- + GNU General Public License for more details.
- +
- + You should have received a copy of the GNU General Public License
- + along with this program; if not, write to the Free Software
- + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
- +
- +
- +/* The code in sections .init and .fini is supposed to be a single
- + regular function. The function in .init is called directly from
- + start in crt1.asm. The function in .fini is atexit()ed in crt1.asm
- + too.
- +
- + crti.asm contributes the prologue of a function to these sections,
- + and crtn.asm supplies the epilogue. STARTFILE_SPEC should list
- + crti.o before any other object files that might add code to .init
- + or .fini sections, and ENDFILE_SPEC should list crtn.o after any
- + such object files. */
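- +
- +/* For illustration, once crti.o, the other objects' .init fragments and
- +   crtn.o are linked, the .init section reads as one function roughly like:
- +
- +       _init:  stm --sp, r6, lr          (prologue, from crti.asm below)
- +               ... GOT setup and .init code from other objects ...
- +               ldm sp++, r6, pc          (epilogue, from crtn.asm)
- +
- +   and .fini/_fini is laid out the same way. */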
- +
- + .file "crti.asm"
- +
- + .section ".init"
- +/* Just load the GOT */
- + .align 2
- + .global _init
- +_init:
- + stm --sp, r6, lr
- + lddpc r6, 1f
- +0:
- + rsub r6, pc
- + rjmp 2f
- + .align 2
- +1: .long 0b - _GLOBAL_OFFSET_TABLE_
- +2:
- +
- + .section ".fini"
- +/* Just load the GOT */
- + .align 2
- + .global _fini
- +_fini:
- + stm --sp, r6, lr
- + lddpc r6, 1f
- +0:
- + rsub r6, pc
- + rjmp 2f
- + .align 2
- +1: .long 0b - _GLOBAL_OFFSET_TABLE_
- +2:
- +
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/crtn.asm gcc-4.4.6/gcc/config/avr32/crtn.asm
- --- gcc-4.4.6.orig/gcc/config/avr32/crtn.asm 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/crtn.asm 2011-10-22 19:23:08.524581303 +0200
- @@ -0,0 +1,44 @@
- +/* Copyright (C) 2001 Free Software Foundation, Inc.
- + Written By Nick Clifton
- +
- + This file is free software; you can redistribute it and/or modify it
- + under the terms of the GNU General Public License as published by the
- + Free Software Foundation; either version 2, or (at your option) any
- + later version.
- +
- + In addition to the permissions in the GNU General Public License, the
- + Free Software Foundation gives you unlimited permission to link the
- + compiled version of this file with other programs, and to distribute
- + those programs without any restriction coming from the use of this
- + file. (The General Public License restrictions do apply in other
- + respects; for example, they cover modification of the file, and
- + distribution when not linked into another program.)
- +
- + This file is distributed in the hope that it will be useful, but
- + WITHOUT ANY WARRANTY; without even the implied warranty of
- + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- + General Public License for more details.
- +
- + You should have received a copy of the GNU General Public License
- + along with this program; see the file COPYING. If not, write to
- + the Free Software Foundation, 59 Temple Place - Suite 330,
- + Boston, MA 02111-1307, USA.
- +
- + As a special exception, if you link this library with files
- + compiled with GCC to produce an executable, this does not cause
- + the resulting executable to be covered by the GNU General Public License.
- + This exception does not however invalidate any other reasons why
- + the executable file might be covered by the GNU General Public License.
- +*/
- +
- +
- +
- +
- + .file "crtn.asm"
- +
- + .section ".init"
- + ldm sp++, r6, pc
- +
- + .section ".fini"
- + ldm sp++, r6, pc
- +
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/lib1funcs.S gcc-4.4.6/gcc/config/avr32/lib1funcs.S
- --- gcc-4.4.6.orig/gcc/config/avr32/lib1funcs.S 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/lib1funcs.S 2011-10-22 19:23:08.524581303 +0200
- @@ -0,0 +1,2902 @@
- +/* Macro for moving immediate value to register. */
- +.macro mov_imm reg, imm
- +.if (((\imm & 0xfffff) == \imm) || ((\imm | 0xfff00000) == \imm))
- + mov \reg, \imm
- +#if __AVR32_UC__ >= 2
- +.elseif ((\imm & 0xffff) == 0)
- + movh \reg, hi(\imm)
- +
- +#endif
- +.else
- + mov \reg, lo(\imm)
- + orh \reg, hi(\imm)
- +.endif
- +.endm
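- +
- +/* For illustration: "mov_imm r8, 0x12345678" fails both single-mov tests
- +   above and has a non-zero low halfword, so it expands to
- +       mov  r8, lo(0x12345678)
- +       orh  r8, hi(0x12345678)
- +   while "mov_imm r8, 0x000fffff" satisfies the first test and stays a
- +   single mov (r8 chosen only for the example). */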
- +
- +
- +
- +/* Adjust the unpacked double number if it is a subnormal number.
- + The exponent and mantissa pair are stored
- + in [mant_hi,mant_lo] and [exp]. A register with the correct sign bit in
- + the MSB is passed in [sign]. Needs two scratch
- + registers [scratch1] and [scratch2]. An adjusted and packed double float
- + is present in [mant_hi,mant_lo] after macro has executed */
- +.macro adjust_subnormal_df exp, mant_lo, mant_hi, sign, scratch1, scratch2
- + /* We have an exponent which is <= 0, indicating a subnormal number.
- + Since it must be stored as if the exponent were 1 (although the
- + exponent field is all zeros to indicate a subnormal number),
- + we have to shift the mantissa down to its correct position. */
- + neg \exp
- + sub \exp,-1 /* amount to shift down */
- + cp.w \exp,54
- + brlo 50f /* if more than 53 shift steps, the
- + entire mantissa will disappear
- + without any rounding occurring */
- + mov \mant_hi, 0
- + mov \mant_lo, 0
- + rjmp 52f
- +50:
- + sub \exp,-10 /* do the shift to position the
- + mantissa at the same time
- + note! this does not include the
- + final 1 step shift to add the sign */
- +
- + /* when shifting, save all shifted out bits in [scratch2]. we may need to
- + look at them to make correct rounding. */
- +
- + rsub \scratch1,\exp,32 /* get inverted shift count */
- + cp.w \exp,32 /* handle shifts >= 32 separately */
- + brhs 51f
- +
- + /* small (<32) shift amount, both words are part of the shift */
- + lsl \scratch2,\mant_lo,\scratch1 /* save bits to shift out from lsw*/
- + lsl \scratch1,\mant_hi,\scratch1 /* get bits from msw destined for lsw*/
- + lsr \mant_lo,\mant_lo,\exp /* shift down lsw */
- + lsr \mant_hi,\mant_hi,\exp /* shift down msw */
- + or \mant_hi,\scratch1 /* add bits from msw with prepared lsw */
- + rjmp 50f
- +
- + /* large (>=32) shift amount, only lsw will have bits left after shift.
- + note that shift operations will use ((shift count) mod 32) so
- + we do not need to subtract 32 from shift count. */
- +51:
- + lsl \scratch2,\mant_hi,\scratch1 /* save bits to shift out from msw */
- + or \scratch2,\mant_lo /* also save all bits from lsw */
- + mov \mant_lo,\mant_hi /* msw -> lsw (i.e. "shift 32 first") */
- + mov \mant_hi,0 /* clear msw */
- + lsr \mant_lo,\mant_lo,\exp /* make rest of shift inside lsw */
- +
- +50:
- + /* result is almost ready to return, except that least significant bit
- + and the part we already shifted out may cause the result to be
- + rounded */
- + bld \mant_lo,0 /* get bit to be shifted out */
- + brcc 51f /* if bit was 0, no rounding */
- +
- + /* msb of part to remove is 1, so rounding depends on rest of bits */
- + tst \scratch2,\scratch2 /* get shifted out tail */
- + brne 50f /* if rest > 0, do round */
- + bld \mant_lo,1 /* we have to look at lsb in result */
- + brcc 51f /* if lsb is 0, don't round */
- +
- +50:
- + /* subnormal result requires rounding
- + rounding may cause subnormal to become smallest normal number
- + luckily, smallest normal number has exactly the representation
- + we got by rippling a one bit up from mantissa into exponent field. */
- + sub \mant_lo,-1
- + subcc \mant_hi,-1
- +
- +51:
- + /* shift and return packed double with correct sign */
- + rol \sign
- + ror \mant_hi
- + ror \mant_lo
- +52:
- +.endm
- +
- +
- +/* Adjust subnormal single float number with exponent [exp]
- + and mantissa [mant] and round. */
- +.macro adjust_subnormal_sf sf, exp, mant, sign, scratch
- + /* subnormal number */
- + rsub \exp,\exp, 1 /* shift amount */
- + cp.w \exp, 25
- + movhs \mant, 0
- + brhs 90f /* Return zero */
- + rsub \scratch, \exp, 32
- + lsl \scratch, \mant, \scratch /* Check if there are any bits set
- + in the bits discarded in the mantissa */
- + srne \scratch /* If so set the lsb of the shifted mantissa */
- + lsr \mant,\mant,\exp /* Shift the mantissa */
- + or \mant, \scratch /* Round lsb if any bits were shifted out */
- + /* Rounding: For explanation, see round_sf. */
- + mov \scratch, 0x7f /* Set rounding constant */
- + bld \mant, 8
- + subeq \scratch, -1 /* For odd numbers use rounding constant 0x80 */
- + add \mant, \scratch /* Add rounding constant to mantissa */
- + /* We can't overflow because mantissa is at least shifted one position
- + to the right so the implicit bit is zero. We can however get the implicit
- + bit set after rounding which means that we have the lowest normal number
- + but this is ok since this bit has the same position as the LSB of the
- + exponent */
- + lsr \sf, \mant, 7
- + /* Rotate in sign */
- + lsl \sign, 1
- + ror \sf
- +90:
- +.endm
- +
- +
- +/* Round the unpacked df number with exponent [exp] and
- + mantissa [mant_hi, mant_lo]. Uses scratch register
- + [scratch] */
- +.macro round_df exp, mant_lo, mant_hi, scratch
- + mov \scratch, 0x3ff /* Rounding constant */
- + bld \mant_lo,11 /* Check if lsb in the final result is
- + set */
- + subeq \scratch, -1 /* Adjust rounding constant to 0x400
- + if rounding 0.5 upwards */
- + add \mant_lo, \scratch /* Round */
- + acr \mant_hi /* If overflowing we know that
- + we have all zeros in the bits not
- + scaled out so we can leave them
- + but we must increase the exponent by
- + two since we had an implicit bit
- + which is lost + the extra overflow bit */
- + subcs \exp, -2 /* Update exponent */
- +.endm
- +
- +/* Round single float number stored in [mant] and [exp] */
- +.macro round_sf exp, mant, scratch
- + /* Round:
- + For 0.5 we round to nearest even integer
- + for all other cases we round to nearest integer.
- + This means that if the digit left of the "point" (.)
- + is 1 we can add 0x80 to the mantissa since the
- + corner case 0x180 will round up to 0x200. If the
- + digit left of the "point" is 0 we will have to
- + add 0x7f since this will give 0xff and hence a
- + truncation/rounding downwards for the corner
- + case when the 9 lowest bits are 0x080 */
- + mov \scratch, 0x7f /* Set rounding constant */
- + /* Check if the mantissa is even or odd */
- + bld \mant, 8
- + subeq \scratch, -1 /* Rounding constant should be 0x80 */
- + add \mant, \scratch
- + subcs \exp, -2 /* Adjust exponent if we overflowed */
- +.endm
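- +
- +/* Worked corner cases for the constants above (illustrative): if the nine
- +   lowest mantissa bits are 0x080 (exact halfway) and bit 8 is clear (even),
- +   adding 0x7f gives 0x0ff, so the discarded byte never carries and the
- +   result rounds down to the even value; if they are 0x180 (halfway, odd),
- +   adding 0x80 gives 0x200, so the carry ripples up and the mantissa rounds
- +   up to the next even value. */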
- +
- +
- +
- +/* Pack a single float number stored in [mant] and [exp]
- + into a single float number in [sf] */
- +.macro pack_sf sf, exp, mant
- + bld \mant,31 /* implicit bit to z */
- + subne \exp,1 /* if subnormal (implicit bit 0)
- + adjust exponent to storage format */
- +
- + lsr \sf, \mant, 7
- + bfins \sf, \exp, 24, 8
- +.endm
- +
- +/* Pack exponent [exp] and mantissa [mant_hi, mant_lo]
- + into [df_hi, df_lo]. [df_hi] is shifted
- + one bit up so the sign bit can be shifted into it */
- +
- +.macro pack_df exp, mant_lo, mant_hi, df_lo, df_hi
- + bld \mant_hi,31 /* implicit bit to z */
- + subne \exp,1 /* if subnormal (implicit bit 0)
- + adjust exponent to storage format */
- +
- + lsr \mant_lo,11 /* shift back lsw */
- + or \df_lo,\mant_lo,\mant_hi<<21 /* combine with low bits from msw */
- + lsl \mant_hi,1 /* get rid of implicit bit */
- + lsr \mant_hi,11 /* shift back msw except for one step*/
- + or \df_hi,\mant_hi,\exp<<21 /* combine msw with exponent */
- +.endm
- +
- +/* Normalize single float number stored in [mant] and [exp]
- + using scratch register [scratch] */
- +.macro normalize_sf exp, mant, scratch
- + /* Adjust exponent and mantissa */
- + clz \scratch, \mant
- + sub \exp, \scratch
- + lsl \mant, \mant, \scratch
- +.endm
- +
- +/* Normalize the exponent and mantissa pair stored
- + in [mant_hi,mant_lo] and [exp]. Needs two scratch
- + registers [scratch1] and [scratch2]. */
- +.macro normalize_df exp, mant_lo, mant_hi, scratch1, scratch2
- + clz \scratch1,\mant_hi /* Check if we have zeros in high bits */
- + breq 80f /* No need for scaling if no zeros in high bits */
- + brcs 81f /* Check for all zeros */
- +
- + /* shift amount is smaller than 32, and involves both msw and lsw*/
- + rsub \scratch2,\scratch1,32 /* shift mantissa */
- + lsl \mant_hi,\mant_hi,\scratch1
- + lsr \scratch2,\mant_lo,\scratch2
- + or \mant_hi,\scratch2
- + lsl \mant_lo,\mant_lo,\scratch1
- + sub \exp,\scratch1 /* adjust exponent */
- + rjmp 80f /* Finished */
- +81:
- + /* shift amount is greater than 32 */
- + clz \scratch1,\mant_lo /* shift mantissa */
- + movcs \scratch1, 0
- + subcc \scratch1,-32
- + lsl \mant_hi,\mant_lo,\scratch1
- + mov \mant_lo,0
- + sub \exp,\scratch1 /* adjust exponent */
- +80:
- +.endm
- +
- +
- +/* Fast but approximate multiply of two 64-bit numbers to give a 64 bit result.
- + The multiplication of [al]x[bl] is discarded.
- + Operands in [ah], [al], [bh], [bl].
- + Scratch registers in [sh], [sl].
- + Returns results in registers [rh], [rl].*/
- +.macro mul_approx_df ah, al, bh, bl, rh, rl, sh, sl
- + mulu.d \sl, \ah, \bl
- + macu.d \sl, \al, \bh
- + mulu.d \rl, \ah, \bh
- + add \rl, \sh
- + acr \rh
- +.endm
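- +
- +/* In other words (illustrative): for a = ah*2^32 + al and b = bh*2^32 + bl,
- +   the macro computes roughly the top 64 bits of a*b,
- +       {rh,rl} ~ ah*bh + ((ah*bl + al*bh) >> 32),
- +   dropping the al*bl term, the low half of the cross terms and any carry
- +   out of the cross-term sum; hence fast but approximate. */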
- +
- +
- +
- +#if defined(L_avr32_f64_mul) || defined(L_avr32_f64_mul_fast)
- + .align 2
- +#if defined(L_avr32_f64_mul)
- + .global __avr32_f64_mul
- + .type __avr32_f64_mul,@function
- +__avr32_f64_mul:
- +#else
- + .global __avr32_f64_mul_fast
- + .type __avr32_f64_mul_fast,@function
- +__avr32_f64_mul_fast:
- +#endif
- + or r12, r10, r11 << 1
- + breq __avr32_f64_mul_op1_zero
- +
- +#if defined(L_avr32_f64_mul)
- + pushm r4-r7, lr
- +#else
- + stm --sp, r5,r6,r7,lr
- +#endif
- +
- +#define AVR32_F64_MUL_OP1_INT_BITS 1
- +#define AVR32_F64_MUL_OP2_INT_BITS 10
- +#define AVR32_F64_MUL_RES_INT_BITS 11
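- +
- +/* Note on the fixed-point formats used below (illustrative): op1 is
- +   unpacked to 1.63 format and op2 to 10.54, so their product has
- +   1 + 10 = 11 integer bits and 63 + 54 = 117 fraction bits, i.e. the
- +   11.117 format the full multiply places in [r11, r10, r7, r6]. */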
- +
- + /* op1 in {r11,r10}*/
- + /* op2 in {r9,r8}*/
- + eor lr, r11, r9 /* MSB(lr) = Sign(op1) ^ Sign(op2) */
- +
- + /* Unpack op1 to 1.63 format*/
- + /* exp: r7 */
- + /* sf: r11, r10 */
- + bfextu r7, r11, 20, 11 /* Extract exponent */
- +
- + mov r5, 1
- +
- + /* Check if normalization is needed */
- + breq __avr32_f64_mul_op1_subnormal /*If number is subnormal, normalize it */
- +
- + lsl r11, (12-AVR32_F64_MUL_OP1_INT_BITS-1) /* Extract mantissa, leave room for implicit bit */
- + or r11, r11, r10>>(32-(12-AVR32_F64_MUL_OP1_INT_BITS-1))
- + lsl r10, (12-AVR32_F64_MUL_OP1_INT_BITS-1)
- + bfins r11, r5, 32 - (1 + AVR32_F64_MUL_OP1_INT_BITS), 1 + AVR32_F64_MUL_OP1_INT_BITS /* Insert implicit bit */
- +
- +
- +22:
- + /* Unpack op2 to 10.54 format */
- + /* exp: r6 */
- + /* sf: r9, r8 */
- + bfextu r6, r9, 20, 11 /* Extract exponent */
- +
- + /* Check if normalization is needed */
- + breq __avr32_f64_mul_op2_subnormal /*If number is subnormal, normalize it */
- +
- + lsl r8, 1 /* Extract mantissa, leave room for implicit bit */
- + rol r9
- + bfins r9, r5, 32 - (1 + AVR32_F64_MUL_OP2_INT_BITS), 1 + AVR32_F64_MUL_OP2_INT_BITS /* Insert implicit bit */
- +
- +23:
- +
- + /* Check if any operands are NaN or INF */
- + cp r7, 0x7ff
- + breq __avr32_f64_mul_op_nan_or_inf /* Check op1 for NaN or Inf */
- + cp r6, 0x7ff
- + breq __avr32_f64_mul_op_nan_or_inf /* Check op2 for NaN or Inf */
- +
- +
- + /* Calculate new exponent in r12*/
- + add r12, r7, r6
- + sub r12, (1023-1)
- +
- +#if defined(L_avr32_f64_mul)
- + /* Do the multiplication.
- + Place result in [r11, r10, r7, r6]. The result is in 11.117 format. */
- + mulu.d r4, r11, r8
- + macu.d r4, r10, r9
- + mulu.d r6, r10, r8
- + mulu.d r10, r11, r9
- + add r7, r4
- + adc r10, r10, r5
- + acr r11
- +#else
- + /* Do the multiplication using an approximate calculation, discarding
- + the al x bl product.
- + Place result in [r11, r10, r7]. The result is in 11.85 format. */
- +
- + /* Do the multiplication using approximate calculation.
- + Place result in r11, r10. Use r7, r6 as scratch registers */
- + mulu.d r6, r11, r8
- + macu.d r6, r10, r9
- + mulu.d r10, r11, r9
- + add r10, r7
- + acr r11
- +#endif
- + /* Adjust exponent and mantissa */
- + /* [r12]:exp, [r11, r10]:mant [r7, r6]:sticky bits */
- + /* Mantissa may be of the format 00000000000.0xxx or 00000000000.1xxx. */
- + /* In the first case, shift one pos to left.*/
- + bld r11, 32-AVR32_F64_MUL_RES_INT_BITS-1
- + breq 0f
- + lsl r7, 1
- + rol r10
- + rol r11
- + sub r12, 1
- +0:
- + cp r12, 0
- + brle __avr32_f64_mul_res_subnormal /*Result was subnormal.*/
- +
- + /* Check for Inf. */
- + cp.w r12, 0x7ff
- + brge __avr32_f64_mul_res_inf
- +
- + /* Insert exponent. */
- + bfins r11, r12, 20, 11
- +
- + /* Result was not subnormal. Perform rounding. */
- + /* For the fast version we discard the sticky bits and always round
- + the halfway case up. */
- +24:
- +#if defined(L_avr32_f64_mul)
- + or r6, r6, r10 << 31 /* Or in parity bit into stickybits */
- + or r7, r7, r6 >> 1 /* OR in the sticky bits, keeping the msb
- + of r7 as the halfway bit. */
- + eorh r7, 0x8000 /* Toggle halfway bit. */
- + /* We should now round up by adding one for the following cases:
- +
- + halfway sticky|parity round-up
- + 0 x no
- + 1 0 no
- + 1 1 yes
- +
- + Since we have inverted the halfway bit we can use the satu instruction
- + by saturating to 1 bit to implement this.
- + */
- + satu r7 >> 0, 1
- +#else
- + lsr r7, 31
- +#endif
- + add r10, r7
- + acr r11
- +
- + /* Insert sign bit*/
- + bld lr, 31
- + bst r11, 31
- +
- + /* Return result in [r11,r10] */
- +#if defined(L_avr32_f64_mul)
- + popm r4-r7, pc
- +#else
- + ldm sp++, r5, r6, r7,pc
- +#endif
- +
- +
- +__avr32_f64_mul_op1_subnormal:
- + andh r11, 0x000f /* Remove sign bit and exponent */
- + clz r12, r10 /* Count leading zeros in lsw */
- + clz r6, r11 /* Count leading zeros in msw */
- + subcs r12, -32 + AVR32_F64_MUL_OP1_INT_BITS
- + movcs r6, r12
- + subcc r6, AVR32_F64_MUL_OP1_INT_BITS
- + cp.w r6, 32
- + brge 0f
- +
- + /* shifting involves both msw and lsw*/
- + rsub r12, r6, 32 /* shift mantissa */
- + lsl r11, r11, r6
- + lsr r12, r10, r12
- + or r11, r12
- + lsl r10, r10, r6
- + sub r6, 12-AVR32_F64_MUL_OP1_INT_BITS
- + sub r7, r6 /* adjust exponent */
- + rjmp 22b /* Finished */
- +0:
- + /* msw is zero so only need to consider lsw */
- + lsl r11, r10, r6
- + breq __avr32_f64_mul_res_zero
- + mov r10, 0
- + sub r6, 12-AVR32_F64_MUL_OP1_INT_BITS
- + sub r7, r6 /* adjust exponent */
- + rjmp 22b
- +
- +
- +__avr32_f64_mul_op2_subnormal:
- + andh r9, 0x000f /* Remove sign bit and exponent */
- + clz r12, r8 /* Count leading zeros in lsw */
- + clz r5, r9 /* Count leading zeros in msw */
- + subcs r12, -32 + AVR32_F64_MUL_OP2_INT_BITS
- + movcs r5, r12
- + subcc r5, AVR32_F64_MUL_OP2_INT_BITS
- + cp.w r5, 32
- + brge 0f
- +
- + /* shifting involves both msw and lsw*/
- + rsub r12, r5, 32 /* shift mantissa */
- + lsl r9, r9, r5
- + lsr r12, r8, r12
- + or r9, r12
- + lsl r8, r8, r5
- + sub r5, 12 - AVR32_F64_MUL_OP2_INT_BITS
- + sub r6, r5 /* adjust exponent */
- + rjmp 23b /* Finished */
- +0:
- + /* msw is zero so only need to consider lsw */
- + lsl r9, r8, r5
- + breq __avr32_f64_mul_res_zero
- + mov r8, 0
- + sub r5, 12 - AVR32_F64_MUL_OP2_INT_BITS
- + sub r6, r5 /* adjust exponent */
- + rjmp 23b
- +
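The two subnormal-operand paths above renormalize a mantissa held in two 32-bit words: count leading zeros in the msw (or in the lsw when the msw is zero), shift the leading one up, and subtract the same amount from the exponent. A simplified C sketch of the idea, using a hypothetical helper and ignoring the fixed integer-bit position the assembly maintains:

    #include <stdint.h>

    /* Normalize a subnormal mantissa spread over hi:lo; __builtin_clz is the
       GCC builtin for counting leading zeros (undefined for 0, so guarded). */
    static void normalize_subnormal(uint32_t *hi, uint32_t *lo, int *exp)
    {
        if (*hi == 0 && *lo == 0)
            return;                                   /* true zero: caller handles it */
        int shift = *hi ? __builtin_clz(*hi) : 32 + __builtin_clz(*lo);
        if (shift == 0)
            return;
        if (shift < 32) {
            *hi = (*hi << shift) | (*lo >> (32 - shift));
            *lo <<= shift;
        } else {
            *hi = *lo << (shift - 32);
            *lo = 0;
        }
        *exp -= shift;                                /* exponent adjustment */
    }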
- +
- +__avr32_f64_mul_op_nan_or_inf:
- + /* Same code for OP1 and OP2*/
- +	/* Since we are here, at least one of the OPs was NaN or INF*/
- + andh r9, 0x000f /* Remove sign bit and exponent */
- + andh r11, 0x000f /* Remove sign bit and exponent */
- + /* Merge the regs in each operand to check for zero*/
- + or r11, r10 /* op1 */
- + or r9, r8 /* op2 */
- + /* Check if op1 is NaN or INF */
- + cp r7, 0x7ff
- + brne __avr32_f64_mul_op1_not_naninf
- + /* op1 was NaN or INF.*/
- + cp r11, 0
- + brne __avr32_f64_mul_res_nan /* op1 was NaN. Result will be NaN*/
- + /*op1 was INF. check if op2 is NaN or INF*/
- + cp r6, 0x7ff
- + brne __avr32_f64_mul_res_inf /*op1 was INF, op2 was neither NaN nor INF*/
- + /* op1 is INF, op2 is either NaN or INF*/
- + cp r9, 0
- + breq __avr32_f64_mul_res_inf /*op2 was also INF*/
- + rjmp __avr32_f64_mul_res_nan /*op2 was NaN*/
- +
- +__avr32_f64_mul_op1_not_naninf:
- +	/* op1 was neither NaN nor INF, so op2 must be NaN or INF*/
- + cp r9, 0
- + breq __avr32_f64_mul_res_inf /*op2 was INF, return INF*/
- + rjmp __avr32_f64_mul_res_nan /*else return NaN*/
- +
- +__avr32_f64_mul_res_subnormal:/* Multiply result was subnormal. */
- +#if defined(L_avr32_f64_mul)
- + /* Check how much we must scale down the mantissa. */
- + neg r12
- +	sub	r12, -1	/* We no longer have an implicit bit. */
- + satu r12 >> 0, 6 /* Saturate shift amount to max 63. */
- + cp.w r12, 32
- + brge 0f
- + /* Shift amount <32 */
- + rsub r8, r12, 32
- + or r6, r7
- + lsr r7, r7, r12
- + lsl r9, r10, r8
- + or r7, r9
- + lsr r10, r10, r12
- + lsl r9, r11, r8
- + or r10, r9
- + lsr r11, r11, r12
- + rjmp 24b
- +0:
- + /* Shift amount >=32 */
- + rsub r8, r12, 32
- + moveq r9, 0
- + breq 0f
- + lsl r9, r11, r8
- +0:
- + or r6, r7
- + or r6, r6, r10 << 1
- + lsr r10, r10, r12
- + or r7, r9, r10
- + lsr r10, r11, r12
- + mov r11, 0
- + rjmp 24b
- +#else
- + /* Flush to zero for the fast version. */
- + mov r11, lr /*Get correct sign*/
- + andh r11, 0x8000, COH
- + mov r10, 0
- + ldm sp++, r5, r6, r7,pc
- +#endif
- +
- +__avr32_f64_mul_res_zero:/* Multiply result is zero. */
- + mov r11, lr /*Get correct sign*/
- + andh r11, 0x8000, COH
- + mov r10, 0
- +#if defined(L_avr32_f64_mul)
- + popm r4-r7, pc
- +#else
- + ldm sp++, r5, r6, r7,pc
- +#endif
- +
- +__avr32_f64_mul_res_nan: /* Return NaN. */
- + mov r11, -1
- + mov r10, -1
- +#if defined(L_avr32_f64_mul)
- + popm r4-r7, pc
- +#else
- + ldm sp++, r5, r6, r7,pc
- +#endif
- +
- +__avr32_f64_mul_res_inf: /* Return INF. */
- + mov r11, 0xfff00000
- + bld lr, 31
- + bst r11, 31
- + mov r10, 0
- +#if defined(L_avr32_f64_mul)
- + popm r4-r7, pc
- +#else
- + ldm sp++, r5, r6, r7,pc
- +#endif
- +
- +__avr32_f64_mul_op1_zero:
- + /* Get sign */
- + eor r11, r11, r9
- + andh r11, 0x8000, COH
- + /* Check if op2 is Inf or NaN. */
- + bfextu r12, r9, 20, 11
- + cp.w r12, 0x7ff
- + retne r12 /* Return 0.0 */
- + /* Return NaN */
- + mov r10, -1
- + mov r11, -1
- + ret r12
- +
- +
- +
- +#endif
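
For reference, the NaN/Inf/zero branches of the multiply above follow the usual IEEE 754 rules. A compact restatement in C (an illustration of the decision table, not the libgcc entry point):

    #include <math.h>

    /* Any NaN operand, or Inf * 0, gives NaN; Inf times a finite non-zero
       value gives Inf with the XOR of the signs; otherwise the normal path
       applies. */
    static double mul_special_cases(double a, double b)
    {
        int sign = (signbit(a) != 0) ^ (signbit(b) != 0);
        if (isnan(a) || isnan(b))
            return NAN;
        if ((isinf(a) && b == 0.0) || (isinf(b) && a == 0.0))
            return NAN;
        if (isinf(a) || isinf(b))
            return sign ? -INFINITY : INFINITY;
        return a * b;
    }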
- +
- +
- +#if defined(L_avr32_f64_addsub) || defined(L_avr32_f64_addsub_fast)
- + .align 2
- +
- +__avr32_f64_sub_from_add:
- + /* Switch sign on op2 */
- + eorh r9, 0x8000
- +
- +#if defined(L_avr32_f64_addsub_fast)
- + .global __avr32_f64_sub_fast
- + .type __avr32_f64_sub_fast,@function
- +__avr32_f64_sub_fast:
- +#else
- + .global __avr32_f64_sub
- + .type __avr32_f64_sub,@function
- +__avr32_f64_sub:
- +#endif
- +
- + /* op1 in {r11,r10}*/
- + /* op2 in {r9,r8}*/
- +
- +#if defined(L_avr32_f64_addsub_fast)
- + /* If op2 is zero just return op1 */
- + or r12, r8, r9 << 1
- + reteq r12
- +#endif
- +
- + /* Check signs */
- + eor r12, r11, r9
- + /* Different signs, use addition. */
- + brmi __avr32_f64_add_from_sub
- +
- + stm --sp, r5, r6, r7, lr
- +
- + /* Get sign of op1 into r12 */
- + mov r12, r11
- + andh r12, 0x8000, COH
- +
- + /* Remove sign from operands */
- + cbr r11, 31
- + cbr r9, 31
- +
- + /* Put the largest number in [r11, r10]
- + and the smallest number in [r9, r8] */
- + cp r10, r8
- + cpc r11, r9
- + brhs 1f /* Skip swap if operands already correctly ordered*/
- + /* Operands were not correctly ordered, swap them*/
- + mov r7, r11
- + mov r11, r9
- + mov r9, r7
- + mov r7, r10
- + mov r10, r8
- + mov r8, r7
- + eorh r12, 0x8000 /* Invert sign in r12*/
- +1:
- + /* Unpack largest operand - opH */
- + /* exp: r7 */
- + /* sf: r11, r10 */
- + lsr r7, r11, 20 /* Extract exponent */
- + lsl r11, 11 /* Extract mantissa, leave room for implicit bit */
- + or r11, r11, r10>>21
- + lsl r10, 11
- + sbr r11, 31 /* Insert implicit bit */
- +
- +
- + /* Unpack smallest operand - opL */
- + /* exp: r6 */
- + /* sf: r9, r8 */
- + lsr r6, r9, 20 /* Extract exponent */
- + breq __avr32_f64_sub_opL_subnormal /* If either zero or subnormal */
- + lsl r9, 11 /* Extract mantissa, leave room for implicit bit */
- + or r9, r9, r8>>21
- + lsl r8, 11
- + sbr r9, 31 /* Insert implicit bit */
- +
- +
- +__avr32_f64_sub_opL_subnormal_done:
- + /* opH is NaN or Inf. */
- + cp.w r7, 0x7ff
- + breq __avr32_f64_sub_opH_nan_or_inf
- +
- + /* Get shift amount to scale mantissa of op2. */
- + rsub r6, r7
- + breq __avr32_f64_sub_shift_done /* No need to shift, exponents are equal*/
- +
- + /* Scale mantissa [r9, r8] with amount [r6].
- + Uses scratch registers [r5] and [lr].
- +	   In IEEE mode: must not forget the sticky bits we intend to shift out. */
- +
- + rsub r5,r6,32 /* get (32 - shift count)
- + (if shift count > 32 we get a
- + negative value, but that will
- + work as well in the code below.) */
- +
- + cp.w r6,32 /* handle shifts >= 32 separately */
- + brhs __avr32_f64_sub_longshift
- +
- + /* small (<32) shift amount, both words are part of the shift
- + first remember whether part that is lost contains any 1 bits ... */
- + lsl lr,r8,r5 /* shift away bits that are part of
- + final mantissa. only part that goes
- + to lr are bits that will be lost */
- +
- + /* ... and now to the actual shift */
- + lsl r5,r9,r5 /* get bits from msw destined for lsw*/
- + lsr r8,r8,r6 /* shift down lsw of mantissa */
- + lsr r9,r9,r6 /* shift down msw of mantissa */
- + or r8,r5 /* combine these bits with prepared lsw*/
- +#if defined(L_avr32_f64_addsub)
- + cp.w lr,0 /* if any '1' bit in part we lost ...*/
- + srne lr
- + or r8, lr /* ... we need to set sticky bit*/
- +#endif
- +
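The alignment shifts used here (the short-shift path above and __avr32_f64_sub_longshift further down) must fold every bit shifted out of the smaller mantissa into a sticky bit so that rounding can still see them. The same operation on a single 64-bit value, as a C sketch with an assumed helper name:

    #include <stdint.h>

    /* Right-shift by n (0..63) while ORing all shifted-out bits into the lsb. */
    static uint64_t shift_right_sticky(uint64_t mant, unsigned n)
    {
        if (n == 0)
            return mant;
        if (n > 63)
            return mant != 0;                     /* everything became sticky */
        uint64_t lost = mant & ((1ULL << n) - 1);
        return (mant >> n) | (lost != 0);         /* sticky bit in the lsb */
    }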
- +__avr32_f64_sub_shift_done:
- + /* Now subtract the mantissas. */
- + sub r10, r8
- + sbc r11, r11, r9
- +
- + /* Normalize the exponent and mantissa pair stored in
- + [r11,r10] and exponent in [r7]. Needs two scratch registers [r6] and [lr]. */
- + clz r6,r11 /* Check if we have zeros in high bits */
- + breq __avr32_f64_sub_longnormalize_done /* No need for scaling if no zeros in high bits */
- + brcs __avr32_f64_sub_longnormalize
- +
- +
- + /* shift amount is smaller than 32, and involves both msw and lsw*/
- + rsub lr,r6,32 /* shift mantissa */
- + lsl r11,r11,r6
- + lsr lr,r10,lr
- + or r11,lr
- + lsl r10,r10,r6
- +
- + sub r7,r6 /* adjust exponent */
- + brle __avr32_f64_sub_subnormal_result
- +__avr32_f64_sub_longnormalize_done:
- +
- +#if defined(L_avr32_f64_addsub)
- +	/* Insert the bits we will remove from the mantissa into r9[31:21] */
- + lsl r9, r10, (32 - 11)
- +#else
- + /* Keep the last bit shifted out. */
- + bfextu r9, r10, 10, 1
- +#endif
- +
- + /* Pack final result*/
- + /* Input: [r7]:exp, [r11, r10]:mant, [r12]:sign in MSB */
- + /* Result in [r11,r10] */
- + /* Insert mantissa */
- + lsr r10, 11
- + or r10, r10, r11<<21
- + lsr r11, 11
- + /* Insert exponent and sign bit*/
- + bfins r11, r7, 20, 11
- + or r11, r12
- +
- + /* Round */
- +__avr32_f64_sub_round:
- +#if defined(L_avr32_f64_addsub)
- + mov_imm r7, 0x80000000
- + bld r10, 0
- + subne r7, -1
- +
- + cp.w r9, r7
- + srhs r9
- +#endif
- + add r10, r9
- + acr r11
- +
- + /* Return result in [r11,r10] */
- + ldm sp++, r5, r6, r7,pc
- +
- +
- +
- +__avr32_f64_sub_opL_subnormal:
- +	/* Extract the mantissa */
- + lsl r9, 11 /* Extract mantissa, leave room for implicit bit */
- + or r9, r9, r8>>21
- + lsl r8, 11
- +
- + /* Set exponent to 1 if we do not have a zero. */
- + or lr, r9, r8
- + movne r6,1
- +
- + /* Check if opH is also subnormal. If so, clear implicit bit in r11*/
- + rsub lr, r7, 0
- + moveq r7,1
- + bst r11, 31
- +
- + /* Check if op1 is zero, if so set exponent to 0. */
- + or lr, r11, r10
- + moveq r7,0
- +
- + rjmp __avr32_f64_sub_opL_subnormal_done
- +
- +__avr32_f64_sub_opH_nan_or_inf:
- + /* Check if opH is NaN, if so return NaN */
- + cbr r11, 31
- + or lr, r11, r10
- + brne __avr32_f64_sub_return_nan
- +
- + /* opH is Inf. */
- + /* Check if opL is Inf. or NaN */
- + cp.w r6, 0x7ff
- + breq __avr32_f64_sub_return_nan
- + /* Return infinity with correct sign. */
- + or r11, r12, r7 << 20
- + ldm sp++, r5, r6, r7, pc/* opL not Inf or NaN, return opH */
- +__avr32_f64_sub_return_nan:
- + mov r10, -1 /* Generate NaN in r11, r10 */
- + mov r11, -1
- + ldm sp++, r5, r6, r7, pc/* opL Inf or NaN, return NaN */
- +
- +
- +__avr32_f64_sub_subnormal_result:
- +#if defined(L_avr32_f64_addsub)
- + /* Check how much we must scale down the mantissa. */
- + neg r7
- +	sub	r7, -1	/* We no longer have an implicit bit. */
- + satu r7 >> 0, 6 /* Saturate shift amount to max 63. */
- + cp.w r7, 32
- + brge 0f
- + /* Shift amount <32 */
- + rsub r8, r7, 32
- + lsl r9, r10, r8
- + srne r6
- + lsr r10, r10, r7
- + or r10, r6 /* Sticky bit from the
- + part that was shifted out. */
- + lsl r9, r11, r8
- + or r10, r10, r9
- + lsr r11, r10, r7
- + /* Set exponent */
- + mov r7, 0
- + rjmp __avr32_f64_sub_longnormalize_done
- +0:
- + /* Shift amount >=32 */
- + rsub r8, r7, 64
- + lsl r9, r11, r8
- + or r9, r10
- + srne r6
- + lsr r10, r11, r7
- + or r10, r6 /* Sticky bit from the
- + part that was shifted out. */
- + mov r11, 0
- + /* Set exponent */
- + mov r7, 0
- + rjmp __avr32_f64_sub_longnormalize_done
- +#else
- + /* Just flush subnormals to zero. */
- + mov r10, 0
- + mov r11, 0
- +#endif
- + ldm sp++, r5, r6, r7, pc
- +
- +__avr32_f64_sub_longshift:
- + /* large (>=32) shift amount, only lsw will have bits left after shift.
- + note that shift operations will use ((shift count=r6) mod 32) so
- + we do not need to subtract 32 from shift count. */
- + /* Saturate the shift amount to 63. If the amount
- + is any larger op2 is insignificant. */
- + satu r6 >> 0, 6
- +
- +#if defined(L_avr32_f64_addsub)
- + /* first remember whether part that is lost contains any 1 bits ... */
- + moveq lr, r8 /* If shift amount is 32, no bits from msw are lost. */
- + breq 0f
- + lsl lr,r9,r5 /* save all lost bits from msw */
- + or lr,r8 /* also save lost bits (all) from lsw
- + now lr != 0 if we lose any bits */
- +#endif
- +0:
- + /* ... and now to the actual shift */
- + lsr r8,r9,r6 /* Move msw to lsw and shift. */
- + mov r9,0 /* clear msw */
- +#if defined(L_avr32_f64_addsub)
- + cp.w lr,0 /* if any '1' bit in part we lost ...*/
- + srne lr
- + or r8, lr /* ... we need to set sticky bit*/
- +#endif
- + rjmp __avr32_f64_sub_shift_done
- +
- +__avr32_f64_sub_longnormalize:
- + /* shift amount is greater than 32 */
- + clz r6,r10 /* shift mantissa */
- + /* If the resulting mantissa is zero the result is
- + zero so force exponent to zero. */
- + movcs r7, 0
- + movcs r6, 0
- +	movcs	r12, 0	/* Also clear sign bit. A zero result from subtraction
- +			   is always +0.0 */
- + subcc r6,-32
- + lsl r11,r10,r6
- + mov r10,0
- + sub r7,r6 /* adjust exponent */
- + brle __avr32_f64_sub_subnormal_result
- + rjmp __avr32_f64_sub_longnormalize_done
- +
- +
- +
- + .align 2
- +__avr32_f64_add_from_sub:
- + /* Switch sign on op2 */
- + eorh r9, 0x8000
- +
- +#if defined(L_avr32_f64_addsub_fast)
- + .global __avr32_f64_add_fast
- + .type __avr32_f64_add_fast,@function
- +__avr32_f64_add_fast:
- +#else
- + .global __avr32_f64_add
- + .type __avr32_f64_add,@function
- +__avr32_f64_add:
- +#endif
- +
- + /* op1 in {r11,r10}*/
- + /* op2 in {r9,r8}*/
- +
- +#if defined(L_avr32_f64_addsub_fast)
- + /* If op2 is zero just return op1 */
- + or r12, r8, r9 << 1
- + reteq r12
- +#endif
- +
- + /* Check signs */
- + eor r12, r11, r9
- + /* Different signs, use subtraction. */
- + brmi __avr32_f64_sub_from_add
- +
- + stm --sp, r5, r6, r7, lr
- +
- + /* Get sign of op1 into r12 */
- + mov r12, r11
- + andh r12, 0x8000, COH
- +
- + /* Remove sign from operands */
- + cbr r11, 31
- + cbr r9, 31
- +
- + /* Put the number with the largest exponent in [r11, r10]
- + and the number with the smallest exponent in [r9, r8] */
- + cp r11, r9
- + brhs 1f /* Skip swap if operands already correctly ordered */
- + /* Operands were not correctly ordered, swap them */
- + mov r7, r11
- + mov r11, r9
- + mov r9, r7
- + mov r7, r10
- + mov r10, r8
- + mov r8, r7
- +1:
- + mov lr, 0 /* Set sticky bits to zero */
- + /* Unpack largest operand - opH */
- + /* exp: r7 */
- + /* sf: r11, r10 */
- + bfextu R7, R11, 20, 11 /* Extract exponent */
- + bfextu r11, r11, 0, 20 /* Extract mantissa */
- + sbr r11, 20 /* Insert implicit bit */
- +
- + /* Unpack smallest operand - opL */
- + /* exp: r6 */
- + /* sf: r9, r8 */
- + bfextu R6, R9, 20, 11 /* Extract exponent */
- + breq __avr32_f64_add_op2_subnormal
- + bfextu r9, r9, 0, 20 /* Extract mantissa */
- + sbr r9, 20 /* Insert implicit bit */
- +
- +2:
- + /* opH is NaN or Inf. */
- + cp.w r7, 0x7ff
- + breq __avr32_f64_add_opH_nan_or_inf
- +
- + /* Get shift amount to scale mantissa of op2. */
- + rsub r6, r7
- + breq __avr32_f64_add_shift_done /* No need to shift, exponents are equal*/
- +
- + /* Scale mantissa [r9, r8] with amount [r6].
- + Uses scratch registers [r5] and [lr].
- +	   In IEEE mode: must not forget the sticky bits we intend to shift out. */
- + rsub r5,r6,32 /* get (32 - shift count)
- + (if shift count > 32 we get a
- + negative value, but that will
- + work as well in the code below.) */
- +
- + cp.w r6,32 /* handle shifts >= 32 separately */
- + brhs __avr32_f64_add_longshift
- +
- + /* small (<32) shift amount, both words are part of the shift
- + first remember whether part that is lost contains any 1 bits ... */
- + lsl lr,r8,r5 /* shift away bits that are part of
- + final mantissa. only part that goes
- + to lr are bits that will be lost */
- +
- + /* ... and now to the actual shift */
- + lsl r5,r9,r5 /* get bits from msw destined for lsw*/
- + lsr r8,r8,r6 /* shift down lsw of mantissa */
- + lsr r9,r9,r6 /* shift down msw of mantissa */
- + or r8,r5 /* combine these bits with prepared lsw*/
- +
- +__avr32_f64_add_shift_done:
- + /* Now add the mantissas. */
- + add r10, r8
- + adc r11, r11, r9
- +
- + /* Check if we overflowed. */
- + bld r11, 21
- +	breq	__avr32_f64_add_res_of
- +
- +__avr32_f64_add_res_of_done:
- +
- + /* Pack final result*/
- + /* Input: [r7]:exp, [r11, r10]:mant, [r12]:sign in MSB */
- + /* Result in [r11,r10] */
- + /* Insert exponent and sign bit*/
- + bfins r11, r7, 20, 11
- + or r11, r12
- +
- + /* Round */
- +__avr32_f64_add_round:
- +#if defined(L_avr32_f64_addsub)
- + bfextu r12, r10, 0, 1 /* Extract parity bit.*/
- + or lr, r12 /* or it together with the sticky bits. */
- + eorh lr, 0x8000 /* Toggle round bit. */
- + /* We should now round up by adding one for the following cases:
- +
- + halfway sticky|parity round-up
- + 0 x no
- + 1 0 no
- + 1 1 yes
- +
- + Since we have inverted the halfway bit we can use the satu instruction
- + by saturating to 1 bit to implement this.
- + */
- + satu lr >> 0, 1
- +#else
- + lsr lr, 31
- +#endif
- + add r10, lr
- + acr r11
- +
- + /* Return result in [r11,r10] */
- + ldm sp++, r5, r6, r7,pc
- +
- +
- +__avr32_f64_add_opH_nan_or_inf:
- + /* Check if opH is NaN, if so return NaN */
- + cbr r11, 20
- + or lr, r11, r10
- + brne __avr32_f64_add_return_nan
- +
- + /* opH is Inf. */
- + /* Check if opL is Inf. or NaN */
- + cp.w r6, 0x7ff
- + breq __avr32_f64_add_opL_nan_or_inf
- + ldm sp++, r5, r6, r7, pc/* opL not Inf or NaN, return opH */
- +__avr32_f64_add_opL_nan_or_inf:
- + cbr r9, 20
- + or lr, r9, r8
- + brne __avr32_f64_add_return_nan
- + mov r10, 0 /* Generate Inf in r11, r10 */
- + mov_imm r11, 0x7ff00000
- + or r11, r12 /* Put sign bit back */
- + ldm sp++, r5, r6, r7, pc/* opL Inf, return Inf */
- +__avr32_f64_add_return_nan:
- + mov r10, -1 /* Generate NaN in r11, r10 */
- + mov r11, -1
- + ldm sp++, r5, r6, r7, pc/* opL Inf or NaN, return NaN */
- +
- +
- +__avr32_f64_add_longshift:
- + /* large (>=32) shift amount, only lsw will have bits left after shift.
- + note that shift operations will use ((shift count=r6) mod 32) so
- + we do not need to subtract 32 from shift count. */
- + /* Saturate the shift amount to 63. If the amount
- + is any larger op2 is insignificant. */
- + satu r6 >> 0, 6
- + /* If shift amount is 32 there are no bits from the msw that are lost. */
- + moveq lr, r8
- + breq 0f
- + /* first remember whether part that is lost contains any 1 bits ... */
- + lsl lr,r9,r5 /* save all lost bits from msw */
- +#if defined(L_avr32_f64_addsub)
- + cp.w r8, 0
- + srne r8
- + or lr,r8 /* also save lost bits (all) from lsw
- + now lr != 0 if we lose any bits */
- +#endif
- +0:
- + /* ... and now to the actual shift */
- + lsr r8,r9,r6 /* msw -> lsw and make rest of shift inside lsw*/
- + mov r9,0 /* clear msw */
- + rjmp __avr32_f64_add_shift_done
- +
- +__avr32_f64_add_res_of:
- + /* We overflowed. Scale down mantissa by shifting right one position. */
- + or lr, lr, lr << 1 /* Remember stickybits*/
- + lsr r11, 1
- + ror r10
- + ror lr
- + sub r7, -1 /* Increment exponent */
- +
- +	/* Clear mantissa to set result to Inf if the exponent is 0x7ff. */
- + cp.w r7, 0x7ff
- + moveq r10, 0
- + moveq r11, 0
- + moveq lr, 0
- + rjmp __avr32_f64_add_res_of_done
- +
- +__avr32_f64_add_op2_subnormal:
- +	/* Set exponent to 1 */
- + mov r6, 1
- +
- +	/* Check if opH is also subnormal. */
- + cp.w r7, 0
- + brne 2b
- +
- + cbr r11, 20
- +	/* Both operands are subnormal. Just add the mantissas;
- +	   the exponent will automatically be set to 1 if we
- +	   overflow into a normal number. */
- + add r10, r8
- + adc r11, r11, r9
- +
- + /* Add sign bit */
- + or r11, r12
- +
- + /* Return result in [r11,r10] */
- + ldm sp++, r5, r6, r7,pc
- +
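The both-subnormal shortcut above leans on a property of the IEEE 754 encoding: for two subnormals of the same sign, adding the raw bit patterns already yields the correctly encoded sum, because a carry out of the mantissa field lands in the exponent field and produces the smallest normal exponent. A small self-contained check of that claim on a host with IEEE 754 doubles (the values are arbitrary illustrations):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint64_t a = 0x000C000000000000ULL;   /* subnormal: 0.75  * 2^-1022 */
        uint64_t b = 0x0006000000000000ULL;   /* subnormal: 0.375 * 2^-1022 */
        uint64_t sum_bits = a + b;            /* 0x0012...: exponent field 1 */
        double da, db, ds;
        memcpy(&da, &a, 8);
        memcpy(&db, &b, 8);
        ds = da + db;                         /* 1.125 * 2^-1022, a normal number */
        uint64_t ieee;
        memcpy(&ieee, &ds, 8);
        printf("%d\n", sum_bits == ieee);     /* prints 1 */
        return 0;
    }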
- +
- +
- +#endif
- +
- +#ifdef L_avr32_f64_to_u32
- + /* This goes into L_fixdfsi */
- +#endif
- +
- +
- +#ifdef L_avr32_f64_to_s32
- + .global __avr32_f64_to_u32
- + .type __avr32_f64_to_u32,@function
- +__avr32_f64_to_u32:
- + cp.w r11, 0
- + retmi 0 /* Negative returns 0 */
- +
- + /* Fallthrough to df to signed si conversion */
- + .global __avr32_f64_to_s32
- + .type __avr32_f64_to_s32,@function
- +__avr32_f64_to_s32:
- + lsl r12,r11,1
- + lsr r12,21 /* extract exponent*/
- + sub r12,1023 /* convert to unbiased exponent.*/
- + retlo 0 /* too small exponent implies zero. */
- +
- +1:
- + rsub r12,r12,31 /* shift count = 31 - exponent */
- + mov r9,r11 /* save sign for later...*/
- + lsl r11,11 /* remove exponent and sign*/
- + sbr r11,31 /* add implicit bit*/
- + or r11,r11,r10>>21 /* get rest of bits from lsw of double */
- + lsr r11,r11,r12 /* shift down mantissa to final place */
- + lsl r9,1 /* sign -> carry */
- + retcc r11 /* if positive, we are done */
- + neg r11 /* if negative float, negate result */
- + ret r11
- +
- +#endif /* L_fixdfsi*/
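
A rough C equivalent of the conversion above (an illustrative sketch, not the runtime entry point): extract the biased exponent, return 0 for magnitudes below 1, otherwise restore the implicit bit, shift the mantissa into place and apply the sign. Overflowing inputs and NaN are left unspecified here, just as the assembly makes no promises for them:

    #include <stdint.h>
    #include <string.h>

    static int32_t f64_to_s32_sketch(double d)
    {
        uint64_t bits;
        memcpy(&bits, &d, sizeof bits);
        int exp = (int)((bits >> 52) & 0x7ff) - 1023;
        if (exp < 0)
            return 0;                                  /* |d| < 1 -> 0 */
        uint64_t mant = (bits & 0x000FFFFFFFFFFFFFULL) | (1ULL << 52);
        uint32_t mag = (uint32_t)(mant >> (52 - exp)); /* valid for exp <= 31 */
        return (bits >> 63) ? -(int32_t)mag : (int32_t)mag;
    }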
- +
- +#ifdef L_avr32_f64_to_u64
- + /* Actual function is in L_fixdfdi */
- +#endif
- +
- +#ifdef L_avr32_f64_to_s64
- + .global __avr32_f64_to_u64
- + .type __avr32_f64_to_u64,@function
- +__avr32_f64_to_u64:
- + cp.w r11,0
- + /* Negative numbers return zero */
- + movmi r10, 0
- + movmi r11, 0
- + retmi r11
- +
- +
- +
- + /* Fallthrough */
- + .global __avr32_f64_to_s64
- + .type __avr32_f64_to_s64,@function
- +__avr32_f64_to_s64:
- + lsl r9,r11,1
- + lsr r9,21 /* get exponent*/
- + sub r9,1023 /* convert to correct range*/
- +	/* Return zero if exponent too small */
- + movlo r10, 0
- + movlo r11, 0
- + retlo r11
- +
- + mov r8,r11 /* save sign for later...*/
- +1:
- + lsl r11,11 /* remove exponent */
- + sbr r11,31 /* add implicit bit*/
- + or r11,r11,r10>>21 /* get rest of bits from lsw of double*/
- + lsl r10,11 /* align lsw correctly as well */
- + rsub r9,r9,63 /* shift count = 63 - exponent */
- + breq 1f
- +
- + cp.w r9,32 /* is shift count more than one reg? */
- + brhs 0f
- +
- + mov r12,r11 /* save msw */
- + lsr r10,r10,r9 /* small shift count, shift down lsw */
- + lsr r11,r11,r9 /* small shift count, shift down msw */
- + rsub r9,r9,32 /* get 32-size of shifted out tail */
- + lsl r12,r12,r9 /* align part to move from msw to lsw */
- + or r10,r12 /* combine to get new lsw */
- + rjmp 1f
- +
- +0:
- + lsr r10,r11,r9 /* large shift count,only lsw get bits
- + note that shift count is modulo 32*/
- + mov r11,0 /* msw will be 0 */
- +
- +1:
- + lsl r8,1 /* sign -> carry */
- + retcc r11 /* if positive, we are done */
- +
- + neg r11 /* if negative float, negate result */
- + neg r10
- + scr r11
- + ret r11
- +
- +#endif
- +
- +#ifdef L_avr32_u32_to_f64
- + /* Code located in L_floatsidf */
- +#endif
- +
- +#ifdef L_avr32_s32_to_f64
- + .global __avr32_u32_to_f64
- + .type __avr32_u32_to_f64,@function
- +__avr32_u32_to_f64:
- + sub r11, r12, 0 /* Move to r11 and force Z flag to be updated */
- + mov r12, 0 /* always positive */
- + rjmp 0f /* Jump to common code for floatsidf */
- +
- + .global __avr32_s32_to_f64
- + .type __avr32_s32_to_f64,@function
- +__avr32_s32_to_f64:
- + mov r11, r12 /* Keep original value in r12 for sign */
- +	abs	r11	/* Absolute value of r12 */
- +0:
- + mov r10,0 /* let remaining bits be zero */
- + reteq r11 /* zero long will return zero float */
- +
- + pushm lr
- + mov r9,31+1023 /* set exponent */
- +
- + normalize_df r9 /*exp*/, r10, r11 /* mantissa */, r8, lr /* scratch */
- +
- + /* Check if a subnormal result was created */
- + cp.w r9, 0
- + brgt 0f
- +
- + adjust_subnormal_df r9 /* exp */, r10, r11 /* Mantissa */, r12 /*sign*/, r8, lr /* scratch */
- + popm pc
- +0:
- +
- + /* Round result */
- + round_df r9 /*exp*/, r10, r11 /* Mantissa */, r8 /*scratch*/
- + cp.w r9,0x7ff
- + brlt 0f
- + /*Return infinity */
- + mov r10, 0
- + mov_imm r11, 0xffe00000
- + rjmp __floatsidf_return_op1
- +
- +0:
- +
- + /* Pack */
- + pack_df r9 /*exp*/, r10, r11 /* mantissa */, r10, r11 /* Output df number*/
- +__floatsidf_return_op1:
- + lsl r12,1 /* shift in sign bit */
- + ror r11
- +
- + popm pc
- +#endif
- +
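An illustrative C version of the int-to-double path above: take the absolute value, put the leading one at bit 52 and set the biased exponent to (position of the leading one) + 1023. Every int32_t is exactly representable, so no rounding or subnormal handling is needed on this path (the shared macros cover the cases that do need it). __builtin_clz is the GCC builtin assumed here:

    #include <stdint.h>
    #include <string.h>

    static double s32_to_f64_sketch(int32_t x)
    {
        if (x == 0)
            return 0.0;
        uint64_t sign = (x < 0) ? 1ULL << 63 : 0;
        uint32_t mag = (x < 0) ? -(uint32_t)x : (uint32_t)x;
        int msb = 31 - __builtin_clz(mag);              /* position of the leading 1 */
        uint64_t mant = ((uint64_t)mag << (52 - msb)) & 0x000FFFFFFFFFFFFFULL;
        uint64_t bits = sign | ((uint64_t)(msb + 1023) << 52) | mant;
        double d;
        memcpy(&d, &bits, sizeof d);
        return d;
    }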
- +
- +#ifdef L_avr32_f32_cmp_eq
- + .global __avr32_f32_cmp_eq
- + .type __avr32_f32_cmp_eq,@function
- +__avr32_f32_cmp_eq:
- + cp.w r12, r11
- + breq 0f
- + /* If not equal check for +/-0 */
- + /* Or together the two values and shift out the sign bit.
- + If the result is zero, then the two values are both zero. */
- + or r12, r11
- + lsl r12, 1
- + reteq 1
- + ret 0
- +0:
- + /* Numbers were equal. Check for NaN or Inf */
- + mov_imm r11, 0xff000000
- + lsl r12, 1
- + cp.w r12, r11
- + retls 1 /* 0 if NaN, 1 otherwise */
- + ret 0
- +#endif
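
The equality test above works directly on the bit patterns: unequal patterns compare equal only if both encode a zero (of either sign), and equal patterns compare unequal only if they encode NaN. The same rule as a C sketch:

    #include <stdint.h>
    #include <string.h>

    static int f32_eq_sketch(float a, float b)
    {
        uint32_t ua, ub;
        memcpy(&ua, &a, 4);
        memcpy(&ub, &b, 4);
        if (ua != ub)
            return ((ua | ub) << 1) == 0;       /* both zero (any signs)? */
        return (ua << 1) <= 0xFF000000u;        /* equal patterns, but NaN != NaN */
    }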
- +
- +#if defined(L_avr32_f32_cmp_ge) || defined(L_avr32_f32_cmp_lt)
- +#ifdef L_avr32_f32_cmp_ge
- + .global __avr32_f32_cmp_ge
- + .type __avr32_f32_cmp_ge,@function
- +__avr32_f32_cmp_ge:
- +#endif
- +#ifdef L_avr32_f32_cmp_lt
- + .global __avr32_f32_cmp_lt
- + .type __avr32_f32_cmp_lt,@function
- +__avr32_f32_cmp_lt:
- +#endif
- + lsl r10, r12, 1 /* Remove sign bits */
- + lsl r9, r11, 1
- + subfeq r10, 0
- +#ifdef L_avr32_f32_cmp_ge
- +	reteq	1	/* Both numbers are zero. Return true. */
- +#endif
- +#ifdef L_avr32_f32_cmp_lt
- +	reteq	0	/* Both numbers are zero. Return false. */
- +#endif
- + mov_imm r8, 0xff000000
- + cp.w r10, r8
- + rethi 0 /* Op0 is NaN */
- + cp.w r9, r8
- +	rethi	0	/* Op1 is NaN */
- +
- + eor r8, r11, r12
- + bld r12, 31
- +#ifdef L_avr32_f32_cmp_ge
- + srcc r8 /* Set result to true if op0 is positive*/
- +#endif
- +#ifdef L_avr32_f32_cmp_lt
- + srcs r8 /* Set result to true if op0 is negative*/
- +#endif
- + retmi r8 /* Return if signs are different */
- + brcs 0f /* Both signs negative? */
- +
- + /* Both signs positive */
- + cp.w r12, r11
- +#ifdef L_avr32_f32_cmp_ge
- + reths 1
- + retlo 0
- +#endif
- +#ifdef L_avr32_f32_cmp_lt
- + reths 0
- + retlo 1
- +#endif
- +0:
- + /* Both signs negative */
- + cp.w r11, r12
- +#ifdef L_avr32_f32_cmp_ge
- + reths 1
- + retlo 0
- +#endif
- +#ifdef L_avr32_f32_cmp_lt
- + reths 0
- + retlo 1
- +#endif
- +#endif
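
Once NaN is ruled out, the ge/lt routines above reduce the ordered comparison to unsigned integer compares: operands with equal signs order like their bit patterns (reversed when both are negative), and with mixed signs the non-negative operand is the larger, with +0.0 and -0.0 treated as equal. A C sketch of the greater-or-equal variant under those assumptions (hypothetical helper name):

    #include <stdint.h>
    #include <string.h>

    static int f32_ge_sketch(float a, float b)
    {
        uint32_t ua, ub;
        memcpy(&ua, &a, 4);
        memcpy(&ub, &b, 4);
        if ((ua << 1) > 0xFF000000u || (ub << 1) > 0xFF000000u)
            return 0;                            /* unordered: NaN operand */
        if (((ua | ub) << 1) == 0)
            return 1;                            /* +/-0 vs +/-0: equal */
        int neg_a = ua >> 31, neg_b = ub >> 31;
        if (neg_a != neg_b)
            return !neg_a;                       /* mixed signs */
        if (neg_a)
            return ub >= ua;                     /* both negative: reversed */
        return ua >= ub;                         /* both non-negative */
    }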
- +
- +
- +#ifdef L_avr32_f64_cmp_eq
- + .global __avr32_f64_cmp_eq
- + .type __avr32_f64_cmp_eq,@function
- +__avr32_f64_cmp_eq:
- + cp.w r10,r8
- + cpc r11,r9
- + breq 0f
- +
- + /* Args were not equal*/
- + /* Both args could be zero with different sign bits */
- + lsl r11,1 /* get rid of sign bits */
- + lsl r9,1
- + or r11,r10 /* Check if all bits are zero */
- + or r11,r9
- + or r11,r8
- + reteq 1 /* If all zeros the arguments are equal
- + so return 1 else return 0 */
- + ret 0
- +0:
- + /* check for NaN */
- + lsl r11,1
- + mov_imm r12, 0xffe00000
- + cp.w r10,0
- + cpc r11,r12 /* check if nan or inf */
- +	retls	1	/* Not NaN: the arguments are equal, return 1 */
- +	ret	0	/* NaN: return 0 */
- +
- +#endif
- +
- +
- +#if defined(L_avr32_f64_cmp_ge) || defined(L_avr32_f64_cmp_lt)
- +
- +#ifdef L_avr32_f64_cmp_ge
- + .global __avr32_f64_cmp_ge
- + .type __avr32_f64_cmp_ge,@function
- +__avr32_f64_cmp_ge:
- +#endif
- +#ifdef L_avr32_f64_cmp_lt
- + .global __avr32_f64_cmp_lt
- + .type __avr32_f64_cmp_lt,@function
- +__avr32_f64_cmp_lt:
- +#endif
- +
- + /* compare magnitude of op1 and op2 */
- + st.w --sp, lr
- + st.w --sp, r7
- + lsl r11,1 /* Remove sign bit of op1 */
- + srcs r12 /* Sign op1 to lsb of r12*/
- + lsl r9,1 /* Remove sign bit of op2 */
- + srcs r7
- +	rol	r12	/* Sign of op2 to lsb of r12, sign of op1 to bit 1 of r12 */
- +
- +
- + /* Check for Nan */
- + mov_imm lr, 0xffe00000
- + cp.w r10,0
- + cpc r11,lr
- + brhi 0f /* We have NaN */
- + cp.w r8,0
- + cpc r9,lr
- + brhi 0f /* We have NaN */
- +
- + cp.w r11, 0
- + subfeq r10, 0
- + breq 3f /* op1 zero */
- + ld.w r7, sp++
- + ld.w lr, sp++
- +
- + cp.w r12,3 /* both operands negative ?*/
- + breq 1f
- +
- + cp.w r12,1 /* both operands positive? */
- + brlo 2f
- +
- + /* Different signs. If sign of op1 is negative the difference
- + between op1 and op2 will always be negative, and if op1 is
- + positive the difference will always be positive */
- +#ifdef L_avr32_f64_cmp_ge
- + reteq 1
- + retne 0
- +#endif
- +#ifdef L_avr32_f64_cmp_lt
- + reteq 0
- + retne 1
- +#endif
- +
- +2:
- + /* Both operands positive. Just compute the difference */
- + cp.w r10,r8
- + cpc r11,r9
- +#ifdef L_avr32_f64_cmp_ge
- + reths 1
- + retlo 0
- +#endif
- +#ifdef L_avr32_f64_cmp_lt
- + reths 0
- + retlo 1
- +#endif
- +
- +1:
- + /* Both operands negative. Compute the difference with operands switched */
- + cp r8,r10
- + cpc r9,r11
- +#ifdef L_avr32_f64_cmp_ge
- + reths 1
- + retlo 0
- +#endif
- +#ifdef L_avr32_f64_cmp_lt
- + reths 0
- + retlo 1
- +#endif
- +
- +0:
- + ld.w r7, sp++
- + popm pc, r12=0
- +
- +3:
- + cp.w r7, 1 /* Check sign bit from r9 */
- +#ifdef L_avr32_f64_cmp_ge
- + sreq r12 /* If op2 is negative then op1 >= op2. */
- +#endif
- +#ifdef L_avr32_f64_cmp_lt
- +	srne	r12	/* If op2 is positive then op1 < op2. */
- +#endif
- + cp.w r9, 0
- + subfeq r8, 0
- + ld.w r7, sp++
- + ld.w lr, sp++
- +#ifdef L_avr32_f64_cmp_ge
- + reteq 1 /* Both operands are zero. Return true. */
- +#endif
- +#ifdef L_avr32_f64_cmp_lt
- + reteq 0 /* Both operands are zero. Return false. */
- +#endif
- + ret r12
- +#endif
- +
- +#if defined(L_avr32_f64_div) || defined(L_avr32_f64_div_fast)
- + .align 2
- +
- +#if defined(L_avr32_f64_div_fast)
- + .global __avr32_f64_div_fast
- + .type __avr32_f64_div_fast,@function
- +__avr32_f64_div_fast:
- +#else
- + .global __avr32_f64_div
- + .type __avr32_f64_div,@function
- +__avr32_f64_div:
- +#endif
- + stm --sp, r0, r1, r2, r3, r4, r5, r6, r7,lr
- + /* op1 in {r11,r10}*/
- + /* op2 in {r9,r8}*/
- + eor lr, r11, r9 /* MSB(lr) = Sign(op1) ^ Sign(op2) */
- +
- +
- + /* Unpack op1 to 2.62 format*/
- + /* exp: r7 */
- + /* sf: r11, r10 */
- + lsr r7, r11, 20 /* Extract exponent */
- +
- + lsl r11, 9 /* Extract mantissa, leave room for implicit bit */
- + or r11, r11, r10>>23
- + lsl r10, 9
- + sbr r11, 29 /* Insert implicit bit */
- + andh r11, 0x3fff /*Mask last part of exponent since we use 2.62 format*/
- +
- + cbr r7, 11 /* Clear sign bit */
- + /* Check if normalization is needed */
- + breq 11f /*If number is subnormal, normalize it */
- +22:
- + cp r7, 0x7ff
- + brge 2f /* Check op1 for NaN or Inf */
- +
- + /* Unpack op2 to 2.62 format*/
- + /* exp: r6 */
- + /* sf: r9, r8 */
- + lsr r6, r9, 20 /* Extract exponent */
- +
- + lsl r9, 9 /* Extract mantissa, leave room for implicit bit */
- + or r9, r9, r8>>23
- + lsl r8, 9
- + sbr r9, 29 /* Insert implicit bit */
- + andh r9, 0x3fff /*Mask last part of exponent since we use 2.62 format*/
- +
- + cbr r6, 11 /* Clear sign bit */
- + /* Check if normalization is needed */
- + breq 13f /*If number is subnormal, normalize it */
- +23:
- + cp r6, 0x7ff
- + brge 3f /* Check op2 for NaN or Inf */
- +
- + /* Calculate new exponent */
- + sub r7, r6
- + sub r7,-1023
- +
- + /* Divide */
- + /* Approximating 1/d with the following recurrence: */
- + /* R[j+1] = R[j]*(2-R[j]*d) */
- + /* Using 2.62 format */
- + /* TWO: r12 */
- + /* d = op2 = divisor (2.62 format): r9,r8 */
- + /* Multiply result : r5, r4 */
- + /* Initial guess : r3, r2 */
- + /* New approximations : r3, r2 */
- + /* op1 = Dividend (2.62 format) : r11, r10 */
- +
- + mov_imm r12, 0x80000000
- +
- + /* Load initial guess, using look-up table */
- + /* Initial guess is of format 01.XY, where XY is constructed as follows: */
- + /* Let d be of following format: 00.1xy....., then XY=~xy */
- +	/* For d=00.100 = 0.5   -> initial guess=01.11 = 1.75 */
- +	/* For d=00.101 = 0.625 -> initial guess=01.10 = 1.5  */
- +	/* For d=00.110 = 0.75  -> initial guess=01.01 = 1.25 */
- +	/* For d=00.111 = 0.875 -> initial guess=01.00 = 1.0  */
- +	/* r2 is also part of the reg pair forming the initial guess, but it */
- +	/* is kept uninitialized to save one cycle since it has such low significance. */
- +
- + lsr r3, r12, 1
- + bfextu r4, r9, 27, 2
- + com r4
- + bfins r3, r4, 28, 2
- +
- + /* First approximation */
- + /* Approximating to 32 bits */
- + /* r5 = R[j]*d */
- + mulu.d r4, r3, r9
- + /* r5 = 2-R[j]*d */
- + sub r5, r12, r5<<2
- + /* r3 = R[j]*(2-R[j]*d) */
- + mulu.d r4, r3, r5
- + lsl r3, r5, 2
- +
- + /* Second approximation */
- + /* Approximating to 32 bits */
- + /* r5 = R[j]*d */
- + mulu.d r4, r3, r9
- + /* r5 = 2-R[j]*d */
- + sub r5, r12, r5<<2
- + /* r3 = R[j]*(2-R[j]*d) */
- + mulu.d r4, r3, r5
- + lsl r3, r5, 2
- +
- + /* Third approximation */
- + /* Approximating to 32 bits */
- + /* r5 = R[j]*d */
- + mulu.d r4, r3, r9
- + /* r5 = 2-R[j]*d */
- + sub r5, r12, r5<<2
- + /* r3 = R[j]*(2-R[j]*d) */
- + mulu.d r4, r3, r5
- + lsl r3, r5, 2
- +
- + /* Fourth approximation */
- + /* Approximating to 64 bits */
- + /* r5,r4 = R[j]*d */
- + mul_approx_df r3 /*ah*/, r2 /*al*/, r9 /*bh*/, r8 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
- + lsl r5, 2
- + or r5, r5, r4>>30
- + lsl r4, 2
- + /* r5,r4 = 2-R[j]*d */
- + neg r4
- + sbc r5, r12, r5
- + /* r3,r2 = R[j]*(2-R[j]*d) */
- + mul_approx_df r3 /*ah*/, r2 /*al*/, r5 /*bh*/, r4 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
- + lsl r3, r5, 2
- + or r3, r3, r4>>30
- + lsl r2, r4, 2
- +
- +
- + /* Fifth approximation */
- + /* Approximating to 64 bits */
- + /* r5,r4 = R[j]*d */
- + mul_approx_df r3 /*ah*/, r2 /*al*/, r9 /*bh*/, r8 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
- + lsl r5, 2
- + or r5, r5, r4>>30
- + lsl r4, 2
- + /* r5,r4 = 2-R[j]*d */
- + neg r4
- + sbc r5, r12, r5
- + /* r3,r2 = R[j]*(2-R[j]*d) */
- + mul_approx_df r3 /*ah*/, r2 /*al*/, r5 /*bh*/, r4 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
- + lsl r3, r5, 2
- + or r3, r3, r4>>30
- + lsl r2, r4, 2
- +
- +
- + /* Multiply with dividend to get quotient */
- + mul_approx_df r3 /*ah*/, r2 /*al*/, r11 /*bh*/, r10 /*bl*/, r3 /*rh*/, r2 /*rl*/, r1 /*sh*/, r0 /*sl*/
- +
- +
- + /* To increase speed, this result is not corrected before final rounding.*/
- + /* This may give a difference to IEEE compliant code of 1 ULP.*/
- +
- +
- + /* Adjust exponent and mantissa */
- + /* r7:exp, [r3, r2]:mant, [r5, r4]:scratch*/
- + /* Mantissa may be of the format 0.xxxx or 1.xxxx. */
- + /* In the first case, shift one pos to left.*/
- + bld r3, 31-3
- + breq 0f
- + lsl r2, 1
- + rol r3
- + sub r7, 1
- +#if defined(L_avr32_f64_div)
- + /* We must scale down the dividend to 5.59 format. */
- + lsr r10, 3
- + or r10, r10, r11 << 29
- + lsr r11, 3
- + rjmp 1f
- +#endif
- +0:
- +#if defined(L_avr32_f64_div)
- + /* We must scale down the dividend to 6.58 format. */
- + lsr r10, 4
- + or r10, r10, r11 << 28
- + lsr r11, 4
- +1:
- +#endif
- + cp r7, 0
- + brle __avr32_f64_div_res_subnormal /* Result was subnormal. */
- +
- +
- +#if defined(L_avr32_f64_div)
- + /* In order to round correctly we calculate the remainder:
- + Remainder = dividend[11:r10] - divisor[r9:r8]*quotient[r3:r2]
- + for the case when the quotient is halfway between the round-up
- + value and the round down value. If the remainder then is negative
- +	   it means that the quotient was too big and that it should not be
- +	   rounded up; if the remainder is positive the quotient was too small
- + and we need to round up. If the remainder is zero it means that the
- + quotient is exact but since we need to remove the guard bit we should
- + round to even. */
- +
- + /* Truncate and add guard bit. */
- + andl r2, 0xff00
- + orl r2, 0x0080
- +
- +
- + /* Now do the multiplication. The quotient has the format 4.60
- + while the divisor has the format 2.62 which gives a result
- + of 6.58 */
- + mulu.d r0, r3, r8
- + macu.d r0, r2, r9
- + mulu.d r4, r2, r8
- + mulu.d r8, r3, r9
- + add r5, r0
- + adc r8, r8, r1
- + acr r9
- +
- +
- + /* Check if remainder is positive, negative or equal. */
- +	bfextu	r12, r2, 8, 1	/* Get parity bit into bit 0 of r12 */
- + cp r4, 0
- + cpc r5
- +__avr32_f64_div_round_subnormal:
- + cpc r8, r10
- + cpc r9, r11
- + srlo r6 /* Remainder positive: we need to round up.*/
- + moveq r6, r12 /* Remainder zero: round up if mantissa odd. */
- +#else
- + bfextu r6, r2, 7, 1 /* Get guard bit */
- +#endif
- + /* Final packing, scale down mantissa. */
- + lsr r10, r2, 8
- + or r10, r10, r3<<24
- + lsr r11, r3, 8
- + /* Insert exponent and sign bit*/
- + bfins r11, r7, 20, 11
- + bld lr, 31
- + bst r11, 31
- +
- + /* Final rounding */
- + add r10, r6
- + acr r11
- +
- + /* Return result in [r11,r10] */
- + ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
- +
- +
- +2:
- + /* Op1 is NaN or inf */
- + andh r11, 0x000f /* Extract mantissa */
- + or r11, r10
- + brne 16f /* Return NaN if op1 is NaN */
- + /* Op1 is inf check op2 */
- + lsr r6, r9, 20 /* Extract exponent */
- + cbr r6, 11 /* Clear sign bit */
- + cp r6, 0x7ff
- + brne 17f /* Inf/number gives inf, return inf */
- + rjmp 16f /* The rest gives NaN*/
- +
- +3:
- + /* Op1 is a valid number. Op 2 is NaN or inf */
- + andh r9, 0x000f /* Extract mantissa */
- + or r9, r8
- + brne 16f /* Return NaN if op2 is NaN */
- + rjmp 15f /* Op2 was inf, return zero*/
- +
- +11: /* Op1 was denormal. Fix it. */
- + lsl r11, 3
- + or r11, r11, r10 >> 29
- + lsl r10, 3
- + /* Check if op1 is zero. */
- + or r4, r10, r11
- + breq __avr32_f64_div_op1_zero
- + normalize_df r7 /*exp*/, r10, r11 /*Mantissa*/, r4, r5 /*scratch*/
- + lsr r10, 2
- + or r10, r10, r11 << 30
- + lsr r11, 2
- + rjmp 22b
- +
- +
- +13: /* Op2 was denormal. Fix it */
- + lsl r9, 3
- + or r9, r9, r8 >> 29
- + lsl r8, 3
- + /* Check if op2 is zero. */
- + or r4, r9, r8
- + breq 17f /* Divisor is zero -> return Inf */
- + normalize_df r6 /*exp*/, r8, r9 /*Mantissa*/, r4, r5 /*scratch*/
- + lsr r8, 2
- + or r8, r8, r9 << 30
- + lsr r9, 2
- + rjmp 23b
- +
- +
- +__avr32_f64_div_res_subnormal:/* Divide result was subnormal. */
- +#if defined(L_avr32_f64_div)
- + /* Check how much we must scale down the mantissa. */
- + neg r7
- +	sub	r7, -1	/* We no longer have an implicit bit. */
- + satu r7 >> 0, 6 /* Saturate shift amount to max 63. */
- + cp.w r7, 32
- + brge 0f
- + /* Shift amount <32 */
- + /* Scale down quotient */
- + rsub r6, r7, 32
- + lsr r2, r2, r7
- + lsl r12, r3, r6
- + or r2, r12
- + lsr r3, r3, r7
- + /* Scale down the dividend to match the scaling of the quotient. */
- + lsl r1, r10, r6
- + lsr r10, r10, r7
- + lsl r12, r11, r6
- + or r10, r12
- + lsr r11, r11, r7
- + mov r0, 0
- + rjmp 1f
- +0:
- + /* Shift amount >=32 */
- + rsub r6, r7, 32
- + moveq r0, 0
- + moveq r12, 0
- + breq 0f
- + lsl r0, r10, r6
- + lsl r12, r11, r6
- +0:
- + lsr r2, r3, r7
- + mov r3, 0
- + /* Scale down the dividend to match the scaling of the quotient. */
- + lsr r1, r10, r7
- + or r1, r12
- + lsr r10, r11, r7
- + mov r11, 0
- +1:
- + /* Start performing the same rounding as done for normal numbers
- + but this time we have scaled the quotient and dividend and hence
- +	   need a slightly different comparison. */
- + /* Truncate and add guard bit. */
- + andl r2, 0xff00
- + orl r2, 0x0080
- +
- + /* Now do the multiplication. */
- + mulu.d r6, r3, r8
- + macu.d r6, r2, r9
- + mulu.d r4, r2, r8
- + mulu.d r8, r3, r9
- + add r5, r6
- + adc r8, r8, r7
- + acr r9
- +
- + /* Set exponent to 0 */
- + mov r7, 0
- +
- + /* Check if remainder is positive, negative or equal. */
- +	bfextu	r12, r2, 8, 1	/* Get parity bit into bit 0 of r12 */
- + cp r4, r0
- + cpc r5, r1
- + /* Now the rest of the rounding is the same as for normals. */
- + rjmp __avr32_f64_div_round_subnormal
- +
- +#endif
- +15:
- + /* Flush to zero for the fast version. */
- + mov r11, lr /*Get correct sign*/
- + andh r11, 0x8000, COH
- + mov r10, 0
- + ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
- +
- +16: /* Return NaN. */
- + mov r11, -1
- + mov r10, 0
- + ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
- +
- +17:
- + /* Check if op1 is zero. */
- + or r4, r10, r11
- + breq __avr32_f64_div_op1_zero
- + /* Return INF. */
- + mov r11, lr /*Get correct sign*/
- + andh r11, 0x8000, COH
- + orh r11, 0x7ff0
- + mov r10, 0
- + ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
- +
- +__avr32_f64_div_op1_zero:
- + or r5, r8, r9 << 1
- + breq 16b /* 0.0/0.0 -> NaN */
- + bfextu r4, r9, 20, 11
- + cp r4, 0x7ff
- + brne 15b /* Return zero */
- + /* Check if divisor is Inf or NaN */
- + or r5, r8, r9 << 12
- + breq 15b /* Divisor is inf -> return zero */
- + rjmp 16b /* Return NaN */
- +
- +
- +
- +
- +#endif
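
Both the double- and the single-precision divider refine a reciprocal with the recurrence R[j+1] = R[j]*(2 - R[j]*d), starting from a two-bit table lookup; each step roughly doubles the number of correct bits. A self-contained C sketch of the scheme in 2.30 fixed point (the layout used by the f32 code further down; the f64 code widens the same idea to 2.62). The helper name and the fixed iteration count are illustrative assumptions, not part of the patch:

    #include <stdint.h>

    /* d_2_30 is the divisor significand in [0.5, 1.0) in 2.30 fixed point;
       the return value approximates 1/d in the same format. */
    static uint32_t recip_2_30(uint32_t d_2_30)
    {
        const uint32_t TWO = 0x80000000u;             /* 2.0 in 2.30 */
        /* initial guess 01.~x~y, as in the look-up table comments above */
        uint32_t r = (TWO >> 1) | ((~(d_2_30 >> 27) & 3u) << 28);
        for (int i = 0; i < 4; i++) {
            uint64_t rd = (uint64_t)r * d_2_30;       /* R[j]*d, 4.60 */
            uint32_t rd_2_30 = (uint32_t)(rd >> 30);  /* back to 2.30 */
            uint64_t t = (uint64_t)r * (TWO - rd_2_30);
            r = (uint32_t)(t >> 30);                  /* R[j]*(2 - R[j]*d) */
        }
        return r;
    }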
- +
- +#if defined(L_avr32_f32_addsub) || defined(L_avr32_f32_addsub_fast)
- +
- + .align 2
- +__avr32_f32_sub_from_add:
- + /* Switch sign on op2 */
- + eorh r11, 0x8000
- +
- +#if defined(L_avr32_f32_addsub_fast)
- + .global __avr32_f32_sub_fast
- + .type __avr32_f32_sub_fast,@function
- +__avr32_f32_sub_fast:
- +#else
- + .global __avr32_f32_sub
- + .type __avr32_f32_sub,@function
- +__avr32_f32_sub:
- +#endif
- +
- + /* Check signs */
- + eor r8, r11, r12
- + /* Different signs, use subtraction. */
- + brmi __avr32_f32_add_from_sub
- +
- + /* Get sign of op1 */
- + mov r8, r12
- + andh r12, 0x8000, COH
- +
- + /* Remove sign from operands */
- + cbr r11, 31
- +#if defined(L_avr32_f32_addsub_fast)
- + reteq r8 /* If op2 is zero return op1 */
- +#endif
- + cbr r8, 31
- +
- + /* Put the number with the largest exponent in r10
- + and the number with the smallest exponent in r9 */
- + max r10, r8, r11
- + min r9, r8, r11
- + cp r10, r8 /*If largest operand (in R10) is not equal to op1*/
- + subne r12, 1 /* Subtract 1 from sign, which will invert MSB of r12*/
- + andh r12, 0x8000, COH /*Mask all but MSB*/
- +
- + /* Unpack exponent and mantissa of op1 */
- + lsl r8, r10, 8
- + sbr r8, 31 /* Set implicit bit. */
- + lsr r10, 23
- +
- + /* op1 is NaN or Inf. */
- + cp.w r10, 0xff
- + breq __avr32_f32_sub_op1_nan_or_inf
- +
- + /* Unpack exponent and mantissa of op2 */
- + lsl r11, r9, 8
- + sbr r11, 31 /* Set implicit bit. */
- + lsr r9, 23
- +
- +#if defined(L_avr32_f32_addsub)
- + /* Keep sticky bit for correct IEEE rounding */
- + st.w --sp, r12
- +
- + /* op2 is either zero or subnormal. */
- + breq __avr32_f32_sub_op2_subnormal
- +0:
- + /* Get shift amount to scale mantissa of op2. */
- + sub r12, r10, r9
- +
- + breq __avr32_f32_sub_shift_done
- +
- + /* Saturate the shift amount to 31. If the amount
- + is any larger op2 is insignificant. */
- + satu r12 >> 0, 5
- +
- + /* Put the remaining bits into r9.*/
- + rsub r9, r12, 32
- + lsl r9, r11, r9
- +
- + /* If the remaining bits are non-zero then we must subtract one
- + more from opL. */
- + subne r8, 1
- + srne r9 /* LSB of r9 represents sticky bits. */
- +
- + /* Shift mantissa of op2 to same decimal point as the mantissa
- + of op1. */
- + lsr r11, r11, r12
- +
- +
- +__avr32_f32_sub_shift_done:
- + /* Now subtract the mantissas. */
- + sub r8, r11
- +
- + ld.w r12, sp++
- +
- + /* Normalize resulting mantissa. */
- + clz r11, r8
- +
- + retcs 0
- + lsl r8, r8, r11
- + sub r10, r11
- + brle __avr32_f32_sub_subnormal_result
- +
- + /* Insert the bits we will remove from the mantissa into r9[31:24] */
- + or r9, r9, r8 << 24
- +#else
- + /* Ignore sticky bit to simplify and speed up rounding */
- + /* op2 is either zero or subnormal. */
- + breq __avr32_f32_sub_op2_subnormal
- +0:
- + /* Get shift amount to scale mantissa of op2. */
- + rsub r9, r10
- +
- + /* Saturate the shift amount to 31. If the amount
- + is any larger op2 is insignificant. */
- + satu r9 >> 0, 5
- +
- + /* Shift mantissa of op2 to same decimal point as the mantissa
- + of op1. */
- + lsr r11, r11, r9
- +
- + /* Now subtract the mantissas. */
- + sub r8, r11
- +
- + /* Normalize resulting mantissa. */
- + clz r9, r8
- + retcs 0
- + lsl r8, r8, r9
- + sub r10, r9
- + brle __avr32_f32_sub_subnormal_result
- +#endif
- +
- + /* Pack result. */
- + or r12, r12, r8 >> 8
- + bfins r12, r10, 23, 8
- +
- + /* Round */
- +__avr32_f32_sub_round:
- +#if defined(L_avr32_f32_addsub)
- + mov_imm r10, 0x80000000
- + bld r12, 0
- + subne r10, -1
- + cp.w r9, r10
- + subhs r12, -1
- +#else
- + bld r8, 7
- + acr r12
- +#endif
- +
- + ret r12
- +
- +
- +__avr32_f32_sub_op2_subnormal:
- + /* Fix implicit bit and adjust exponent of subnormals. */
- + cbr r11, 31
- + /* Set exponent to 1 if we do not have a zero. */
- + movne r9,1
- +
- + /* Check if op1 is also subnormal. */
- + cp.w r10, 0
- + brne 0b
- +
- + cbr r8, 31
- + /* If op1 is not zero set exponent to 1. */
- + movne r10,1
- +
- + rjmp 0b
- +
- +__avr32_f32_sub_op1_nan_or_inf:
- + /* Check if op1 is NaN, if so return NaN */
- + lsl r11, r8, 1
- + retne -1
- +
- + /* op1 is Inf. */
- + bfins r12, r10, 23, 8 /* Generate Inf in r12 */
- +
- + /* Check if op2 is Inf. or NaN */
- + lsr r11, r9, 23
- + cp.w r11, 0xff
- + retne r12 /* op2 not Inf or NaN, return op1 */
- +
- + ret -1 /* op2 Inf or NaN, return NaN */
- +
- +__avr32_f32_sub_subnormal_result:
- + /* Check if the number is so small that
- + it will be represented with zero. */
- + rsub r10, r10, 9
- + rsub r11, r10, 32
- + retcs 0
- +
- + /* Shift the mantissa into the correct position.*/
- + lsr r10, r8, r10
- + /* Add sign bit. */
- + or r12, r10
- +
- + /* Put the shifted out bits in the most significant part
- + of r8. */
- + lsl r8, r8, r11
- +
- +#if defined(L_avr32_f32_addsub)
- + /* Add all the remainder bits used for rounding into r9 */
- + or r9, r8
- +#else
- + lsr r8, 24
- +#endif
- + rjmp __avr32_f32_sub_round
- +
- +
- + .align 2
- +
- +__avr32_f32_add_from_sub:
- + /* Switch sign on op2 */
- + eorh r11, 0x8000
- +
- +#if defined(L_avr32_f32_addsub_fast)
- + .global __avr32_f32_add_fast
- + .type __avr32_f32_add_fast,@function
- +__avr32_f32_add_fast:
- +#else
- + .global __avr32_f32_add
- + .type __avr32_f32_add,@function
- +__avr32_f32_add:
- +#endif
- +
- + /* Check signs */
- + eor r8, r11, r12
- + /* Different signs, use subtraction. */
- + brmi __avr32_f32_sub_from_add
- +
- + /* Get sign of op1 */
- + mov r8, r12
- + andh r12, 0x8000, COH
- +
- + /* Remove sign from operands */
- + cbr r11, 31
- +#if defined(L_avr32_f32_addsub_fast)
- + reteq r8 /* If op2 is zero return op1 */
- +#endif
- + cbr r8, 31
- +
- + /* Put the number with the largest exponent in r10
- + and the number with the smallest exponent in r9 */
- + max r10, r8, r11
- + min r9, r8, r11
- +
- + /* Unpack exponent and mantissa of op1 */
- + lsl r8, r10, 8
- + sbr r8, 31 /* Set implicit bit. */
- + lsr r10, 23
- +
- + /* op1 is NaN or Inf. */
- + cp.w r10, 0xff
- + breq __avr32_f32_add_op1_nan_or_inf
- +
- + /* Unpack exponent and mantissa of op2 */
- + lsl r11, r9, 8
- + sbr r11, 31 /* Set implicit bit. */
- + lsr r9, 23
- +
- +#if defined(L_avr32_f32_addsub)
- + /* op2 is either zero or subnormal. */
- + breq __avr32_f32_add_op2_subnormal
- +0:
- + /* Keep sticky bit for correct IEEE rounding */
- + st.w --sp, r12
- +
- + /* Get shift amount to scale mantissa of op2. */
- + rsub r9, r10
- +
- + /* Saturate the shift amount to 31. If the amount
- + is any larger op2 is insignificant. */
- + satu r9 >> 0, 5
- +
- + /* Shift mantissa of op2 to same decimal point as the mantissa
- + of op1. */
- + lsr r12, r11, r9
- +
- +	/* Put the remaining bits into r11[23:..].*/
- + rsub r9, r9, (32-8)
- + lsl r11, r11, r9
- + /* Insert the bits we will remove from the mantissa into r11[31:24] */
- + bfins r11, r12, 24, 8
- +
- + /* Now add the mantissas. */
- + add r8, r12
- +
- + ld.w r12, sp++
- +#else
- + /* Ignore sticky bit to simplify and speed up rounding */
- + /* op2 is either zero or subnormal. */
- + breq __avr32_f32_add_op2_subnormal
- +0:
- + /* Get shift amount to scale mantissa of op2. */
- + rsub r9, r10
- +
- + /* Saturate the shift amount to 31. If the amount
- + is any larger op2 is insignificant. */
- + satu r9 >> 0, 5
- +
- + /* Shift mantissa of op2 to same decimal point as the mantissa
- + of op1. */
- + lsr r11, r11, r9
- +
- + /* Now add the mantissas. */
- + add r8, r11
- +
- +#endif
- + /* Check if we overflowed. */
- + brcs __avr32_f32_add_res_of
- +1:
- + /* Pack result. */
- + or r12, r12, r8 >> 8
- + bfins r12, r10, 23, 8
- +
- + /* Round */
- +#if defined(L_avr32_f32_addsub)
- + mov_imm r10, 0x80000000
- + bld r12, 0
- + subne r10, -1
- + cp.w r11, r10
- + subhs r12, -1
- +#else
- + bld r8, 7
- + acr r12
- +#endif
- +
- + ret r12
- +
- +__avr32_f32_add_op2_subnormal:
- + /* Fix implicit bit and adjust exponent of subnormals. */
- + cbr r11, 31
- + /* Set exponent to 1 if we do not have a zero. */
- + movne r9,1
- +
- + /* Check if op1 is also subnormal. */
- + cp.w r10, 0
- + brne 0b
- + /* Both operands subnormal, just add the mantissas and
- + pack. If the addition of the subnormal numbers results
- + in a normal number then the exponent will automatically
- + be set to 1 by the addition. */
- + cbr r8, 31
- + add r11, r8
- + or r12, r12, r11 >> 8
- + ret r12
- +
- +__avr32_f32_add_op1_nan_or_inf:
- + /* Check if op1 is NaN, if so return NaN */
- + lsl r11, r8, 1
- + retne -1
- +
- + /* op1 is Inf. */
- + bfins r12, r10, 23, 8 /* Generate Inf in r12 */
- +
- + /* Check if op2 is Inf. or NaN */
- + lsr r11, r9, 23
- + cp.w r11, 0xff
- + retne r12 /* op2 not Inf or NaN, return op1 */
- +
- + lsl r9, 9
- + reteq r12 /* op2 Inf return op1 */
- + ret -1 /* op2 is NaN, return NaN */
- +
- +__avr32_f32_add_res_of:
- + /* We overflowed. Increase exponent and shift mantissa.*/
- + lsr r8, 1
- + sub r10, -1
- +
- + /* Clear mantissa to set result to Inf if the exponent is 255. */
- + cp.w r10, 255
- + moveq r8, 0
- + moveq r11, 0
- + rjmp 1b
- +
- +
- +#endif
- +
- +
- +#if defined(L_avr32_f32_div) || defined(L_avr32_f32_div_fast)
- + .align 2
- +
- +#if defined(L_avr32_f32_div_fast)
- + .global __avr32_f32_div_fast
- + .type __avr32_f32_div_fast,@function
- +__avr32_f32_div_fast:
- +#else
- + .global __avr32_f32_div
- + .type __avr32_f32_div,@function
- +__avr32_f32_div:
- +#endif
- +
- + eor r8, r11, r12 /* MSB(r8) = Sign(op1) ^ Sign(op2) */
- +
- + /* Unpack */
- + lsl r12,1
- + lsl r11,1
- + breq 4f /* Check op2 for zero */
- +
- + tst r12, r12
- + moveq r9, 0
- + breq 12f
- +
- + /* Unpack op1*/
- + /* exp: r9 */
- + /* sf: r12 */
- + lsr r9, r12, 24
- + breq 11f /*If number is subnormal*/
- + cp r9, 0xff
- + brhs 2f /* Check op1 for NaN or Inf */
- + lsl r12, 7
- + sbr r12, 31 /*Implicit bit*/
- +12:
- +
- + /* Unpack op2*/
- + /* exp: r10 */
- + /* sf: r11 */
- + lsr r10, r11, 24
- + breq 13f /*If number is subnormal*/
- + cp r10, 0xff
- + brhs 3f /* Check op2 for NaN or Inf */
- + lsl r11,7
- + sbr r11, 31 /*Implicit bit*/
- +
- + cp.w r9, 0
- + subfeq r12, 0
- + reteq 0 /* op1 is zero and op2 is not zero */
- + /* or NaN so return zero */
- +
- +14:
- +
- + /* For UC3, store with predecrement is faster than stm */
- + st.w --sp, r5
- + st.d --sp, r6
- +
- + /* Calculate new exponent */
- + sub r9, r10
- + sub r9,-127
- +
- + /* Divide */
- + /* Approximating 1/d with the following recurrence: */
- + /* R[j+1] = R[j]*(2-R[j]*d) */
- + /* Using 2.30 format */
- + /* TWO: r10 */
- + /* d: r5 */
- + /* Multiply result : r6, r7 */
- + /* Initial guess : r11 */
- + /* New approximations : r11 */
- + /* Dividend : r12 */
- +
- + /* Load TWO */
- + mov_imm r10, 0x80000000
- +
- + lsr r12, 2 /* Get significand of Op1 in 2.30 format */
- + lsr r5, r11, 2 /* Get significand of Op2 (=d) in 2.30 format */
- +
- + /* Load initial guess, using look-up table */
- + /* Initial guess is of format 01.XY, where XY is constructed as follows: */
- + /* Let d be of following format: 00.1xy....., then XY=~xy */
- +	/* For d=00.100 = 0.5   -> initial guess=01.11 = 1.75 */
- +	/* For d=00.101 = 0.625 -> initial guess=01.10 = 1.5  */
- +	/* For d=00.110 = 0.75  -> initial guess=01.01 = 1.25 */
- +	/* For d=00.111 = 0.875 -> initial guess=01.00 = 1.0  */
- +
- + lsr r11, r10, 1
- + bfextu r6, r5, 27, 2
- + com r6
- + bfins r11, r6, 28, 2
- +
- + /* First approximation */
- + /* r7 = R[j]*d */
- + mulu.d r6, r11, r5
- + /* r7 = 2-R[j]*d */
- + sub r7, r10, r7<<2
- + /* r11 = R[j]*(2-R[j]*d) */
- + mulu.d r6, r11, r7
- + lsl r11, r7, 2
- +
- + /* Second approximation */
- + /* r7 = R[j]*d */
- + mulu.d r6, r11, r5
- + /* r7 = 2-R[j]*d */
- + sub r7, r10, r7<<2
- + /* r11 = R[j]*(2-R[j]*d) */
- + mulu.d r6, r11, r7
- + lsl r11, r7, 2
- +
- + /* Third approximation */
- + /* r7 = R[j]*d */
- + mulu.d r6, r11, r5
- + /* r7 = 2-R[j]*d */
- + sub r7, r10, r7<<2
- + /* r11 = R[j]*(2-R[j]*d) */
- + mulu.d r6, r11, r7
- + lsl r11, r7, 2
- +
- + /* Fourth approximation */
- + /* r7 = R[j]*d */
- + mulu.d r6, r11, r5
- + /* r7 = 2-R[j]*d */
- + sub r7, r10, r7<<2
- + /* r11 = R[j]*(2-R[j]*d) */
- + mulu.d r6, r11, r7
- + lsl r11, r7, 2
- +
- +
- + /* Multiply with dividend to get quotient, r7 = sf(op1)/sf(op2) */
- + mulu.d r6, r11, r12
- +
- + /* Shift by 3 to get result in 1.31 format, as required by the exponent. */
- + /* Note that 1.31 format is already used by the exponent in r9, since */
- + /* a bias of 127 was added to the result exponent, even though the implicit */
- + /* bit was inserted. This gives the exponent an additional bias of 1, which */
- + /* supports 1.31 format. */
- + //lsl r10, r7, 3
- +
- + /* Adjust exponent and mantissa in case the result is of format
- + 0000.1xxx to 0001.xxx*/
- +#if defined(L_avr32_f32_div)
- + lsr r12, 4 /* Scale dividend to 6.26 format to match the
- + result of the multiplication of the divisor and
- + quotient to get the remainder. */
- +#endif
- + bld r7, 31-3
- + breq 0f
- + lsl r7, 1
- + sub r9, 1
- +#if defined(L_avr32_f32_div)
- + lsl r12, 1 /* Scale dividend to 5.27 format to match the
- + result of the multiplication of the divisor and
- + quotient to get the remainder. */
- +#endif
- +0:
- + cp r9, 0
- + brle __avr32_f32_div_res_subnormal /* Result was subnormal. */
- +
- +
- +#if defined(L_avr32_f32_div)
- + /* In order to round correctly we calculate the remainder:
- + Remainder = dividend[r12] - divisor[r5]*quotient[r7]
- + for the case when the quotient is halfway between the round-up
- + value and the round down value. If the remainder then is negative
- +	   it means that the quotient was too big and that it should not be
- +	   rounded up; if the remainder is positive the quotient was too small
- + and we need to round up. If the remainder is zero it means that the
- + quotient is exact but since we need to remove the guard bit we should
- + round to even. */
- + andl r7, 0xffe0
- + orl r7, 0x0010
- +
- + /* Now do the multiplication. The quotient has the format 4.28
- + while the divisor has the format 2.30 which gives a result
- + of 6.26 */
- + mulu.d r10, r5, r7
- +
- + /* Check if remainder is positive, negative or equal. */
- + bfextu r5, r7, 5, 1 /* Get parity bit into bit 0 of r5 */
- + cp r10, 0
- +__avr32_f32_div_round_subnormal:
- + cpc r11, r12
- + srlo r11 /* Remainder positive: we need to round up.*/
- + moveq r11, r5 /* Remainder zero: round up if mantissa odd. */
- +#else
- + bfextu r11, r7, 4, 1 /* Get guard bit */
- +#endif
- +
- + /* Pack final result*/
- + lsr r12, r7, 5
- + bfins r12, r9, 23, 8
- + /* For UC3, load with postincrement is faster than ldm */
- + ld.d r6, sp++
- + ld.w r5, sp++
- + bld r8, 31
- + bst r12, 31
- + /* Rounding add. */
- + add r12, r11
- + ret r12
- +
- +__divsf_return_op1:
- + lsl r8, 1
- + ror r12
- + ret r12
- +
- +
- +2:
- + /* Op1 is NaN or inf */
- + retne -1 /* Return NaN if op1 is NaN */
- + /* Op1 is inf check op2 */
- + mov_imm r9, 0xff000000
- + cp r11, r9
- + brlo __divsf_return_op1 /* inf/number gives inf */
- + ret -1 /* The rest gives NaN*/
- +3:
- + /* Op2 is NaN or inf */
- + reteq 0 /* Return zero if number/inf*/
- + ret -1 /* Return NaN*/
- +4:
- + /* Op1 is zero ? */
- + tst r12,r12
- + reteq -1 /* 0.0/0.0 is NaN */
- + /* Op1 is Nan? */
- + lsr r9, r12, 24
- + breq 11f /*If number is subnormal*/
- + cp r9, 0xff
- + brhs 2b /* Check op1 for NaN or Inf */
- + /* Nonzero/0.0 is Inf. Sign bit will be shifted in before returning*/
- + mov_imm r12, 0xff000000
- + rjmp __divsf_return_op1
- +
- +11: /* Op1 was denormal. Fix it. */
- + lsl r12,7
- + clz r9,r12
- + lsl r12,r12,r9
- + rsub r9,r9,1
- + rjmp 12b
- +
- +13: /* Op2 was denormal. Fix it. */
- + lsl r11,7
- + clz r10,r11
- + lsl r11,r11,r10
- + rsub r10,r10,1
- + rjmp 14b
- +
- +
- +__avr32_f32_div_res_subnormal: /* Divide result was subnormal */
- +#if defined(L_avr32_f32_div)
- + /* Check how much we must scale down the mantissa. */
- + neg r9
- +	sub	r9, -1	/* We no longer have an implicit bit. */
- +	satu	r9 >> 0, 5	/* Saturate shift amount to max 31. */
- + /* Scale down quotient */
- + rsub r10, r9, 32
- + lsr r7, r7, r9
- + /* Scale down the dividend to match the scaling of the quotient. */
- +	lsl	r6, r12, r10	/* Make the dividend 64-bit and put the lsw in r6 */
- + lsr r12, r12, r9
- +
- + /* Start performing the same rounding as done for normal numbers
- + but this time we have scaled the quotient and dividend and hence
- +	   need a slightly different comparison. */
- + andl r7, 0xffe0
- + orl r7, 0x0010
- +
- + /* Now do the multiplication. The quotient has the format 4.28
- + while the divisor has the format 2.30 which gives a result
- + of 6.26 */
- + mulu.d r10, r5, r7
- +
- + /* Set exponent to 0 */
- + mov r9, 0
- +
- + /* Check if remainder is positive, negative or equal. */
- + bfextu r5, r7, 5, 1 /* Get parity bit into bit 0 of r5 */
- + cp r10, r6
- + rjmp __avr32_f32_div_round_subnormal
- +
- +#else
- + ld.d r6, sp++
- + ld.w r5, sp++
- + /*Flush to zero*/
- + ret 0
- +#endif
- +#endif
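
The rounding scheme described in both dividers avoids computing an exact remainder up front: the truncated quotient gets a guard bit, the divisor is multiplied back, and the sign of dividend - divisor*(quotient + half an ulp) decides between truncating, rounding up, or, when it is exactly zero, rounding to even. The same decision rule on plain integers, as a sketch:

    #include <stdint.h>

    /* Round-to-nearest-even integer division; d must be non-zero. Comparing
       twice the remainder against d plays the role of the sign test above. */
    static uint32_t div_round_nearest_even(uint32_t n, uint32_t d)
    {
        uint32_t q = n / d;                          /* truncated quotient */
        uint64_t twice_rem = 2ull * (n - (uint64_t)q * d);
        if (twice_rem > d)  return q + 1;            /* above halfway: round up */
        if (twice_rem < d)  return q;                /* below halfway: truncate */
        return q + (q & 1);                          /* exact halfway: to even */
    }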
- +
- +#ifdef L_avr32_f32_mul
- + .global __avr32_f32_mul
- + .type __avr32_f32_mul,@function
- +
- +
- +__avr32_f32_mul:
- + mov r8, r12
- + eor r12, r11 /* MSB(r12) = Sign(op1) ^ Sign(op2) */
- + andh r12, 0x8000, COH
- +
- + /* Arrange operands so that op1 >= op2 */
- + cbr r8, 31
- + breq __avr32_f32_mul_op1_zero
- + cbr r11, 31
- +
- + /* Put the number with the largest exponent in r10
- + and the number with the smallest exponent in r9 */
- + max r10, r8, r11
- + min r9, r8, r11
- +
- + /* Unpack exponent and mantissa of op1 */
- + lsl r8, r10, 8
- + sbr r8, 31 /* Set implicit bit. */
- + lsr r10, 23
- +
- + /* op1 is NaN or Inf. */
- + cp.w r10, 0xff
- + breq __avr32_f32_mul_op1_nan_or_inf
- +
- + /* Unpack exponent and mantissa of op2 */
- + lsl r11, r9, 8
- + sbr r11, 31 /* Set implicit bit. */
- + lsr r9, 23
- +
- + /* op2 is either zero or subnormal. */
- + breq __avr32_f32_mul_op2_subnormal
- +0:
- + /* Calculate new exponent */
- + add r9,r10
- +
- + /* Do the multiplication */
- + mulu.d r10,r8,r11
- +
- + /* We might need to scale up by two if the MSB of the result is
- + zero. */
- + lsl r8, r11, 1
- + movcc r11, r8
- + subcc r9, 1
- +
- + /* Put the shifted out bits of the mantissa into r10 */
- + lsr r10, 8
- + bfins r10, r11, 24, 8
- +
- + sub r9,(127-1) /* remove extra exponent bias */
- + brle __avr32_f32_mul_res_subnormal
- +
- + /* Check for Inf. */
- + cp.w r9, 0xff
- + brge 1f
- +
- + /* Pack result. */
- + or r12, r12, r11 >> 8
- + bfins r12, r9, 23, 8
- +
- + /* Round */
- +__avr32_f32_mul_round:
- + mov_imm r8, 0x80000000
- + bld r12, 0
- + subne r8, -1
- +
- + cp.w r10, r8
- + subhs r12, -1
- +
- + ret r12
- +
- +1:
- + /* Return Inf */
- + orh r12, 0x7f80
- + ret r12
- +
- +__avr32_f32_mul_op2_subnormal:
- + cbr r11, 31
- + clz r9, r11
- + retcs 0 /* op2 is zero. Return 0 */
- + sub r9, 8
- + lsl r11, r11, r9
- + rsub r9, r9, 1
- +
- + /* Check if op1 is also subnormal. */
- + tst r10, r10
- + brne 0b
- +
- + /* op1 is also subnormal */
- + cbr r8, 31
- + clz r10, r11
- + retcs 0 /* op1 is zero. Return 0 */
- + lsl r8, r8, r10
- + rsub r10, r10, 1
- +
- + rjmp 0b
- +
- +
- +__avr32_f32_mul_op1_nan_or_inf:
- + /* Check if op1 is NaN, if so return NaN */
- + lsl r11, r8, 1
- + retne -1
- +
- + /* op1 is Inf. */
- + tst r9, r9
- + reteq -1 /* Inf * 0 -> NaN */
- +
- + bfins r12, r10, 23, 8 /* Generate Inf in r12 */
- +
- + /* Check if op2 is Inf. or NaN */
- + lsr r11, r9, 23
- + cp.w r11, 0xff
- + retne r12 /* op2 not Inf or NaN, return Inf */
- +
- + lsl r9, 9
- + reteq r12 /* op2 Inf return Inf */
- + ret -1 /* op2 is NaN, return NaN */
- +
- +__avr32_f32_mul_res_subnormal:
- + /* Check if the number is so small that
- + it will be represented as zero. */
- + rsub r9, r9, 9
- + rsub r8, r9, 32
- + retcs 0
- +
- + /* Shift the mantissa into the correct position.*/
- + lsr r9, r11, r9
- + /* Add sign bit. */
- + or r12, r9
- + /* Put the shifted out bits in the most significant part
- + of r8. */
- + lsl r11, r11, r8
- +
- + /* Add all the remainder bits used for rounding into r10 */
- + andh r10, 0x00FF
- + or r10, r11
- + rjmp __avr32_f32_mul_round
- +
- +__avr32_f32_mul_op1_zero:
- + bfextu r10, r11, 23, 8
- + cp.w r10, 0xff
- + retne r12
- + reteq -1
- +
- +#endif
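
__avr32_f32_mul above follows the usual soft-float recipe: xor the signs, add the biased exponents, multiply the two mantissas (held in 1.31 form with the implicit bit set) with mulu.d, renormalize by one position when the top bit of the product is clear, and round to nearest-even on the shifted-out bits. A rough C model of the normal-number path, assuming finite, normal inputs and a result that neither overflows nor goes subnormal (all special cases the assembly handles separately):

    #include <stdint.h>

    /* Normal-path model of the multiply above: both inputs finite, nonzero,
       normal, and the result neither overflows nor becomes subnormal.  */
    uint32_t f32_mul_normal(uint32_t a, uint32_t b)
    {
        uint32_t sign = (a ^ b) & 0x80000000u;
        int      exp  = (int)((a >> 23) & 0xff) + (int)((b >> 23) & 0xff) - 126;
        uint32_t ma   = (a << 8) | 0x80000000u;      /* mantissa in 1.31 form, implicit bit set */
        uint32_t mb   = (b << 8) | 0x80000000u;

        uint64_t prod = (uint64_t)ma * mb;           /* like mulu.d */
        if (!(prod >> 63)) {                         /* MSB clear: scale up by two, like movcc/subcc */
            prod <<= 1;
            exp  -= 1;
        }

        uint32_t mant = (uint32_t)(prod >> 40);      /* 24 bits, implicit bit included */
        uint64_t rest = prod & ((1ull << 40) - 1);   /* shifted-out bits used for rounding */
        uint64_t half = 1ull << 39;
        if (rest > half || (rest == half && (mant & 1)))
            mant += 1;                               /* round to nearest, ties to even */
        if (mant >> 24) {                            /* rounding carried out of the mantissa */
            mant >>= 1;
            exp  += 1;
        }
        return sign | ((uint32_t)exp << 23) | (mant & 0x7fffffu);
    }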
- +
- +
- +#ifdef L_avr32_s32_to_f32
- + .global __avr32_s32_to_f32
- + .type __avr32_s32_to_f32,@function
- +__avr32_s32_to_f32:
- + cp r12, 0
- + reteq r12 /* If zero then return zero float */
- + mov r11, r12 /* Keep the sign */
- + abs r12 /* Compute the absolute value */
- + mov r10, 31 + 127 /* Set the correct exponent */
- +
- + /* Normalize */
- + normalize_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
- +
- + /* Check for subnormal result */
- + cp.w r10, 0
- + brle __avr32_s32_to_f32_subnormal
- +
- + round_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
- + pack_sf r12 /*sf*/, r10 /*exp*/, r12 /*mant*/
- + lsl r11, 1
- + ror r12
- + ret r12
- +
- +__avr32_s32_to_f32_subnormal:
- + /* Adjust a subnormal result */
- + adjust_subnormal_sf r12/*sf*/, r10 /*exp*/, r12 /*mant*/, r11/*sign*/, r9 /*scratch*/
- + ret r12
- +
- +#endif
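
The conversion above leans on the normalize_sf/round_sf/pack_sf macros defined earlier in this file; the underlying idea is count-leading-zeros normalization with the exponent derived from the shift amount. A helper-free C sketch of the same scheme (for a nonzero 32-bit input the exponent here is always at least 127, so the subnormal branch cannot trigger in this model; names are illustrative):

    #include <stdint.h>

    /* Plain-C equivalent of the signed int -> float routine above.  */
    uint32_t s32_to_f32(int32_t x)
    {
        if (x == 0)
            return 0;                            /* zero int -> zero float */
        uint32_t sign = (x < 0) ? 0x80000000u : 0u;
        uint32_t mag  = (x < 0) ? (uint32_t)-(int64_t)x : (uint32_t)x;   /* |x|, also correct for INT32_MIN */

        int shift = __builtin_clz(mag);          /* like the clz inside normalize_sf */
        uint32_t mant = mag << shift;            /* MSB now set */
        int exp = 31 + 127 - shift;              /* same 31 + 127 starting exponent as above */

        uint32_t rest = mant & 0xffu;            /* 8 bits that will be dropped */
        mant >>= 8;                              /* 24-bit mantissa incl. implicit bit */
        if (rest > 0x80u || (rest == 0x80u && (mant & 1)))
            mant += 1;                           /* round to nearest, ties to even */
        if (mant >> 24) {                        /* rounding carry */
            mant >>= 1;
            exp  += 1;
        }
        return sign | ((uint32_t)exp << 23) | (mant & 0x7fffffu);
    }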
- +
- +#ifdef L_avr32_u32_to_f32
- + .global __avr32_u32_to_f32
- + .type __avr32_u32_to_f32,@function
- +__avr32_u32_to_f32:
- + cp r12, 0
- + reteq r12 /* If zero then return zero float */
- + mov r10, 31 + 127 /* Set the correct exponent */
- +
- + /* Normalize */
- + normalize_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
- +
- + /* Check for subnormal result */
- + cp.w r10, 0
- + brle __avr32_u32_to_f32_subnormal
- +
- + round_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
- + pack_sf r12 /*sf*/, r10 /*exp*/, r12 /*mant*/
- + lsr r12,1 /* Sign bit is 0 for unsigned int */
- + ret r12
- +
- +__avr32_u32_to_f32_subnormal:
- + /* Adjust a subnormal result */
- + mov r8, 0
- + adjust_subnormal_sf r12/*sf*/,r10 /*exp*/, r12 /*mant*/,r8/*sign*/, r9 /*scratch*/
- + ret r12
- +
- +
- +#endif
- +
- +
- +#ifdef L_avr32_f32_to_s32
- + .global __avr32_f32_to_s32
- + .type __avr32_f32_to_s32,@function
- +__avr32_f32_to_s32:
- + bfextu r11, r12, 23, 8
- + sub r11,127 /* Fix bias */
- + retlo 0 /* Negative exponent yields zero integer */
- +
- + /* Shift mantissa into correct position */
- + rsub r11,r11,31 /* Shift amount */
- + lsl r10,r12,8 /* Get mantissa */
- + sbr r10,31 /* Add implicit bit */
- + lsr r10,r10,r11 /* Perform shift */
- + lsl r12,1 /* Check sign */
- + retcc r10 /* if positive, we are done */
- + neg r10 /* if negative float, negate result */
- + ret r10
- +
- +#endif
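
Float-to-int truncation above is a pure shift: rebuild the mantissa with its implicit bit at the top, shift right by 31 minus the unbiased exponent, and negate if the sign bit was set. The same scheme in C, with the same caveat as the assembly that NaN and out-of-range values are not special-cased (names illustrative):

    #include <stdint.h>

    /* Bit-level model of the float -> signed int truncation above.  */
    int32_t f32_to_s32(uint32_t f)
    {
        int exp = (int)((f >> 23) & 0xff) - 127;     /* remove the bias */
        if (exp < 0)
            return 0;                                /* |value| < 1 truncates to 0 */
        uint32_t mant = (f << 8) | 0x80000000u;      /* mantissa with the implicit bit at bit 31 */
        uint32_t mag  = mant >> (31 - exp);          /* keep only the integer part */
        uint32_t res  = (f & 0x80000000u) ? 0u - mag : mag;   /* negate via unsigned wrap-around */
        return (int32_t)res;
    }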
- +
- +#ifdef L_avr32_f32_to_u32
- + .global __avr32_f32_to_u32
- + .type __avr32_f32_to_u32,@function
- +__avr32_f32_to_u32:
- + cp r12,0
- + retmi 0 /* Negative numbers give 0 */
- + bfextu r11, r12, 23, 8 /* Extract exponent */
- + sub r11,127 /* Fix bias */
- + retlo 0 /* Negative exponent yields zero integer */
- +
- + /* Shift mantissa into correct position */
- + rsub r11,r11,31 /* Shift amount */
- + lsl r12,8 /* Get mantissa */
- + sbr r12,31 /* Add implicit bit */
- + lsr r12,r12,r11 /* Perform shift */
- + ret r12
- +
- +#endif
- +
- +#ifdef L_avr32_f32_to_f64
- + .global __avr32_f32_to_f64
- + .type __avr32_f32_to_f64,@function
- +
- +__avr32_f32_to_f64:
- + lsl r11,r12,1 /* Remove sign bit, keep original value in r12*/
- + moveq r10, 0
- + reteq r11 /* Return zero if input is zero */
- +
- + bfextu r9,r11,24,8 /* Get exponent */
- + cp.w r9,0xff /* check for NaN or inf */
- + breq 0f
- +
- + lsl r11,7 /* Convert sf mantissa to df format */
- + mov r10,0
- +
- + /* Check if implicit bit should be set */
- + cp.w r9, 0
- + subeq r9,-1 /* Adjust exponent if it was 0 */
- + srne r8
- + or r11, r11, r8 << 31 /* Set implicit bit if needed */
- + sub r9,(127-0x3ff) /* Convert exponent to df format exponent */
- +
- + /* We know that the low register of the mantissa is 0 and will be unaffected by normalization. */
- + /* We can therefore use the faster normalize_sf function instead of normalize_df. */
- + normalize_sf r9 /*exp*/, r11 /*mantissa*/, r8 /*scratch*/
- + pack_df r9 /*exp*/, r10, r11 /*mantissa*/, r10, r11 /*df*/
- +
- +__extendsfdf_return_op1:
- + /* Rotate in sign bit */
- + lsl r12, 1
- + ror r11
- + ret r11
- +
- +0:
- + /* Inf or NaN*/
- + mov_imm r10, 0xffe00000
- + lsl r11,8 /* check mantissa */
- + movne r11, -1 /* Return NaN */
- + moveq r11, r10 /* Return inf */
- + mov r10, 0
- + rjmp __extendsfdf_return_op1
- +#endif
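
Widening float to double is exact: re-bias the exponent by 1023 - 127 = 896 and move the 23 fraction bits to the top of the 52-bit field. The routine above keeps the mantissa in the high word (hence the shift by 7 and the reuse of normalize_sf for subnormal inputs); the conventional flat bit-level form looks like this (normal, finite inputs only; names illustrative):

    #include <stdint.h>

    /* Exact float -> double widening for normal, finite inputs.
       Zero, subnormal, Inf and NaN need the extra branches the assembly has.  */
    uint64_t f32_to_f64_bits(uint32_t f)
    {
        uint64_t sign = (uint64_t)(f >> 31) << 63;
        uint64_t exp  = (uint64_t)((f >> 23) & 0xff) + (1023 - 127);   /* re-bias */
        uint64_t frac = (uint64_t)(f & 0x7fffffu) << 29;               /* 23 -> 52 fraction bits */
        return sign | (exp << 52) | frac;
    }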
- +
- +
- +#ifdef L_avr32_f64_to_f32
- + .global __avr32_f64_to_f32
- + .type __avr32_f64_to_f32,@function
- +
- +__avr32_f64_to_f32:
- + /* Unpack */
- + lsl r9,r11,1 /* Unpack exponent */
- + lsr r9,21
- +
- + reteq 0 /* If exponent is 0 the number is so small
- + that the conversion to single float gives
- + zero */
- +
- + lsl r8,r11,10 /* Adjust mantissa */
- + or r12,r8,r10>>22
- +
- + lsl r10,10 /* Check if there are any remaining bits
- + in the low part of the mantissa.*/
- + neg r10
- + rol r12 /* If there were remaining bits then set lsb
- + of mantissa to 1 */
- +
- + cp r9,0x7ff
- + breq 2f /* Check for NaN or inf */
- +
- + sub r9,(0x3ff-127) /* Adjust bias of exponent */
- + sbr r12,31 /* set the implicit bit.*/
- +
- + cp.w r9, 0 /* Check for subnormal number */
- + brle 3f
- +
- + round_sf r9 /*exp*/, r12 /*mant*/, r10 /*scratch*/
- + pack_sf r12 /*sf*/, r9 /*exp*/, r12 /*mant*/
- +__truncdfsf_return_op1:
- + /* Rotate in sign bit */
- + lsl r11, 1
- + ror r12
- + ret r12
- +
- +2:
- + /* NaN or inf */
- + cbr r12,31 /* clear implicit bit */
- + retne -1 /* Return NaN if mantissa not zero */
- + mov_imm r12, 0x7f800000
- + ret r12 /* Return inf */
- +
- +3: /* Result is subnormal. Adjust it.*/
- + adjust_subnormal_sf r12/*sf*/,r9 /*exp*/, r12 /*mant*/, r11/*sign*/, r10 /*scratch*/
- + ret r12
- +
- +
- +#endif
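
Narrowing double to float has to fold every discarded low mantissa bit into a sticky indication so that round-to-nearest-even still breaks ties correctly, which is what the neg/rol pair above does with the low word. A C model of the in-range path (Inf/NaN, subnormal results and exponent overflow omitted; names illustrative):

    #include <stdint.h>

    /* Normal-range model of the double -> float narrowing above: keep 24
       mantissa bits and fold the rest into the rounding decision.  */
    uint32_t f64_to_f32_bits(uint64_t d)
    {
        uint32_t sign = (uint32_t)(d >> 63) << 31;
        int      exp  = (int)((d >> 52) & 0x7ff) - 1023 + 127;
        uint64_t mant = (d & 0xfffffffffffffull) | (1ull << 52);   /* 53 bits incl. implicit bit */

        uint32_t keep = (uint32_t)(mant >> 29);                    /* top 24 bits */
        uint64_t rest = mant & ((1ull << 29) - 1);                 /* 29 discarded bits */
        uint64_t half = 1ull << 28;
        if (rest > half || (rest == half && (keep & 1)))           /* nearest, ties to even */
            keep += 1;
        if (keep >> 24) {                                          /* rounding carry */
            keep >>= 1;
            exp  += 1;
        }
        /* exp <= 0 (subnormal result) and exp >= 255 (overflow) would need extra code. */
        return sign | ((uint32_t)exp << 23) | (keep & 0x7fffffu);
    }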
- +
- +#if defined(L_mulsi3) && defined(__AVR32_NO_MUL__)
- + .global __mulsi3
- + .type __mulsi3,@function
- +
- +__mulsi3:
- + mov r9, 0 /* Accumulated product */
- +0:
- + lsr r11, 1 /* Shift multiplier right, lsb goes into C, Z set when it reaches zero */
- + addcs r9, r9, r12 /* If the shifted-out bit was set, add the current multiplicand */
- + breq 1f /* No set bits left in the multiplier, we are done */
- + lsl r12, 1 /* Double the multiplicand for the next bit position */
- + rjmp 0b
- +1:
- + ret r9
- +#endif
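
__mulsi3 is only built for cores without a hardware multiplier (__AVR32_NO_MUL__) and is the classic shift-and-add loop: step through the bits of the multiplier in r11 while doubling the multiplicand in r12. The same algorithm in C, for comparison; because it works modulo 2^32 it is equally valid for signed operands:

    #include <stdint.h>

    /* Shift-and-add multiply, the algorithm used by __mulsi3 above.  */
    uint32_t mulsi3(uint32_t a, uint32_t b)
    {
        uint32_t result = 0;
        while (b != 0) {
            if (b & 1)          /* like addcs: add when the shifted-out bit is set */
                result += a;
            b >>= 1;            /* like lsr r11, 1 */
            a <<= 1;            /* like lsl r12, 1 */
        }
        return result;
    }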
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/lib2funcs.S gcc-4.4.6/gcc/config/avr32/lib2funcs.S
- --- gcc-4.4.6.orig/gcc/config/avr32/lib2funcs.S 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/lib2funcs.S 2011-10-22 19:23:08.524581303 +0200
- @@ -0,0 +1,21 @@
- + .align 4
- + .global __nonlocal_goto
- + .type __nonlocal_goto,@function
- +
- +/* __nonlocal_goto: This function handles nonlocal gotos in gcc.
- +
- + parameter 0 (r12) = New Frame Pointer
- + parameter 1 (r11) = Address to goto
- + parameter 2 (r10) = New Stack Pointer
- +
- + This function invalidates the return stack, since it returns from a
- + function without using a return instruction.
- +*/
- +__nonlocal_goto:
- + mov r7, r12
- + mov sp, r10
- + frs # Flush return stack
- + mov pc, r11
- +
- +
- +
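__nonlocal_goto is the runtime helper behind jumps that leave a nested function and land on a label in its containing function, a GNU C extension; the frs is needed because control leaves without a matching return, so the hardware return-address stack must be flushed. A small example of source that performs such a nonlocal goto (whether a particular gcc build routes it through this helper or through inline code from the nonlocal_goto pattern depends on the port; note that the LIB2FUNCS_EXTRA line that would build this file is commented out in t-avr32 further down):

    /* GNU C example whose inner "goto" is a nonlocal goto out of a nested
       function (compile with gcc; nested functions are a GNU extension).  */
    #include <stdio.h>

    int find_negative(const int *v, int n)
    {
        __label__ found;             /* label made visible to the nested function */
        int i, index = -1;

        void check(int k)            /* nested function */
        {
            if (v[k] < 0) {
                index = k;
                goto found;          /* nonlocal goto into the enclosing frame */
            }
        }

        for (i = 0; i < n; i++)
            check(i);

    found:
        return index;
    }

    int main(void)
    {
        int v[5] = { 3, 1, -4, 1, 5 };
        printf("%d\n", find_negative(v, 5));    /* prints 2 */
        return 0;
    }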
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/linux-elf.h gcc-4.4.6/gcc/config/avr32/linux-elf.h
- --- gcc-4.4.6.orig/gcc/config/avr32/linux-elf.h 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/linux-elf.h 2011-10-22 19:23:08.524581303 +0200
- @@ -0,0 +1,151 @@
- +/*
- + Linux/Elf specific definitions.
- + Copyright 2003-2006 Atmel Corporation.
- +
- + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
- + and Håvard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com>
- +
- + This file is part of GCC.
- +
- + This program is free software; you can redistribute it and/or modify
- + it under the terms of the GNU General Public License as published by
- + the Free Software Foundation; either version 2 of the License, or
- + (at your option) any later version.
- +
- + This program is distributed in the hope that it will be useful,
- + but WITHOUT ANY WARRANTY; without even the implied warranty of
- + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- + GNU General Public License for more details.
- +
- + You should have received a copy of the GNU General Public License
- + along with this program; if not, write to the Free Software
- + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
- +
- +
- +
- +/* elfos.h should have already been included. Now just override
- + any conflicting definitions and add any extras. */
- +
- +/* Run-time Target Specification. */
- +#undef TARGET_VERSION
- +#define TARGET_VERSION fputs (" (AVR32 GNU/Linux with ELF)", stderr);
- +
- +/* Do not assume anything about header files. */
- +#define NO_IMPLICIT_EXTERN_C
- +
- +/* The GNU C++ standard library requires that these macros be defined. */
- +#undef CPLUSPLUS_CPP_SPEC
- +#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)"
- +
- +/* Now we define the strings used to build the spec file. */
- +#undef LIB_SPEC
- +#define LIB_SPEC \
- + "%{pthread:-lpthread} \
- + %{shared:-lc} \
- + %{!shared:%{profile:-lc_p}%{!profile:-lc}}"
- +
- +/* Provide a STARTFILE_SPEC appropriate for GNU/Linux. Here we add
- + the GNU/Linux magical crtbegin.o file (see crtstuff.c) which
- + provides part of the support for getting C++ file-scope static
- + object constructed before entering `main'. */
- +
- +#undef STARTFILE_SPEC
- +#define STARTFILE_SPEC \
- + "%{!shared: \
- + %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} \
- + %{!p:%{profile:gcrt1.o%s} \
- + %{!profile:crt1.o%s}}}} \
- + crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
- +
- +/* Provide a ENDFILE_SPEC appropriate for GNU/Linux. Here we tack on
- + the GNU/Linux magical crtend.o file (see crtstuff.c) which
- + provides part of the support for getting C++ file-scope static
- + object constructed before entering `main', followed by a normal
- + GNU/Linux "finalizer" file, `crtn.o'. */
- +
- +#undef ENDFILE_SPEC
- +#define ENDFILE_SPEC \
- + "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
- +
- +#undef ASM_SPEC
- +#define ASM_SPEC "%{!mno-pic:%{!fno-pic:--pic}} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{mcpu=*:-mcpu=%*}"
- +
- +#undef LINK_SPEC
- +#define LINK_SPEC "%{version:-v} \
- + %{static:-Bstatic} \
- + %{shared:-shared} \
- + %{symbolic:-Bsymbolic} \
- + %{rdynamic:-export-dynamic} \
- + %{!dynamic-linker:-dynamic-linker /lib/ld-uClibc.so.0} \
- + %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}}"
- +
- +#define TARGET_OS_CPP_BUILTINS() LINUX_TARGET_OS_CPP_BUILTINS()
- +
- +/* This is how we tell the assembler that two symbols have the same value. */
- +#define ASM_OUTPUT_DEF(FILE, NAME1, NAME2) \
- + do \
- + { \
- + assemble_name (FILE, NAME1); \
- + fputs (" = ", FILE); \
- + assemble_name (FILE, NAME2); \
- + fputc ('\n', FILE); \
- + } \
- + while (0)
- +
- +
- +
- +#undef CC1_SPEC
- +#define CC1_SPEC "%{profile:-p}"
- +
- +/* Target CPU builtins. */
- +#define TARGET_CPU_CPP_BUILTINS() \
- + do \
- + { \
- + builtin_define ("__avr32__"); \
- + builtin_define ("__AVR32__"); \
- + builtin_define ("__AVR32_LINUX__"); \
- + builtin_define (avr32_part->macro); \
- + builtin_define (avr32_arch->macro); \
- + if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A) \
- + builtin_define ("__AVR32_AVR32A__"); \
- + else \
- + builtin_define ("__AVR32_AVR32B__"); \
- + if (TARGET_UNALIGNED_WORD) \
- + builtin_define ("__AVR32_HAS_UNALIGNED_WORD__"); \
- + if (TARGET_SIMD) \
- + builtin_define ("__AVR32_HAS_SIMD__"); \
- + if (TARGET_DSP) \
- + builtin_define ("__AVR32_HAS_DSP__"); \
- + if (TARGET_RMW) \
- + builtin_define ("__AVR32_HAS_RMW__"); \
- + if (TARGET_BRANCH_PRED) \
- + builtin_define ("__AVR32_HAS_BRANCH_PRED__"); \
- + if (TARGET_FAST_FLOAT) \
- + builtin_define ("__AVR32_FAST_FLOAT__"); \
- + } \
- + while (0)
- +
- +
- +
- +/* Call the function profiler with a given profile label. */
- +#undef FUNCTION_PROFILER
- +#define FUNCTION_PROFILER(STREAM, LABELNO) \
- + do \
- + { \
- + fprintf (STREAM, "\tmov\tlr, lo(mcount)\n\torh\tlr, hi(mcount)\n"); \
- + fprintf (STREAM, "\ticall lr\n"); \
- + } \
- + while (0)
- +
- +#define NO_PROFILE_COUNTERS 1
- +
- +/* For dynamic libraries to work */
- +/* #define PLT_REG_CALL_CLOBBERED 1 */
- +#define AVR32_ALWAYS_PIC 1
- +
- +/* uclibc does not implement sinf, cosf etc. */
- +#undef TARGET_C99_FUNCTIONS
- +#define TARGET_C99_FUNCTIONS 0
- +
- +#define LINK_GCC_C_SEQUENCE_SPEC \
- + "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}"
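The builtin_define calls in TARGET_CPU_CPP_BUILTINS above are what make the __AVR32_* feature macros visible to preprocessed code. A trivial consumer, shown only to illustrate how the macros are meant to be tested:

    /* Feature test against the macros defined by TARGET_CPU_CPP_BUILTINS above. */
    #include <stdio.h>

    int main(void)
    {
    #ifdef __AVR32_LINUX__
        puts("built for AVR32 GNU/Linux");
    #endif
    #ifdef __AVR32_HAS_SIMD__
        puts("SIMD instructions available");
    #else
        puts("no SIMD");
    #endif
        return 0;
    }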
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/predicates.md gcc-4.4.6/gcc/config/avr32/predicates.md
- --- gcc-4.4.6.orig/gcc/config/avr32/predicates.md 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/predicates.md 2011-10-22 19:23:08.524581303 +0200
- @@ -0,0 +1,422 @@
- +;; AVR32 predicates file.
- +;; Copyright 2003-2006 Atmel Corporation.
- +;;
- +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
- +;;
- +;; This file is part of GCC.
- +;;
- +;; This program is free software; you can redistribute it and/or modify
- +;; it under the terms of the GNU General Public License as published by
- +;; the Free Software Foundation; either version 2 of the License, or
- +;; (at your option) any later version.
- +;;
- +;; This program is distributed in the hope that it will be useful,
- +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
- +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- +;; GNU General Public License for more details.
- +;;
- +;; You should have received a copy of the GNU General Public License
- +;; along with this program; if not, write to the Free Software
- +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- +
- +
- +;; True if the operand is a memory reference which contains an
- +;; address consisting of a single pointer register
- +(define_predicate "avr32_indirect_register_operand"
- + (and (match_code "mem")
- + (match_test "register_operand(XEXP(op, 0), SImode)")))
- +
- +
- +
- +;; Address expression with a base pointer offset by
- +;; a register displacement
- +(define_predicate "avr32_indexed_memory_operand"
- + (and (match_code "mem")
- + (match_test "GET_CODE(XEXP(op, 0)) == PLUS"))
- + {
- +
- + rtx op0 = XEXP(XEXP(op, 0), 0);
- + rtx op1 = XEXP(XEXP(op, 0), 1);
- +
- + return ((avr32_address_register_rtx_p (op0, 0)
- + && avr32_legitimate_index_p (GET_MODE(op), op1, 0))
- + || (avr32_address_register_rtx_p (op1, 0)
- + && avr32_legitimate_index_p (GET_MODE(op), op0, 0)));
- +
- + })
- +
- +;; Operand suitable for the ld.sb instruction
- +(define_predicate "load_sb_memory_operand"
- + (ior (match_operand 0 "avr32_indirect_register_operand")
- + (match_operand 0 "avr32_indexed_memory_operand")))
- +
- +
- +;; Operand suitable as operand to insns sign extending QI values
- +(define_predicate "extendqi_operand"
- + (ior (match_operand 0 "load_sb_memory_operand")
- + (match_operand 0 "register_operand")))
- +
- +(define_predicate "post_inc_memory_operand"
- + (and (match_code "mem")
- + (match_test "(GET_CODE(XEXP(op, 0)) == POST_INC)
- + && REG_P(XEXP(XEXP(op, 0), 0))")))
- +
- +(define_predicate "pre_dec_memory_operand"
- + (and (match_code "mem")
- + (match_test "(GET_CODE(XEXP(op, 0)) == PRE_DEC)
- + && REG_P(XEXP(XEXP(op, 0), 0))")))
- +
- +;; Operand suitable for add instructions
- +(define_predicate "avr32_add_operand"
- + (ior (match_operand 0 "register_operand")
- + (and (match_operand 0 "immediate_operand")
- + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'I', \"Is21\")"))))
- +
- +;; Operand is a power of two immediate
- +(define_predicate "power_of_two_operand"
- + (match_code "const_int")
- +{
- + HOST_WIDE_INT value = INTVAL (op);
- +
- + return value != 0 && (value & (value - 1)) == 0;
- +})
- +
- +;; Operand is a multiple of 8 immediate
- +(define_predicate "multiple_of_8_operand"
- + (match_code "const_int")
- +{
- + HOST_WIDE_INT value = INTVAL (op);
- +
- + return (value & 0x7) == 0 ;
- +})
- +
- +;; Operand is a multiple of 16 immediate
- +(define_predicate "multiple_of_16_operand"
- + (match_code "const_int")
- +{
- + HOST_WIDE_INT value = INTVAL (op);
- +
- + return (value & 0xf) == 0 ;
- +})
- +
- +;; Operand is a mask used for masking away upper bits of a reg
- +(define_predicate "avr32_mask_upper_bits_operand"
- + (match_code "const_int")
- +{
- + HOST_WIDE_INT value = INTVAL (op) + 1;
- +
- + return value != 1 && value != 0 && (value & (value - 1)) == 0;
- +})
- +
- +
- +;; Operand suitable for mul instructions
- +(define_predicate "avr32_mul_operand"
- + (ior (match_operand 0 "register_operand")
- + (and (match_operand 0 "immediate_operand")
- + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")"))))
- +
- +;; True for logical binary operators.
- +(define_predicate "logical_binary_operator"
- + (match_code "ior,xor,and"))
- +
- +;; True for logical shift operators
- +(define_predicate "logical_shift_operator"
- + (match_code "ashift,lshiftrt"))
- +
- +;; True for shift operand for logical and, or and eor insns
- +(define_predicate "avr32_logical_shift_operand"
- + (and (match_code "ashift,lshiftrt")
- + (ior (and (match_test "GET_CODE(XEXP(op, 1)) == CONST_INT")
- + (match_test "register_operand(XEXP(op, 0), GET_MODE(XEXP(op, 0)))"))
- + (and (match_test "GET_CODE(XEXP(op, 0)) == CONST_INT")
- + (match_test "register_operand(XEXP(op, 1), GET_MODE(XEXP(op, 1)))"))))
- + )
- +
- +
- +;; Predicate for second operand to and, ior and xor insn patterns
- +(define_predicate "avr32_logical_insn_operand"
- + (ior (match_operand 0 "register_operand")
- + (match_operand 0 "avr32_logical_shift_operand"))
- +)
- +
- +
- +;; True for avr32 comparison operators
- +(define_predicate "avr32_comparison_operator"
- + (ior (match_code "eq, ne, gt, ge, lt, le, gtu, geu, ltu, leu")
- + (and (match_code "unspec")
- + (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
- + || (XINT(op, 1) == UNSPEC_COND_PL)"))))
- +
- +(define_predicate "avr32_cond3_comparison_operator"
- + (ior (match_code "eq, ne, ge, lt, geu, ltu")
- + (and (match_code "unspec")
- + (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
- + || (XINT(op, 1) == UNSPEC_COND_PL)"))))
- +
- +;; True for avr32 comparison operand
- +(define_predicate "avr32_comparison_operand"
- + (ior (and (match_code "eq, ne, gt, ge, lt, le, gtu, geu, ltu, leu")
- + (match_test "(CC0_P (XEXP(op,0)) && rtx_equal_p (XEXP(op,1), const0_rtx))"))
- + (and (match_code "unspec")
- + (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
- + || (XINT(op, 1) == UNSPEC_COND_PL)"))))
- +
- +;; True if this is a const_int with one bit set
- +(define_predicate "one_bit_set_operand"
- + (match_code "const_int")
- + {
- + int i;
- + int value;
- + int ones = 0;
- +
- + value = INTVAL(op);
- + for ( i = 0 ; i < 32; i++ ){
- + if ( value & ( 1 << i ) ){
- + ones++;
- + }
- + }
- +
- + return ( ones == 1 );
- + })
- +
- +
- +;; True if this is a const_int with one bit cleared
- +(define_predicate "one_bit_cleared_operand"
- + (match_code "const_int")
- + {
- + int i;
- + int value;
- + int zeroes = 0;
- +
- + value = INTVAL(op);
- + for ( i = 0 ; i < 32; i++ ){
- + if ( !(value & ( 1 << i )) ){
- + zeroes++;
- + }
- + }
- +
- + return ( zeroes == 1 );
- + })
- +
- +
- +;; Immediate all the low 16-bits cleared
- +(define_predicate "avr32_hi16_immediate_operand"
- + (match_code "const_int")
- + {
- + /* If the low 16-bits are zero then this
- + is a hi16 immediate. */
- + return ((INTVAL(op) & 0xffff) == 0);
- + }
- +)
- +
- +;; True if this is a register or immediate operand
- +(define_predicate "register_immediate_operand"
- + (ior (match_operand 0 "register_operand")
- + (match_operand 0 "immediate_operand")))
- +
- +;; True if this is a register or const_int operand
- +(define_predicate "register_const_int_operand"
- + (ior (match_operand 0 "register_operand")
- + (and (match_operand 0 "const_int_operand")
- + (match_operand 0 "immediate_operand"))))
- +
- +;; True if this is a register or const_double operand
- +(define_predicate "register_const_double_operand"
- + (ior (match_operand 0 "register_operand")
- + (match_operand 0 "const_double_operand")))
- +
- +;; True if this is an operand containing a label_ref.
- +(define_predicate "avr32_label_ref_operand"
- + (and (match_code "mem")
- + (match_test "avr32_find_symbol(op)
- + && (GET_CODE(avr32_find_symbol(op)) == LABEL_REF)")))
- +
- +;; True if this is a valid symbol pointing to the constant pool.
- +(define_predicate "avr32_const_pool_operand"
- + (and (match_code "symbol_ref")
- + (match_test "CONSTANT_POOL_ADDRESS_P(op)"))
- + {
- + return (flag_pic ? (!(symbol_mentioned_p (get_pool_constant (op))
- + || label_mentioned_p (get_pool_constant (op)))
- + || avr32_got_mentioned_p(get_pool_constant (op)))
- + : true);
- + }
- +)
- +
- +;; True if this is a memory reference to the constant or mini pool.
- +(define_predicate "avr32_const_pool_ref_operand"
- + (ior (match_operand 0 "avr32_label_ref_operand")
- + (and (match_code "mem")
- + (match_test "avr32_const_pool_operand(XEXP(op,0), GET_MODE(XEXP(op,0)))"))))
- +
- +
- +;; Legal source operand for movti insns
- +(define_predicate "avr32_movti_src_operand"
- + (ior (match_operand 0 "avr32_const_pool_ref_operand")
- + (ior (ior (match_operand 0 "register_immediate_operand")
- + (match_operand 0 "avr32_indirect_register_operand"))
- + (match_operand 0 "post_inc_memory_operand"))))
- +
- +;; Legal destination operand for movti insns
- +(define_predicate "avr32_movti_dst_operand"
- + (ior (ior (match_operand 0 "register_operand")
- + (match_operand 0 "avr32_indirect_register_operand"))
- + (match_operand 0 "pre_dec_memory_operand")))
- +
- +
- +;; True if this is a k12-offset memory operand.
- +(define_predicate "avr32_k12_memory_operand"
- + (and (match_code "mem")
- + (ior (match_test "REG_P(XEXP(op, 0))")
- + (match_test "GET_CODE(XEXP(op, 0)) == PLUS
- + && REG_P(XEXP(XEXP(op, 0), 0))
- + && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)
- + && (CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(XEXP(op, 0), 0)),
- + 'K', (mode == SImode) ? \"Ks14\" : ((mode == HImode) ? \"Ks13\" : \"Ks12\")))"))))
- +
- +;; True if this is a memory operand with an immediate displacement.
- +(define_predicate "avr32_imm_disp_memory_operand"
- + (and (match_code "mem")
- + (match_test "GET_CODE(XEXP(op, 0)) == PLUS
- + && REG_P(XEXP(XEXP(op, 0), 0))
- + && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)")))
- +
- +;; True if this is a bswap operand.
- +(define_predicate "avr32_bswap_operand"
- + (ior (match_operand 0 "avr32_k12_memory_operand")
- + (match_operand 0 "register_operand")))
- +
- +;; True if this is a valid coprocessor insn memory operand.
- +(define_predicate "avr32_cop_memory_operand"
- + (and (match_operand 0 "memory_operand")
- + (not (match_test "GET_CODE(XEXP(op, 0)) == PLUS
- + && REG_P(XEXP(XEXP(op, 0), 0))
- + && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)
- + && !(CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(XEXP(op, 0), 0)), 'K', \"Ku10\"))"))))
- +
- +;; True if this is a valid source/destination operand.
- +;; for moving values to/from a coprocessor
- +(define_predicate "avr32_cop_move_operand"
- + (ior (match_operand 0 "register_operand")
- + (match_operand 0 "avr32_cop_memory_operand")))
- +
- +
- +;; True if this is a valid extract byte offset for use in
- +;; load extracted index insns.
- +(define_predicate "avr32_extract_shift_operand"
- + (and (match_operand 0 "const_int_operand")
- + (match_test "(INTVAL(op) == 0) || (INTVAL(op) == 8)
- + || (INTVAL(op) == 16) || (INTVAL(op) == 24)")))
- +
- +;; True if this is a valid avr32 symbol operand.
- +(define_predicate "avr32_symbol_operand"
- + (and (match_code "label_ref, symbol_ref, const")
- + (match_test "avr32_find_symbol(op)")))
- +
- +;; True if this is a valid operand for the lda.w and call pseudo insns.
- +(define_predicate "avr32_address_operand"
- + (and (and (match_code "label_ref, symbol_ref")
- + (match_test "avr32_find_symbol(op)"))
- + (ior (match_test "TARGET_HAS_ASM_ADDR_PSEUDOS")
- + (match_test "flag_pic")) ))
- +
- +;; An immediate k16 address operand
- +(define_predicate "avr32_ks16_address_operand"
- + (and (match_operand 0 "address_operand")
- + (ior (match_test "REG_P(op)")
- + (match_test "GET_CODE(op) == PLUS
- + && ((GET_CODE(XEXP(op,0)) == CONST_INT)
- + || (GET_CODE(XEXP(op,1)) == CONST_INT))")) ))
- +
- +;; An offset k16 memory operand
- +(define_predicate "avr32_ks16_memory_operand"
- + (and (match_code "mem")
- + (match_test "avr32_ks16_address_operand (XEXP (op, 0), GET_MODE (XEXP (op, 0)))")))
- +
- +;; An immediate k11 address operand
- +(define_predicate "avr32_ks11_address_operand"
- + (and (match_operand 0 "address_operand")
- + (ior (match_test "REG_P(op)")
- + (match_test "GET_CODE(op) == PLUS
- + && (((GET_CODE(XEXP(op,0)) == CONST_INT)
- + && avr32_const_ok_for_constraint_p(INTVAL(XEXP(op,0)), 'K', \"Ks11\"))
- + || ((GET_CODE(XEXP(op,1)) == CONST_INT)
- + && avr32_const_ok_for_constraint_p(INTVAL(XEXP(op,1)), 'K', \"Ks11\")))")) ))
- +
- +;; True if this is an avr32 call operand
- +(define_predicate "avr32_call_operand"
- + (ior (ior (match_operand 0 "register_operand")
- + (ior (match_operand 0 "avr32_const_pool_ref_operand")
- + (match_operand 0 "avr32_address_operand")))
- + (match_test "SYMBOL_REF_RCALL_FUNCTION_P(op)")))
- +
- +;; Return true for operators performing ALU operations
- +
- +(define_predicate "alu_operator"
- + (match_code "ior, xor, and, plus, minus, ashift, lshiftrt, ashiftrt"))
- +
- +(define_predicate "avr32_add_shift_immediate_operand"
- + (and (match_operand 0 "immediate_operand")
- + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ku02\")")))
- +
- +(define_predicate "avr32_cond_register_immediate_operand"
- + (ior (match_operand 0 "register_operand")
- + (and (match_operand 0 "immediate_operand")
- + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")"))))
- +
- +(define_predicate "avr32_cond_immediate_operand"
- + (and (match_operand 0 "immediate_operand")
- + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'I', \"Is08\")")))
- +
- +
- +(define_predicate "avr32_cond_move_operand"
- + (ior (ior (match_operand 0 "register_operand")
- + (and (match_operand 0 "immediate_operand")
- + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")")))
- + (and (match_test "TARGET_V2_INSNS")
- + (match_operand 0 "memory_operand"))))
- +
- +(define_predicate "avr32_mov_immediate_operand"
- + (and (match_operand 0 "immediate_operand")
- + (match_test "avr32_const_ok_for_move(INTVAL(op))")))
- +
- +
- +(define_predicate "avr32_rmw_address_operand"
- + (ior (and (match_code "symbol_ref")
- + (match_test "({rtx symbol = avr32_find_symbol(op); \
- + symbol && (GET_CODE (symbol) == SYMBOL_REF) && SYMBOL_REF_RMW_ADDR(symbol);})"))
- + (and (match_operand 0 "immediate_operand")
- + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks17\")")))
- + {
- + return TARGET_RMW && !flag_pic;
- + }
- +)
- +
- +(define_predicate "avr32_rmw_memory_operand"
- + (and (match_code "mem")
- + (match_test "!volatile_refs_p(op) && (GET_MODE(op) == SImode) &&
- + avr32_rmw_address_operand(XEXP(op, 0), GET_MODE(XEXP(op, 0)))")))
- +
- +(define_predicate "avr32_rmw_memory_or_register_operand"
- + (ior (match_operand 0 "avr32_rmw_memory_operand")
- + (match_operand 0 "register_operand")))
- +
- +(define_predicate "avr32_non_rmw_memory_operand"
- + (and (not (match_operand 0 "avr32_rmw_memory_operand"))
- + (match_operand 0 "memory_operand")))
- +
- +(define_predicate "avr32_non_rmw_general_operand"
- + (and (not (match_operand 0 "avr32_rmw_memory_operand"))
- + (match_operand 0 "general_operand")))
- +
- +(define_predicate "avr32_non_rmw_nonimmediate_operand"
- + (and (not (match_operand 0 "avr32_rmw_memory_operand"))
- + (match_operand 0 "nonimmediate_operand")))
- +
- +;; Return true if the operand is the 1.0f constant.
- +
- +(define_predicate "const_1f_operand"
- + (match_code "const_int,const_double")
- +{
- + return (op == CONST1_RTX (SFmode));
- +})
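
The counting loops in power_of_two_operand, one_bit_set_operand and one_bit_cleared_operand above are straightforward; for reference, the same tests reduce to the usual bit tricks (a C restatement, not a proposed change to the port):

    #include <stdint.h>

    /* Equivalent formulations of the predicates' bit tests.  */
    int is_power_of_two(uint32_t v)   { return v != 0 && (v & (v - 1)) == 0; }
    int has_one_bit_set(uint32_t v)   { return v != 0 && (v & (v - 1)) == 0; }   /* same test */
    int has_one_bit_clear(uint32_t v) { return has_one_bit_set(~v); }            /* one 0 in v = one 1 in ~v */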
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/simd.md gcc-4.4.6/gcc/config/avr32/simd.md
- --- gcc-4.4.6.orig/gcc/config/avr32/simd.md 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/simd.md 2011-10-22 19:23:08.528581303 +0200
- @@ -0,0 +1,145 @@
- +;; AVR32 machine description file for SIMD instructions.
- +;; Copyright 2003-2006 Atmel Corporation.
- +;;
- +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
- +;;
- +;; This file is part of GCC.
- +;;
- +;; This program is free software; you can redistribute it and/or modify
- +;; it under the terms of the GNU General Public License as published by
- +;; the Free Software Foundation; either version 2 of the License, or
- +;; (at your option) any later version.
- +;;
- +;; This program is distributed in the hope that it will be useful,
- +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
- +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- +;; GNU General Public License for more details.
- +;;
- +;; You should have received a copy of the GNU General Public License
- +;; along with this program; if not, write to the Free Software
- +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- +
- +;; -*- Mode: Scheme -*-
- +
- +
- +;; Vector modes
- +(define_mode_iterator VECM [V2HI V4QI])
- +(define_mode_attr size [(V2HI "h") (V4QI "b")])
- +
- +(define_insn "add<mode>3"
- + [(set (match_operand:VECM 0 "register_operand" "=r")
- + (plus:VECM (match_operand:VECM 1 "register_operand" "r")
- + (match_operand:VECM 2 "register_operand" "r")))]
- + "TARGET_SIMD"
- + "padd.<size>\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +
- +(define_insn "sub<mode>3"
- + [(set (match_operand:VECM 0 "register_operand" "=r")
- + (minus:VECM (match_operand:VECM 1 "register_operand" "r")
- + (match_operand:VECM 2 "register_operand" "r")))]
- + "TARGET_SIMD"
- + "psub.<size>\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +
- +(define_insn "abs<mode>2"
- + [(set (match_operand:VECM 0 "register_operand" "=r")
- + (abs:VECM (match_operand:VECM 1 "register_operand" "r")))]
- + "TARGET_SIMD"
- + "pabs.s<size>\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +(define_insn "ashl<mode>3"
- + [(set (match_operand:VECM 0 "register_operand" "=r")
- + (ashift:VECM (match_operand:VECM 1 "register_operand" "r")
- + (match_operand:SI 2 "immediate_operand" "Ku04")))]
- + "TARGET_SIMD"
- + "plsl.<size>\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +(define_insn "ashr<mode>3"
- + [(set (match_operand:VECM 0 "register_operand" "=r")
- + (ashiftrt:VECM (match_operand:VECM 1 "register_operand" "r")
- + (match_operand:SI 2 "immediate_operand" "Ku04")))]
- + "TARGET_SIMD"
- + "pasr.<size>\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +(define_insn "lshr<mode>3"
- + [(set (match_operand:VECM 0 "register_operand" "=r")
- + (lshiftrt:VECM (match_operand:VECM 1 "register_operand" "r")
- + (match_operand:SI 2 "immediate_operand" "Ku04")))]
- + "TARGET_SIMD"
- + "plsr.<size>\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +(define_insn "smaxv2hi3"
- + [(set (match_operand:V2HI 0 "register_operand" "=r")
- + (smax:V2HI (match_operand:V2HI 1 "register_operand" "r")
- + (match_operand:V2HI 2 "register_operand" "r")))]
- +
- + "TARGET_SIMD"
- + "pmax.sh\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +(define_insn "sminv2hi3"
- + [(set (match_operand:V2HI 0 "register_operand" "=r")
- + (smin:V2HI (match_operand:V2HI 1 "register_operand" "r")
- + (match_operand:V2HI 2 "register_operand" "r")))]
- +
- + "TARGET_SIMD"
- + "pmin.sh\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +(define_insn "umaxv4qi3"
- + [(set (match_operand:V4QI 0 "register_operand" "=r")
- + (umax:V4QI (match_operand:V4QI 1 "register_operand" "r")
- + (match_operand:V4QI 2 "register_operand" "r")))]
- +
- + "TARGET_SIMD"
- + "pmax.ub\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +(define_insn "uminv4qi3"
- + [(set (match_operand:V4QI 0 "register_operand" "=r")
- + (umin:V4QI (match_operand:V4QI 1 "register_operand" "r")
- + (match_operand:V4QI 2 "register_operand" "r")))]
- +
- + "TARGET_SIMD"
- + "pmin.ub\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +
- +(define_insn "addsubv2hi"
- + [(set (match_operand:V2HI 0 "register_operand" "=r")
- + (vec_concat:V2HI
- + (plus:HI (match_operand:HI 1 "register_operand" "r")
- + (match_operand:HI 2 "register_operand" "r"))
- + (minus:HI (match_dup 1) (match_dup 2))))]
- + "TARGET_SIMD"
- + "paddsub.h\t%0, %1:b, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +(define_insn "subaddv2hi"
- + [(set (match_operand:V2HI 0 "register_operand" "=r")
- + (vec_concat:V2HI
- + (minus:HI (match_operand:HI 1 "register_operand" "r")
- + (match_operand:HI 2 "register_operand" "r"))
- + (plus:HI (match_dup 1) (match_dup 2))))]
- + "TARGET_SIMD"
- + "psubadd.h\t%0, %1:b, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
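
The add<mode>3, sub<mode>3 and related patterns above are reached from C most directly through gcc's generic vector extension, whose 4-byte vector types map onto the V2HI and V4QI machine modes. A sketch (the typedef names are arbitrary, and whether a given gcc 4.4 build actually emits padd.h/psub.b here depends on -march selecting a TARGET_SIMD core):

    /* GCC generic vector types matching the V2HI and V4QI machine modes. */
    typedef short         v2hi __attribute__((vector_size(4)));
    typedef unsigned char v4qi __attribute__((vector_size(4)));

    v2hi add_halves(v2hi a, v2hi b)
    {
        return a + b;           /* candidate for padd.h on a SIMD-capable core */
    }

    v4qi sub_bytes(v4qi a, v4qi b)
    {
        return a - b;           /* candidate for psub.b */
    }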
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/sync.md gcc-4.4.6/gcc/config/avr32/sync.md
- --- gcc-4.4.6.orig/gcc/config/avr32/sync.md 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/sync.md 2011-10-22 19:23:08.528581303 +0200
- @@ -0,0 +1,244 @@
- +;;=================================================================
- +;; Atomic operations
- +;;=================================================================
- +
- +
- +(define_insn "sync_compare_and_swapsi"
- + [(set (match_operand:SI 0 "register_operand" "=&r,&r")
- + (match_operand:SI 1 "memory_operand" "+RKs16,+RKs16"))
- + (set (match_dup 1)
- + (unspec_volatile:SI
- + [(match_dup 1)
- + (match_operand:SI 2 "register_immediate_operand" "r,Ks21")
- + (match_operand:SI 3 "register_operand" "r,r")]
- + VUNSPEC_SYNC_CMPXCHG)) ]
- + ""
- + "0:
- + ssrf\t5
- + ld.w\t%0,%1
- + cp.w\t%0,%2
- + brne\t0f
- + stcond\t%1, %3
- + brne\t0b
- + 0:
- + "
- + [(set_attr "length" "16,18")
- + (set_attr "cc" "clobber")]
- + )
- +
- +
- +(define_code_iterator atomic_op [plus minus and ior xor])
- +(define_code_attr atomic_asm_insn [(plus "add") (minus "sub") (and "and") (ior "or") (xor "eor")])
- +(define_code_attr atomic_insn [(plus "add") (minus "sub") (and "and") (ior "ior") (xor "xor")])
- +
- +(define_insn "sync_loadsi"
- + ; NB! Put an early clobber on the destination operand to
- + ; avoid gcc using the same register in the source and
- + ; destination. This is done to prevent gcc from clobbering
- + ; the source operand, since these instructions
- + ; are actually inside a "loop".
- + [(set (match_operand:SI 0 "register_operand" "=&r")
- + (unspec_volatile:SI
- + [(match_operand:SI 1 "avr32_ks16_memory_operand" "RKs16")
- + (label_ref (match_operand 2 "" ""))]
- + VUNSPEC_SYNC_SET_LOCK_AND_LOAD) )]
- + ""
- + "%2:
- + ssrf\t5
- + ld.w\t%0,%1"
- + [(set_attr "length" "6")
- + (set_attr "cc" "clobber")]
- + )
- +
- +(define_insn "sync_store_if_lock"
- + [(set (match_operand:SI 0 "avr32_ks16_memory_operand" "=RKs16")
- + (unspec_volatile:SI
- + [(match_operand:SI 1 "register_operand" "r")
- + (label_ref (match_operand 2 "" ""))]
- + VUNSPEC_SYNC_STORE_IF_LOCK) )]
- + ""
- + "stcond\t%0, %1
- + brne\t%2"
- + [(set_attr "length" "6")
- + (set_attr "cc" "clobber")]
- + )
- +
- +
- +(define_expand "sync_<atomic_insn>si"
- + [(set (match_dup 2)
- + (unspec_volatile:SI
- + [(match_operand:SI 0 "avr32_ks16_memory_operand" "")
- + (match_dup 3)]
- + VUNSPEC_SYNC_SET_LOCK_AND_LOAD))
- + (set (match_dup 2)
- + (atomic_op:SI (match_dup 2)
- + (match_operand:SI 1 "register_immediate_operand" "")))
- + (set (match_dup 0)
- + (unspec_volatile:SI
- + [(match_dup 2)
- + (match_dup 3)]
- + VUNSPEC_SYNC_STORE_IF_LOCK) )
- + (use (match_dup 1))
- + (use (match_dup 4))]
- + ""
- + {
- + rtx *mem_expr = &operands[0];
- + rtx ptr_reg;
- + if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) )
- + {
- + ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0));
- + XEXP (*mem_expr, 0) = ptr_reg;
- + }
- + else
- + {
- + rtx address = XEXP (*mem_expr, 0);
- + if ( REG_P (address) )
- + ptr_reg = address;
- + else if ( REG_P (XEXP (address, 0)) )
- + ptr_reg = XEXP (address, 0);
- + else
- + ptr_reg = XEXP (address, 1);
- + }
- +
- + operands[2] = gen_reg_rtx (SImode);
- + operands[3] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ());
- + operands[4] = ptr_reg;
- +
- + }
- + )
- +
- +
- +
- +(define_expand "sync_old_<atomic_insn>si"
- + [(set (match_operand:SI 0 "register_operand" "")
- + (unspec_volatile:SI
- + [(match_operand:SI 1 "avr32_ks16_memory_operand" "")
- + (match_dup 4)]
- + VUNSPEC_SYNC_SET_LOCK_AND_LOAD))
- + (set (match_dup 3)
- + (atomic_op:SI (match_dup 0)
- + (match_operand:SI 2 "register_immediate_operand" "")))
- + (set (match_dup 1)
- + (unspec_volatile:SI
- + [(match_dup 3)
- + (match_dup 4)]
- + VUNSPEC_SYNC_STORE_IF_LOCK) )
- + (use (match_dup 2))
- + (use (match_dup 5))]
- + ""
- + {
- + rtx *mem_expr = &operands[1];
- + rtx ptr_reg;
- + if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) )
- + {
- + ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0));
- + XEXP (*mem_expr, 0) = ptr_reg;
- + }
- + else
- + {
- + rtx address = XEXP (*mem_expr, 0);
- + if ( REG_P (address) )
- + ptr_reg = address;
- + else if ( REG_P (XEXP (address, 0)) )
- + ptr_reg = XEXP (address, 0);
- + else
- + ptr_reg = XEXP (address, 1);
- + }
- +
- + operands[3] = gen_reg_rtx (SImode);
- + operands[4] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ());
- + operands[5] = ptr_reg;
- + }
- + )
- +
- +(define_expand "sync_new_<atomic_insn>si"
- + [(set (match_operand:SI 0 "register_operand" "")
- + (unspec_volatile:SI
- + [(match_operand:SI 1 "avr32_ks16_memory_operand" "")
- + (match_dup 3)]
- + VUNSPEC_SYNC_SET_LOCK_AND_LOAD))
- + (set (match_dup 0)
- + (atomic_op:SI (match_dup 0)
- + (match_operand:SI 2 "register_immediate_operand" "")))
- + (set (match_dup 1)
- + (unspec_volatile:SI
- + [(match_dup 0)
- + (match_dup 3)]
- + VUNSPEC_SYNC_STORE_IF_LOCK) )
- + (use (match_dup 2))
- + (use (match_dup 4))]
- + ""
- + {
- + rtx *mem_expr = &operands[1];
- + rtx ptr_reg;
- + if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) )
- + {
- + ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0));
- + XEXP (*mem_expr, 0) = ptr_reg;
- + }
- + else
- + {
- + rtx address = XEXP (*mem_expr, 0);
- + if ( REG_P (address) )
- + ptr_reg = address;
- + else if ( REG_P (XEXP (address, 0)) )
- + ptr_reg = XEXP (address, 0);
- + else
- + ptr_reg = XEXP (address, 1);
- + }
- +
- + operands[3] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ());
- + operands[4] = ptr_reg;
- + }
- + )
- +
- +
- +;(define_insn "sync_<atomic_insn>si"
- +; [(set (match_operand:SI 0 "memory_operand" "+RKs16")
- +; (unspec_volatile:SI
- +; [(atomic_op:SI (match_dup 0)
- +; (match_operand:SI 1 "register_operand" "r"))]
- +; VUNSPEC_SYNC_CMPXCHG))
- +; (clobber (match_scratch:SI 2 "=&r"))]
- +; ""
- +; "0:
- +; ssrf\t5
- +; ld.w\t%2,%0
- +; <atomic_asm_insn>\t%2,%1
- +; stcond\t%0, %2
- +; brne\t0b
- +; "
- +; [(set_attr "length" "14")
- +; (set_attr "cc" "clobber")]
- +; )
- +;
- +;(define_insn "sync_new_<atomic_insn>si"
- +; [(set (match_operand:SI 1 "memory_operand" "+RKs16")
- +; (unspec_volatile:SI
- +; [(atomic_op:SI (match_dup 1)
- +; (match_operand:SI 2 "register_operand" "r"))]
- +; VUNSPEC_SYNC_CMPXCHG))
- +; (set (match_operand:SI 0 "register_operand" "=&r")
- +; (atomic_op:SI (match_dup 1)
- +; (match_dup 2)))]
- +; ""
- +; "0:
- +; ssrf\t5
- +; ld.w\t%0,%1
- +; <atomic_asm_insn>\t%0,%2
- +; stcond\t%1, %0
- +; brne\t0b
- +; "
- +; [(set_attr "length" "14")
- +; (set_attr "cc" "clobber")]
- +; )
- +
- +(define_insn "sync_lock_test_and_setsi"
- + [ (set (match_operand:SI 0 "register_operand" "=&r")
- + (match_operand:SI 1 "memory_operand" "+RKu00"))
- + (set (match_dup 1)
- + (match_operand:SI 2 "register_operand" "r")) ]
- + ""
- + "xchg\t%0, %p1, %2"
- + [(set_attr "length" "4")]
- + )
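
These patterns implement gcc's __sync_* builtins for SImode: sync_compare_and_swapsi backs __sync_val_compare_and_swap and __sync_bool_compare_and_swap, the sync_old_/sync_new_<op>si expanders back __sync_fetch_and_<op> and __sync_<op>_and_fetch, and sync_lock_test_and_setsi backs __sync_lock_test_and_set via xchg. A short usage example in C (plain gcc builtins, nothing AVR32-specific assumed):

    /* Exercises the sync_* patterns defined above through gcc's __sync builtins. */
    static volatile int lock;
    static volatile int counter;

    void spin_lock(void)
    {
        /* xchg-based test-and-set: sync_lock_test_and_setsi */
        while (__sync_lock_test_and_set(&lock, 1))
            ;
    }

    void spin_unlock(void)
    {
        __sync_lock_release(&lock);
    }

    int add_and_fetch_example(void)
    {
        /* ssrf/ld.w/add/stcond sequence: sync_new_addsi */
        return __sync_add_and_fetch(&counter, 1);
    }

    int compare_and_swap_example(int expected, int desired)
    {
        /* ssrf/ld.w/cp.w/stcond sequence: sync_compare_and_swapsi */
        return __sync_val_compare_and_swap(&counter, expected, desired);
    }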
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/t-avr32 gcc-4.4.6/gcc/config/avr32/t-avr32
- --- gcc-4.4.6.orig/gcc/config/avr32/t-avr32 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/t-avr32 2011-10-22 19:23:08.528581303 +0200
- @@ -0,0 +1,118 @@
- +
- +MD_INCLUDES= $(srcdir)/config/avr32/avr32.md \
- + $(srcdir)/config/avr32/sync.md \
- + $(srcdir)/config/avr32/simd.md \
- + $(srcdir)/config/avr32/predicates.md
- +
- +s-config s-conditions s-flags s-codes s-constants s-emit s-recog s-preds \
- + s-opinit s-extract s-peep s-attr s-attrtab s-output: $(MD_INCLUDES)
- +
- +# We want fine grained libraries, so use the new code
- +# to build the floating point emulation libraries.
- +FPBIT = fp-bit.c
- +DPBIT = dp-bit.c
- +
- +LIB1ASMSRC = avr32/lib1funcs.S
- +LIB1ASMFUNCS = _avr32_f64_mul _avr32_f64_mul_fast _avr32_f64_addsub _avr32_f64_addsub_fast _avr32_f64_to_u32 \
- + _avr32_f64_to_s32 _avr32_f64_to_u64 _avr32_f64_to_s64 _avr32_u32_to_f64 \
- + _avr32_s32_to_f64 _avr32_f64_cmp_eq _avr32_f64_cmp_ge _avr32_f64_cmp_lt \
- + _avr32_f32_cmp_eq _avr32_f32_cmp_ge _avr32_f32_cmp_lt _avr32_f64_div _avr32_f64_div_fast \
- + _avr32_f32_div _avr32_f32_div_fast _avr32_f32_addsub _avr32_f32_addsub_fast \
- + _avr32_f32_mul _avr32_s32_to_f32 _avr32_u32_to_f32 _avr32_f32_to_s32 \
- + _avr32_f32_to_u32 _avr32_f32_to_f64 _avr32_f64_to_f32 _mulsi3
- +
- +#LIB2FUNCS_EXTRA += $(srcdir)/config/avr32/lib2funcs.S
- +
- +MULTILIB_OPTIONS = march=ap/march=ucr1/march=ucr2/march=ucr2nomul/march=ucr3/march=ucr3fp
- +MULTILIB_DIRNAMES = ap ucr1 ucr2 ucr2nomul ucr3 ucr3fp
- +MULTILIB_EXCEPTIONS =
- +MULTILIB_MATCHES += march?ap=mpart?ap7000
- +MULTILIB_MATCHES += march?ap=mpart?ap7001
- +MULTILIB_MATCHES += march?ap=mpart?ap7002
- +MULTILIB_MATCHES += march?ap=mpart?ap7200
- +MULTILIB_MATCHES += march?ucr1=march?uc
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3a0512es
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0128
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0512
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1128
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1256
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3a1512es
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1512
- +MULTILIB_MATCHES += march?ucr2nomul=mpart?uc3a3revd
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a364
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a364s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a464
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a464s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a4128
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a4128s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a4256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a4256s
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b064
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0128
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256es
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3b0512
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3b0512revc
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b164
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1128
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256es
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3b1512
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3b1512revc
- +MULTILIB_MATCHES += march?ucr3=mpart?uc64d3
- +MULTILIB_MATCHES += march?ucr3=mpart?uc128d3
- +MULTILIB_MATCHES += march?ucr3=mpart?uc64d4
- +MULTILIB_MATCHES += march?ucr3=mpart?uc128d4
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3c0512crevc
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3c1512crevc
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3c2512crevc
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l0256
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l0128
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l064
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l032
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l016
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l064revb
- +MULTILIB_MATCHES += march?ucr3=mpart?uc64l3u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc128l3u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc256l3u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc64l4u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc128l4u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc256l4u
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c064c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c0128c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c0256c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c0512c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c164c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c1128c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c1256c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c1512c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c264c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c2128c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c2256c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c2512c
- +MULTILIB_MATCHES += march?ucr3=mpart?mxt768e
- +
- +
- +EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o crti.o crtn.o
- +
- +CRTSTUFF_T_CFLAGS = -mrelax
- +CRTSTUFF_T_CFLAGS_S = -mrelax -fPIC
- +TARGET_LIBGCC2_CFLAGS += -mrelax
- +
- +LIBGCC = stmp-multilib
- +INSTALL_LIBGCC = install-multilib
- +
- +fp-bit.c: $(srcdir)/config/fp-bit.c
- + echo '#define FLOAT' > fp-bit.c
- + cat $(srcdir)/config/fp-bit.c >> fp-bit.c
- +
- +dp-bit.c: $(srcdir)/config/fp-bit.c
- + cat $(srcdir)/config/fp-bit.c > dp-bit.c
- +
- +
- +
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/t-avr32-linux gcc-4.4.6/gcc/config/avr32/t-avr32-linux
- --- gcc-4.4.6.orig/gcc/config/avr32/t-avr32-linux 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/t-avr32-linux 2011-10-22 19:23:08.528581303 +0200
- @@ -0,0 +1,118 @@
- +
- +MD_INCLUDES= $(srcdir)/config/avr32/avr32.md \
- + $(srcdir)/config/avr32/sync.md \
- + $(srcdir)/config/avr32/simd.md \
- + $(srcdir)/config/avr32/predicates.md
- +
- +s-config s-conditions s-flags s-codes s-constants s-emit s-recog s-preds \
- + s-opinit s-extract s-peep s-attr s-attrtab s-output: $(MD_INCLUDES)
- +
- +# We want fine grained libraries, so use the new code
- +# to build the floating point emulation libraries.
- +FPBIT = fp-bit.c
- +DPBIT = dp-bit.c
- +
- +LIB1ASMSRC = avr32/lib1funcs.S
- +LIB1ASMFUNCS = _avr32_f64_mul _avr32_f64_mul_fast _avr32_f64_addsub _avr32_f64_addsub_fast _avr32_f64_to_u32 \
- + _avr32_f64_to_s32 _avr32_f64_to_u64 _avr32_f64_to_s64 _avr32_u32_to_f64 \
- + _avr32_s32_to_f64 _avr32_f64_cmp_eq _avr32_f64_cmp_ge _avr32_f64_cmp_lt \
- + _avr32_f32_cmp_eq _avr32_f32_cmp_ge _avr32_f32_cmp_lt _avr32_f64_div _avr32_f64_div_fast \
- + _avr32_f32_div _avr32_f32_div_fast _avr32_f32_addsub _avr32_f32_addsub_fast \
- + _avr32_f32_mul _avr32_s32_to_f32 _avr32_u32_to_f32 _avr32_f32_to_s32 \
- + _avr32_f32_to_u32 _avr32_f32_to_f64 _avr32_f64_to_f32 _mulsi3
- +
- +#LIB2FUNCS_EXTRA += $(srcdir)/config/avr32/lib2funcs.S
- +
- +MULTILIB_OPTIONS = march=ap/march=ucr1/march=ucr2/march=ucr2nomul/march=ucr3/march=ucr3fp
- +MULTILIB_DIRNAMES = ap ucr1 ucr2 ucr2nomul ucr3 ucr3fp
- +MULTILIB_EXCEPTIONS =
- +MULTILIB_MATCHES += march?ap=mpart?ap7000
- +MULTILIB_MATCHES += march?ap=mpart?ap7001
- +MULTILIB_MATCHES += march?ap=mpart?ap7002
- +MULTILIB_MATCHES += march?ap=mpart?ap7200
- +MULTILIB_MATCHES += march?ucr1=march?uc
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3a0512es
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0128
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0512
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1128
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1256
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3a1512es
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1512
- +MULTILIB_MATCHES += march?ucr2nomul=mpart?uc3a3revd
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a364
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a364s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a464
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a464s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a4128
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a4128s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a4256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a4256s
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b064
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0128
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256es
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3b0512
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3b0512revc
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b164
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1128
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256es
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3b1512
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3b1512revc
- +MULTILIB_MATCHES += march?ucr3=mpart?uc64d3
- +MULTILIB_MATCHES += march?ucr3=mpart?uc128d3
- +MULTILIB_MATCHES += march?ucr3=mpart?uc64d4
- +MULTILIB_MATCHES += march?ucr3=mpart?uc128d4
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3c0512crevc
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3c1512crevc
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3c2512crevc
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l0256
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l0128
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l064
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l032
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l016
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l064revb
- +MULTILIB_MATCHES += march?ucr3=mpart?uc64l3u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc128l3u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc256l3u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc64l4u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc128l4u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc256l4u
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c064c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c0128c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c0256c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c0512c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c164c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c1128c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c1256c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c1512c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c264c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c2128c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c2256c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c2512c
- +MULTILIB_MATCHES += march?ucr3=mpart?mxt768e
- +
- +
- +EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o
- +
- +CRTSTUFF_T_CFLAGS = -mrelax
- +CRTSTUFF_T_CFLAGS_S = -mrelax -fPIC
- +TARGET_LIBGCC2_CFLAGS += -mrelax
- +
- +LIBGCC = stmp-multilib
- +INSTALL_LIBGCC = install-multilib
- +
- +fp-bit.c: $(srcdir)/config/fp-bit.c
- + echo '#define FLOAT' > fp-bit.c
- + cat $(srcdir)/config/fp-bit.c >> fp-bit.c
- +
- +dp-bit.c: $(srcdir)/config/fp-bit.c
- + cat $(srcdir)/config/fp-bit.c > dp-bit.c
- +
- +
- +
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/t-elf gcc-4.4.6/gcc/config/avr32/t-elf
- --- gcc-4.4.6.orig/gcc/config/avr32/t-elf 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/t-elf 2011-10-22 19:23:08.528581303 +0200
- @@ -0,0 +1,16 @@
- +
- +# Assemble startup files.
- +$(T)crti.o: $(srcdir)/config/avr32/crti.asm $(GCC_PASSES)
- + $(GCC_FOR_TARGET) $(CRTSTUFF_CFLAGS) $(CRTSTUFF_T_CFLAGS) $(INCLUDES) \
- + -c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/avr32/crti.asm
- +
- +$(T)crtn.o: $(srcdir)/config/avr32/crtn.asm $(GCC_PASSES)
- + $(GCC_FOR_TARGET) $(CRTSTUFF_CFLAGS) $(CRTSTUFF_T_CFLAGS) $(INCLUDES) \
- + -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/avr32/crtn.asm
- +
- +
- +# Build the libraries for both hard and soft floating point
- +EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o crti.o crtn.o
- +
- +LIBGCC = stmp-multilib
- +INSTALL_LIBGCC = install-multilib
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/uc3fpu.md gcc-4.4.6/gcc/config/avr32/uc3fpu.md
- --- gcc-4.4.6.orig/gcc/config/avr32/uc3fpu.md 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/uc3fpu.md 2011-10-22 19:23:08.528581303 +0200
- @@ -0,0 +1,199 @@
- +;; AVR32 machine description file for Floating-Point instructions.
- +;; Copyright 2003-2006 Atmel Corporation.
- +;;
- +;;
- +;; This file is part of GCC.
- +;;
- +;; This program is free software; you can redistribute it and/or modify
- +;; it under the terms of the GNU General Public License as published by
- +;; the Free Software Foundation; either version 2 of the License, or
- +;; (at your option) any later version.
- +;;
- +;; This program is distributed in the hope that it will be useful,
- +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
- +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- +;; GNU General Public License for more details.
- +;;
- +;; You should have received a copy of the GNU General Public License
- +;; along with this program; if not, write to the Free Software
- +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- +
- +(define_insn "*movsf_uc3fp"
- + [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,r,m")
- + (match_operand:SF 1 "general_operand" "r,G,m,r"))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "@
- + mov\t%0, %1
- + mov\t%0, %1
- + ld.w\t%0, %1
- + st.w\t%0, %1"
- + [(set_attr "length" "2,4,4,4")
- + (set_attr "type" "alu,alu,load,store")])
- +
- +(define_insn "mulsf3"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (mult:SF (match_operand:SF 1 "register_operand" "r")
- + (match_operand:SF 2 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fmul.s\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "fmul")])
- +
- +(define_insn "nmulsf3"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (neg:SF (mult:SF (match_operand:SF 1 "register_operand" "%r")
- + (match_operand:SF 2 "register_operand" "r"))))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fnmul.s\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "fmul")])
- +
- +(define_insn "macsf3"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (plus:SF (mult:SF (match_operand:SF 1 "register_operand" "r")
- + (match_operand:SF 2 "register_operand" "r"))
- + (match_operand:SF 3 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fmac.s\t%0, %3, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "fmul")])
- +
- +;(define_insn "nmacsf3"
- +; [(set (match_operand:SF 0 "register_operand" "=r")
- +; (plus:SF (neg:SF (match_operand:SF 1 "register_operand" "r"))
- +; (mult:SF(match_operand:SF 2 "register_operand" "r")
- +; (match_operand:SF 3 "register_operand" "r"))))]
- +; "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- +; "fnmac.s\t%0, %1, %2, %3"
- +; [(set_attr "length" "4")
- +; (set_attr "type" "fmul")])
- +
- +(define_insn "nmacsf3"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (minus:SF (mult:SF (match_operand:SF 2 "register_operand" "r")
- + (match_operand:SF 3 "register_operand" "r"))
- + (match_operand:SF 1 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fnmac.s\t%0, %1, %2, %3"
- + [(set_attr "length" "4")
- + (set_attr "type" "fmul")])
- +
- +(define_insn "msubacsf3"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (minus:SF (match_operand:SF 3 "register_operand" "r")
- + (mult:SF (match_operand:SF 1 "register_operand" "r")
- + (match_operand:SF 2 "register_operand" "r"))))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fmsc.s\t%0, %3, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "fmul")])
- +
- +(define_insn "nmsubacsf3"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (minus:SF (neg:SF (mult:SF (match_operand:SF 1 "register_operand" "r")
- + (match_operand:SF 2 "register_operand" "r")))
- + (match_operand:SF 3 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fnmsc.s\t%0, %3, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "fmul")])
- +
- +(define_insn "addsf3"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (plus:SF (match_operand:SF 1 "register_operand" "%r")
- + (match_operand:SF 2 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fadd.s\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "fmul")])
- +
- +(define_insn "subsf3"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (minus:SF (match_operand:SF 1 "register_operand" "r")
- + (match_operand:SF 2 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fsub.s\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "fmul")])
- +
- +(define_insn "fixuns_truncsfsi2"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (unsigned_fix:SI (match_operand:SF 1 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fcastrs.uw\t%0, %1"
- + [(set_attr "length" "4")])
- +
- +(define_insn "fix_truncsfsi2"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (fix:SI (match_operand:SF 1 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fcastrs.sw\t%0, %1"
- + [(set_attr "length" "4")])
- +
- +(define_insn "floatunssisf2"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (unsigned_float:SF (match_operand:SI 1 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fcastuw.s\t%0, %1"
- + [(set_attr "length" "4")])
- +
- +(define_insn "floatsisf2"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (float:SF (match_operand:SI 1 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fcastsw.s\t%0, %1"
- + [(set_attr "length" "4")])
- +
- +(define_insn "cmpsf_internal_uc3fp"
- + [(set (cc0)
- + (compare:CC
- + (match_operand:SF 0 "register_operand" "r")
- + (match_operand:SF 1 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + {
- + avr32_branch_type = CMP_SF;
- + if (!rtx_equal_p(cc_prev_status.mdep.value, SET_SRC(PATTERN (insn))) )
- + return "fcmp.s\t%0, %1";
- + return "";
- + }
- + [(set_attr "length" "4")
- + (set_attr "cc" "compare")])
- +
- +(define_expand "divsf3"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (div:SF (match_operand:SF 1 "register_operand" "r")
- + (match_operand:SF 2 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT && flag_unsafe_math_optimizations"
- + "{
- + emit_insn(gen_frcpa_internal(operands[0],operands[2]));
- + emit_insn(gen_mulsf3(operands[0],operands[0],operands[1]));
- + DONE;
- + }"
- +)
- +
- +(define_insn "frcpa_internal"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (unspec:SF [(match_operand:SF 1 "register_operand" "r")] UNSPEC_FRCPA))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "frcpa.s %0,%1"
- + [(set_attr "length" "4")])
- +
- +(define_expand "sqrtsf2"
- + [(set (match_operand:SF 0 "register_operand" "")
- + (sqrt:SF (match_operand:SF 1 "register_operand" "")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT && flag_unsafe_math_optimizations"
- + "
- +{
- + rtx scratch = gen_reg_rtx (SFmode);
- + emit_insn (gen_rsqrtsf2 (scratch, operands[1], CONST1_RTX (SFmode)));
- + emit_insn (gen_divsf3(operands[0], force_reg (SFmode, CONST1_RTX (SFmode)),
- + scratch));
- + DONE;
- +}")
- +
- +(define_insn "rsqrtsf2"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (div:SF (match_operand:SF 2 "const_1f_operand" "F")
- + (sqrt:SF (match_operand:SF 1 "register_operand" "?r"))))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "frsqrta.s %1, %0")
- diff -Nur gcc-4.4.6.orig/gcc/config/avr32/uclinux-elf.h gcc-4.4.6/gcc/config/avr32/uclinux-elf.h
- --- gcc-4.4.6.orig/gcc/config/avr32/uclinux-elf.h 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config/avr32/uclinux-elf.h 2011-10-22 19:23:08.528581303 +0200
- @@ -0,0 +1,20 @@
- +
- +/* Run-time Target Specification. */
- +#undef TARGET_VERSION
- +#define TARGET_VERSION fputs (" (AVR32 uClinux with ELF)", stderr)
- +
- +/* We don't want a .jcr section on uClinux. As if this makes a difference... */
- +#define TARGET_USE_JCR_SECTION 0
- +
- +/* Here we go. Drop the crtbegin/crtend stuff completely. */
- +#undef STARTFILE_SPEC
- +#define STARTFILE_SPEC \
- + "%{!shared: %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s}" \
- + " %{!p:%{profile:gcrt1.o%s}" \
- + " %{!profile:crt1.o%s}}}} crti.o%s"
- +
- +#undef ENDFILE_SPEC
- +#define ENDFILE_SPEC "crtn.o%s"
- +
- +#undef TARGET_DEFAULT
- +#define TARGET_DEFAULT (AVR32_FLAG_NO_INIT_GOT)
- diff -Nur gcc-4.4.6.orig/gcc/config/host-linux.c gcc-4.4.6/gcc/config/host-linux.c
- --- gcc-4.4.6.orig/gcc/config/host-linux.c 2009-02-20 16:20:38.000000000 +0100
- +++ gcc-4.4.6/gcc/config/host-linux.c 2011-10-22 19:23:08.528581303 +0200
- @@ -25,6 +25,9 @@
- #include "hosthooks.h"
- #include "hosthooks-def.h"
-
- +#ifndef SSIZE_MAX
- +#define SSIZE_MAX LONG_MAX
- +#endif
-
- /* Linux has a feature called exec-shield-randomize that perturbs the
- address of non-fixed mapped segments by a (relatively) small amount.
- diff -Nur gcc-4.4.6.orig/gcc/config.gcc gcc-4.4.6/gcc/config.gcc
- --- gcc-4.4.6.orig/gcc/config.gcc 2011-02-18 22:39:51.000000000 +0100
- +++ gcc-4.4.6/gcc/config.gcc 2011-10-22 19:23:08.528581303 +0200
- @@ -810,6 +810,24 @@
- avr-*-*)
- tm_file="avr/avr.h dbxelf.h"
- ;;
- +avr32*-*-linux*)
- + tm_file="dbxelf.h elfos.h linux.h avr32/linux-elf.h avr32/avr32.h "
- + tmake_file="t-linux avr32/t-avr32-linux"
- + extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o"
- + extra_modes=avr32/avr32-modes.def
- + gnu_ld=yes
- + ;;
- +avr32*-*-uclinux*)
- + tm_file="dbxelf.h elfos.h linux.h avr32/linux-elf.h avr32/uclinux-elf.h avr32/avr32.h"
- + tmake_file="t-linux avr32/t-avr32-linux"
- + extra_modes=avr32/avr32-modes.def
- + gnu_ld=yes
- + ;;
- +avr32-*-*)
- + tm_file="dbxelf.h elfos.h avr32/avr32.h avr32/avr32-elf.h"
- + tmake_file="avr32/t-avr32 avr32/t-elf"
- + extra_modes=avr32/avr32-modes.def
- + ;;
- bfin*-elf*)
- tm_file="${tm_file} dbxelf.h elfos.h bfin/elf.h"
- tmake_file=bfin/t-bfin-elf
- @@ -2736,6 +2754,32 @@
- fi
- ;;
-
- + avr32*-*-*)
- + supported_defaults="part arch"
- +
- + case "$with_part" in
- + "" \
- + | "ap7000" | "ap7010" | "ap7020" | "uc3a0256" | "uc3a0512" | "uc3a1128" | "uc3a1256" | "uc3a1512" )
- + # OK
- + ;;
- + *)
- + echo "Unknown part used in --with-part=$with_part" 1>&2
- + exit 1
- + ;;
- + esac
- +
- + case "$with_arch" in
- + "" \
- + | "ap" | "uc")
- + # OK
- + ;;
- + *)
- + echo "Unknown arch used in --with-arch=$with_arch" 1>&2
- + exit 1
- + ;;
- + esac
- + ;;
- +
- fr*-*-*linux*)
- supported_defaults=cpu
- case "$with_cpu" in
- diff -Nur gcc-4.4.6.orig/gcc/config.gcc.orig gcc-4.4.6/gcc/config.gcc.orig
- --- gcc-4.4.6.orig/gcc/config.gcc.orig 1970-01-01 01:00:00.000000000 +0100
- +++ gcc-4.4.6/gcc/config.gcc.orig 2011-10-22 19:23:08.528581303 +0200
- @@ -0,0 +1,3208 @@
- +# GCC target-specific configuration file.
- +# Copyright 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
- +# 2008, 2009, 2010 Free Software Foundation, Inc.
- +
- +#This file is part of GCC.
- +
- +#GCC is free software; you can redistribute it and/or modify it under
- +#the terms of the GNU General Public License as published by the Free
- +#Software Foundation; either version 3, or (at your option) any later
- +#version.
- +
- +#GCC is distributed in the hope that it will be useful, but WITHOUT
- +#ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- +#FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- +#for more details.
- +
- +#You should have received a copy of the GNU General Public License
- +#along with GCC; see the file COPYING3. If not see
- +#<http://www.gnu.org/licenses/>.
- +
- +# This is the GCC target-specific configuration file
- +# where a configuration type is mapped to different system-specific
- +# definitions and files. This is invoked by the autoconf-generated
- +# configure script. Putting it in a separate shell file lets us skip
- +# running autoconf when modifying target-specific information.
- +
- +# When you change the cases in the OS or target switches, consider
- +# updating ../libgcc/config.host also.
- +
- +# This file switches on the shell variable ${target}, and also uses the
- +# following shell variables:
- +#
- +# with_* Various variables as set by configure.
- +#
- +# enable_threads Either the name, yes or no depending on whether
- +# threads support was requested.
- +#
- +# default_use_cxa_atexit
- +# The default value for the $enable___cxa_atexit
- +# variable. enable___cxa_atexit needs to be set to
- +# "yes" for the correct operation of C++ destructors
- +# but it relies upon the presence of a non-standard C
- +# library function called __cxa_atexit.
- +# Since not all C libraries provide __cxa_atexit the
- +# default value of $default_use_cxa_atexit is set to
- +# "no" except for targets which are known to be OK.
- +#
- +# gas_flag Either yes or no depending on whether GNU as was
- +# requested.
- +#
- +# gnu_ld_flag Either yes or no depending on whether GNU ld was
- +# requested.
- +
- +# This file sets the following shell variables for use by the
- +# autoconf-generated configure script:
- +#
- +# cpu_type The name of the cpu, if different from the first
- +# chunk of the canonical target name.
- +#
- +# tm_defines List of target macros to define for all compilations.
- +#
- +# tm_file A list of target macro files, if different from
- +# "$cpu_type/$cpu_type.h". Usually it's constructed
- +# per target in a way like this:
- +# tm_file="${tm_file} dbxelf.h elfos.h svr4.h ${cpu_type.h}/elf.h"
- +# Note that the preferred order is:
- +# - specific target header "${cpu_type}/${cpu_type.h}"
- +# - generic headers like dbxelf.h elfos.h, etc.
- +# - specializing target headers like ${cpu_type.h}/elf.h
- +# This helps to keep OS specific stuff out of the CPU
- +# defining header ${cpu_type}/${cpu_type.h}.
- +#
- +# It is possible to include automatically-generated
- +# build-directory files by prefixing them with "./".
- +# All other files should relative to $srcdir/config.
- +#
- +# tm_p_file Location of file with declarations for functions
- +# in $out_file.
- +#
- +# out_file The name of the machine description C support
- +# file, if different from "$cpu_type/$cpu_type.c".
- +#
- +# md_file The name of the machine-description file, if
- +# different from "$cpu_type/$cpu_type.md".
- +#
- +# tmake_file A list of machine-description-specific
- +# makefile-fragments, if different from
- +# "$cpu_type/t-$cpu_type".
- +#
- +# extra_modes The name of the file containing a list of extra
- +# machine modes, if necessary and different from
- +# "$cpu_type/$cpu_type-modes.def".
- +#
- +# extra_objs List of extra objects that should be linked into
- +# the compiler proper (cc1, cc1obj, cc1plus)
- +# depending on target.
- +#
- +# extra_gcc_objs List of extra objects that should be linked into
- +# the compiler driver (gcc) depending on target.
- +#
- +# extra_headers List of used header files from the directory
- +# config/${cpu_type}.
- +#
- +# use_gcc_tgmath If set, add tgmath.h to the list of used header
- +# files.
- +#
- +# extra_passes List of extra executables compiled for this target
- +# machine, used for compiling from source to object.
- +#
- +# extra_parts List of extra object files that should be compiled
- +# for this target machine.
- +#
- +# extra_programs Like extra_passes, but these are used when linking.
- +#
- +# extra_options List of target-dependent .opt files.
- +#
- +# c_target_objs List of extra target-dependent objects that be
- +# linked into the C compiler only.
- +#
- +# cxx_target_objs List of extra target-dependent objects that be
- +# linked into the C++ compiler only.
- +#
- +# fortran_target_objs List of extra target-dependent objects that be
- +# linked into the fortran compiler only.
- +#
- +# target_gtfiles List of extra source files with type information.
- +#
- +# xm_defines List of macros to define when compiling for the
- +# target machine.
- +#
- +# xm_file List of files to include when compiling for the
- +# target machine.
- +#
- +# use_collect2 Set to yes or no, depending on whether collect2
- +# will be used.
- +#
- +# target_cpu_default Set to override the default target model.
- +#
- +# gdb_needs_out_file_path
- +# Set to yes if gdb needs a dir command with
- +# `dirname $out_file`.
- +#
- +# thread_file Set to control which thread package to use.
- +#
- +# gas Set to yes or no depending on whether the target
- +# system normally uses GNU as.
- +#
- +# need_64bit_hwint Set to yes if HOST_WIDE_INT must be 64 bits wide
- +# for this target. This is true if this target
- +# supports "long" or "wchar_t" wider than 32 bits,
- +# or BITS_PER_WORD is wider than 32 bits.
- +# The setting made here must match the one made in
- +# other locations such as libcpp/configure.ac
- +#
- +# configure_default_options
- +# Set to an initializer for configure_default_options
- +# in configargs.h, based on --with-cpu et cetera.
- +#
- +# use_fixproto Set to "yes" if fixproto should be run normally,
- +# "no" if fixproto should never be run.
- +
- +# The following variables are used in each case-construct to build up the
- +# outgoing variables:
- +#
- +# gnu_ld Set to yes or no depending on whether the target
- +# system normally uses GNU ld.
- +
- +out_file=
- +tmake_file=
- +extra_headers=
- +use_gcc_tgmath=yes
- +extra_passes=
- +extra_parts=
- +extra_programs=
- +extra_objs=
- +extra_gcc_objs=
- +extra_options=
- +c_target_objs=
- +cxx_target_objs=
- +fortran_target_objs=
- +tm_defines=
- +xm_defines=
- +# Set this to force installation and use of collect2.
- +use_collect2=
- +# Set this to override the default target model.
- +target_cpu_default=
- +# Set this if gdb needs a dir command with `dirname $out_file`
- +gdb_needs_out_file_path=
- +# Set this to control which thread package will be used.
- +thread_file=
- +# Reinitialize these from the flag values every loop pass, since some
- +# configure entries modify them.
- +gas="$gas_flag"
- +gnu_ld="$gnu_ld_flag"
- +default_use_cxa_atexit=no
- +target_gtfiles=
- +need_64bit_hwint=
- +
- +# Default to not using fixproto. Targets which need fixproto should
- +# specifically set this to 'yes'.
- +use_fixproto=no
- +
- +# Don't carry these over build->host->target. Please.
- +xm_file=
- +md_file=
- +
- +# Obsolete configurations.
- +case ${target} in
- +# Avoid generic cases below matching.
- + h8300-*-rtems* | h8300-*-elf* \
- + | sh-*-elf* | sh-*-symbianelf* | sh-*-linux* | sh-*-netbsdelf* \
- + | sh-*-rtems* | sh-wrs-vxworks) ;;
- + arm-*-coff* \
- + | armel-*-coff* \
- + | h8300-*-* \
- + | i[34567]86-*-aout* \
- + | i[34567]86-*-coff* \
- + | m68k-*-aout* \
- + | m68k-*-coff* \
- + | sh-*-* \
- + | pdp11-*-bsd \
- + | rs6000-ibm-aix4.[12]* \
- + | powerpc-ibm-aix4.[12]* \
- + )
- + if test "x$enable_obsolete" != xyes; then
- + echo "*** Configuration ${target} is obsolete." >&2
- + echo "*** Specify --enable-obsolete to build it anyway." >&2
- + echo "*** Support will be REMOVED in the next major release of GCC," >&2
- + echo "*** unless a maintainer comes forward." >&2
- + exit 1
- + fi;;
- +esac
- +
- +# Unsupported targets list. Do not put an entry in this list unless
- +# it would otherwise be caught by a more permissive pattern. The list
- +# should be in alphabetical order.
- +case ${target} in
- + i[34567]86-go32-* \
- + | i[34567]86-*-go32* \
- + | mips64orion*-*-rtems* \
- + | sparc-hal-solaris2* \
- + | thumb-*-* \
- + | *-*-linux*aout* \
- + | *-*-linux*coff* \
- + | *-*-linux*libc1* \
- + | *-*-linux*oldld* \
- + | *-*-rtemsaout* \
- + | *-*-rtemscoff* \
- + | *-*-solaris2.[0-6] \
- + | *-*-solaris2.[0-6].* \
- + | *-*-sysv* \
- + | vax-*-vms* \
- + )
- + echo "*** Configuration ${target} not supported" 1>&2
- + exit 1
- + ;;
- +esac
- +
- +# Set default cpu_type, tm_file, tm_p_file and xm_file so it can be
- +# updated in each machine entry. Also set default extra_headers for some
- +# machines.
- +tm_p_file=
- +cpu_type=`echo ${target} | sed 's/-.*$//'`
- +cpu_is_64bit=
- +case ${target} in
- +m32c*-*-*)
- + cpu_type=m32c
- + tmake_file=m32c/t-m32c
- + ;;
- +alpha*-*-*)
- + cpu_type=alpha
- + need_64bit_hwint=yes
- + ;;
- +am33_2.0-*-linux*)
- + cpu_type=mn10300
- + ;;
- +arm*-*-*)
- + cpu_type=arm
- + extra_headers="mmintrin.h arm_neon.h"
- + c_target_objs="arm-c.o"
- + cxx_target_objs="arm-c.o"
- + ;;
- +bfin*-*)
- + cpu_type=bfin
- + ;;
- +crisv32-*)
- + cpu_type=cris
- + ;;
- +frv*) cpu_type=frv
- + ;;
- +fido-*-*)
- + cpu_type=m68k
- + extra_headers=math-68881.h
- + ;;
- +i[34567]86-*-*)
- + cpu_type=i386
- + c_target_objs="i386-c.o"
- + cxx_target_objs="i386-c.o"
- + extra_headers="cpuid.h mmintrin.h mm3dnow.h xmmintrin.h emmintrin.h
- + pmmintrin.h tmmintrin.h ammintrin.h smmintrin.h
- + nmmintrin.h bmmintrin.h mmintrin-common.h
- + wmmintrin.h immintrin.h x86intrin.h avxintrin.h
- + cross-stdarg.h"
- + ;;
- +x86_64-*-*)
- + cpu_type=i386
- + c_target_objs="i386-c.o"
- + cxx_target_objs="i386-c.o"
- + extra_headers="cpuid.h mmintrin.h mm3dnow.h xmmintrin.h emmintrin.h
- + pmmintrin.h tmmintrin.h ammintrin.h smmintrin.h
- + nmmintrin.h bmmintrin.h mmintrin-common.h
- + wmmintrin.h immintrin.h x86intrin.h avxintrin.h
- + cross-stdarg.h"
- + need_64bit_hwint=yes
- + ;;
- +ia64-*-*)
- + extra_headers=ia64intrin.h
- + need_64bit_hwint=yes
- + ;;
- +hppa*-*-*)
- + cpu_type=pa
- + ;;
- +m32r*-*-*)
- + cpu_type=m32r
- + ;;
- +m68k-*-*)
- + extra_headers=math-68881.h
- + ;;
- +mips*-*-*)
- + cpu_type=mips
- + need_64bit_hwint=yes
- + extra_headers="loongson.h"
- + ;;
- +picochip-*-*)
- + cpu_type=picochip
- + ;;
- +powerpc*-*-*)
- + cpu_type=rs6000
- + extra_headers="ppc-asm.h altivec.h spe.h ppu_intrinsics.h paired.h spu2vmx.h vec_types.h si2vmx.h"
- + need_64bit_hwint=yes
- + case x$with_cpu in
- + xpowerpc64|xdefault64|x6[23]0|x970|xG5|xpower[34567]|xpower6x|xrs64a|xcell)
- + cpu_is_64bit=yes
- + ;;
- + esac
- + ;;
- +rs6000*-*-*)
- + need_64bit_hwint=yes
- + ;;
- +score*-*-*)
- + cpu_type=score
- + ;;
- +sparc*-*-*)
- + cpu_type=sparc
- + need_64bit_hwint=yes
- + ;;
- +spu*-*-*)
- + cpu_type=spu
- + need_64bit_hwint=yes
- + ;;
- +s390*-*-*)
- + cpu_type=s390
- + need_64bit_hwint=yes
- + ;;
- +# Note the 'l'; we need to be able to match e.g. "shle" or "shl".
- +sh[123456789lbe]*-*-* | sh-*-*)
- + cpu_type=sh
- + need_64bit_hwint=yes
- + ;;
- +esac
- +
- +tm_file=${cpu_type}/${cpu_type}.h
- +if test -f ${srcdir}/config/${cpu_type}/${cpu_type}-protos.h
- +then
- + tm_p_file=${cpu_type}/${cpu_type}-protos.h
- +fi
- +extra_modes=
- +if test -f ${srcdir}/config/${cpu_type}/${cpu_type}-modes.def
- +then
- + extra_modes=${cpu_type}/${cpu_type}-modes.def
- +fi
- +if test -f ${srcdir}/config/${cpu_type}/${cpu_type}.opt
- +then
- + extra_options="${extra_options} ${cpu_type}/${cpu_type}.opt"
- +fi
- +
- +case ${target} in
- +i[34567]86-*-*)
- + if test "x$enable_cld" = xyes; then
- + tm_defines="${tm_defines} USE_IX86_CLD=1"
- + fi
- + ;;
- +x86_64-*-*)
- + tm_file="i386/biarch64.h ${tm_file}"
- + if test "x$enable_cld" = xyes; then
- + tm_defines="${tm_defines} USE_IX86_CLD=1"
- + fi
- + ;;
- +esac
- +
- +# On a.out targets, we need to use collect2.
- +case ${target} in
- +*-*-*aout*)
- + use_collect2=yes
- + ;;
- +esac
- +
- +# Common parts for widely ported systems.
- +case ${target} in
- +*-*-darwin*)
- + tm_file="${tm_file} darwin.h"
- + case ${target} in
- + *-*-darwin[912]*)
- + tm_file="${tm_file} darwin9.h"
- + ;;
- + esac
- + tm_file="${tm_file} ${cpu_type}/darwin.h"
- + tm_p_file="${tm_p_file} darwin-protos.h"
- + tmake_file="t-darwin ${cpu_type}/t-darwin t-slibgcc-darwin"
- + target_gtfiles="\$(srcdir)/config/darwin.c"
- + extra_options="${extra_options} darwin.opt"
- + c_target_objs="${c_target_objs} darwin-c.o"
- + cxx_target_objs="${cxx_target_objs} darwin-c.o"
- + fortran_target_objs="darwin-f.o"
- + extra_objs="darwin.o"
- + extra_gcc_objs="darwin-driver.o"
- + default_use_cxa_atexit=yes
- + case ${enable_threads} in
- + "" | yes | posix) thread_file='posix' ;;
- + esac
- + ;;
- +*-*-freebsd[12] | *-*-freebsd[12].* | *-*-freebsd*aout*)
- + # This is the place-holder for the generic a.out configuration
- + # of FreeBSD. No actual configuration resides here since
- + # there was only ever a bare-bones ix86 configuration for
- + # a.out and it exists solely in the machine-specific section.
- + # This place-holder must exist to avoid dropping into
- + # the generic ELF configuration of FreeBSD (i.e. it must be
- + # ordered before that section).
- + ;;
- +*-*-freebsd*)
- + # This is the generic ELF configuration of FreeBSD. Later
- + # machine-specific sections may refine and add to this
- + # configuration.
- + #
- + # Due to tm_file entry ordering issues that vary between cpu
- + # architectures, we only define fbsd_tm_file to allow the
- + # machine-specific section to dictate the final order of all
- + # entries of tm_file with the minor exception that components
- + # of the tm_file set here will always be of the form:
- + #
- + # freebsd<version_number>.h [freebsd-<conf_option>.h ...] freebsd-spec.h freebsd.h
- + #
- + # The machine-specific section should not tamper with this
- + # ordering but may order all other entries of tm_file as it
- + # pleases around the provided core setting.
- + gas=yes
- + gnu_ld=yes
- + extra_parts="crtbegin.o crtend.o crtbeginS.o crtendS.o"
- + fbsd_major=`echo ${target} | sed -e 's/.*freebsd//g' | sed -e 's/\..*//g'`
- + tm_defines="${tm_defines} FBSD_MAJOR=${fbsd_major}"
- + tmake_file="t-slibgcc-elf-ver t-freebsd"
- + case ${enable_threads} in
- + no)
- + fbsd_tm_file="${fbsd_tm_file} freebsd-nthr.h"
- + ;;
- + "" | yes | posix)
- + thread_file='posix'
- + tmake_file="${tmake_file} t-freebsd-thread"
- + # Before 5.0, FreeBSD can't bind shared libraries to -lc
- + # when "optionally" threaded via weak pthread_* checks.
- + case ${target} in
- + *-*-freebsd[34] | *-*-freebsd[34].*)
- + tmake_file="${tmake_file} t-slibgcc-nolc-override"
- + ;;
- + esac
- + ;;
- + *)
- + echo 'Unknown thread configuration for FreeBSD'
- + exit 1
- + ;;
- + esac
- + fbsd_tm_file="${fbsd_tm_file} freebsd-spec.h freebsd.h"
- + case ${target} in
- + *-*-freebsd[345].*)
- + :;;
- + *)
- + default_use_cxa_atexit=yes;;
- + esac
- + ;;
- +*-*-linux* | frv-*-*linux* | *-*-kfreebsd*-gnu | *-*-knetbsd*-gnu | *-*-gnu* | *-*-kopensolaris*-gnu)
- + extra_parts="crtbegin.o crtbeginS.o crtbeginT.o crtend.o crtendS.o"
- + gas=yes
- + gnu_ld=yes
- + case ${enable_threads} in
- + "" | yes | posix) thread_file='posix' ;;
- + esac
- + tmake_file="t-slibgcc-elf-ver t-linux"
- + case $target in
- + *-*-linux* | frv-*-*linux* | *-*-kfreebsd*-gnu | *-*-knetbsd*-gnu | *-*-kopensolaris*-gnu)
- + :;;
- + *-*-gnu*)
- + tmake_file="$tmake_file t-gnu";;
- + esac
- + # glibc / uclibc switch. uclibc isn't usable for GNU/Hurd and neither for
- + # GNU/k*BSD.
- + case $target in
- + *linux*)
- + extra_options="$extra_options linux.opt";;
- + *)
- + tm_defines="$tm_defines OPTION_GLIBC=1";;
- + esac
- + case ${target} in
- + *-*-*uclibc*)
- + tm_defines="${tm_defines} UCLIBC_DEFAULT=1"
- + ;;
- + *)
- + tm_defines="${tm_defines} UCLIBC_DEFAULT=0"
- + ;;
- + esac
- + # Assume that glibc or uClibc are being used and so __cxa_atexit is provided.
- + default_use_cxa_atexit=yes
- + use_gcc_tgmath=no
- + ;;
- +*-*-netbsd*)
- + tmake_file="t-slibgcc-elf-ver t-libc-ok t-netbsd t-libgcc-pic"
- + gas=yes
- + gnu_ld=yes
- +
- + # NetBSD 2.0 and later get POSIX threads enabled by default.
- + # Allow them to be explicitly enabled on any other version.
- + case ${enable_threads} in
- + "")
- + case ${target} in
- + *-*-netbsd[2-9]* | *-*-netbsdelf[2-9]*)
- + thread_file='posix'
- + tm_defines="${tm_defines} NETBSD_ENABLE_PTHREADS"
- + ;;
- + esac
- + ;;
- + yes | posix)
- + thread_file='posix'
- + tm_defines="${tm_defines} NETBSD_ENABLE_PTHREADS"
- + ;;
- + esac
- +
- + # NetBSD 1.7 and later are set up to use GCC's crtstuff for
- + # ELF configurations. We will clear extra_parts in the
- + # a.out configurations.
- + case ${target} in
- + *-*-netbsd*1.[7-9]* | *-*-netbsd[2-9]* | *-*-netbsdelf[2-9]*)
- + extra_parts="crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o"
- + ;;
- + esac
- +
- + # NetBSD 2.0 and later provide __cxa_atexit(), which we use by
- + # default (unless overridden by --disable-__cxa_atexit).
- + case ${target} in
- + *-*-netbsd[2-9]* | *-*-netbsdelf[2-9]*)
- + default_use_cxa_atexit=yes
- + ;;
- + esac
- + ;;
- +*-*-openbsd*)
- + tmake_file="t-libc-ok t-openbsd t-libgcc-pic"
- + case ${enable_threads} in
- + yes)
- + thread_file='posix'
- + tmake_file="${tmake_file} t-openbsd-thread"
- + ;;
- + esac
- + case ${target} in
- + *-*-openbsd2.*|*-*-openbsd3.[012])
- + tm_defines="${tm_defines} HAS_LIBC_R=1" ;;
- + esac
- + ;;
- +*-*-rtems*)
- + case ${enable_threads} in
- + yes) thread_file='rtems' ;;
- + esac
- + ;;
- +*-*-vxworks*)
- + tmake_file=t-vxworks
- + xm_defines=POSIX
- + extra_options="${extra_options} vxworks.opt"
- + extra_objs=vxworks.o
- + case ${enable_threads} in
- + no) ;;
- + "" | yes | vxworks) thread_file='vxworks' ;;
- + *) echo 'Unknown thread configuration for VxWorks'; exit 1 ;;
- + esac
- + ;;
- +*-*-elf)
- + # Assume that newlib is being used and so __cxa_atexit is provided.
- + default_use_cxa_atexit=yes
- + ;;
- +esac
- +
- +case ${target} in
- +# Support site-specific machine types.
- +*local*)
- + rest=`echo ${target} | sed -e "s/$cpu_type-//"`
- + tm_file=${cpu_type}/$rest.h
- + if test -f $srcdir/config/${cpu_type}/xm-$rest.h
- + then xm_file=${cpu_type}/xm-$rest.h
- + fi
- + if test -f $srcdir/config/${cpu_type}/t-$rest
- + then tmake_file=${cpu_type}/t-$rest
- + fi
- + ;;
- +alpha*-*-linux*)
- + tm_file="${tm_file} alpha/elf.h alpha/linux.h alpha/linux-elf.h"
- + target_cpu_default="MASK_GAS"
- + tmake_file="${tmake_file} alpha/t-crtfm alpha/t-alpha alpha/t-ieee alpha/t-linux"
- + ;;
- +alpha*-*-gnu*)
- + tm_file="$tm_file alpha/elf.h alpha/linux.h alpha/linux-elf.h gnu.h alpha/gnu.h"
- + target_cpu_default="MASK_GAS"
- + tmake_file="${tmake_file} alpha/t-crtfm alpha/t-alpha alpha/t-ieee"
- + ;;
- +alpha*-*-freebsd*)
- + tm_file="${tm_file} ${fbsd_tm_file} alpha/elf.h alpha/freebsd.h"
- + target_cpu_default="MASK_GAS"
- + tmake_file="${tmake_file} alpha/t-crtfm alpha/t-alpha alpha/t-ieee"
- + extra_parts="crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o"
- + ;;
- +alpha*-*-netbsd*)
- + tm_file="${tm_file} netbsd.h alpha/elf.h netbsd-elf.h alpha/netbsd.h"
- + target_cpu_default="MASK_GAS"
- + tmake_file="${tmake_file} alpha/t-alpha alpha/t-ieee"
- + ;;
- +alpha*-*-openbsd*)
- + tm_defines="${tm_defines} OBSD_NO_DYNAMIC_LIBRARIES OBSD_HAS_DECLARE_FUNCTION_NAME OBSD_HAS_DECLARE_FUNCTION_SIZE OBSD_HAS_DECLARE_OBJECT"
- + tm_file="alpha/alpha.h openbsd.h alpha/openbsd.h"
- + # default x-alpha is only appropriate for dec-osf.
- + target_cpu_default="MASK_GAS"
- + tmake_file="alpha/t-alpha alpha/t-ieee"
- + ;;
- +alpha*-dec-osf[45]*)
- + if test x$stabs = xyes
- + then
- + tm_file="${tm_file} dbx.h"
- + fi
- + if test x$gas != xyes
- + then
- + extra_passes="mips-tfile mips-tdump"
- + fi
- + use_collect2=yes
- + tmake_file="alpha/t-alpha alpha/t-ieee alpha/t-crtfm alpha/t-osf4"
- + tm_file="${tm_file} alpha/osf.h"
- + extra_headers=va_list.h
- + case ${target} in
- + *-*-osf4*)
- + # Define TARGET_SUPPORT_ARCH except on 4.0a.
- + case ${target} in
- + *-*-osf4.0a) ;;
- + *) tm_defines="${tm_defines} TARGET_SUPPORT_ARCH=1"
- + esac
- + ;;
- + *-*-osf5*)
- + tm_file="${tm_file} alpha/osf5.h"
- + tm_defines="${tm_defines} TARGET_SUPPORT_ARCH=1"
- + ;;
- + esac
- + case ${enable_threads} in
- + "" | yes | posix)
- + thread_file='posix'
- + tmake_file="${tmake_file} alpha/t-osf-pthread"
- + ;;
- + esac
- + ;;
- +alpha64-dec-*vms*)
- + tm_file="${tm_file} alpha/vms.h alpha/vms64.h"
- + xm_file="alpha/xm-vms.h"
- + tmake_file="alpha/t-alpha alpha/t-vms alpha/t-vms64 alpha/t-ieee"
- + prefix=/gnu
- + local_prefix=/gnu
- + ;;
- +alpha*-dec-*vms*)
- + tm_file="${tm_file} alpha/vms.h"
- + xm_file=alpha/xm-vms.h
- + tmake_file="alpha/t-alpha alpha/t-vms alpha/t-ieee"
- + prefix=/gnu
- + local_prefix=/gnu
- + ;;
- +arc-*-elf*)
- + tm_file="dbxelf.h elfos.h svr4.h ${tm_file}"
- + extra_parts="crtinit.o crtfini.o"
- + ;;
- +arm-*-coff* | armel-*-coff*)
- + tm_file="arm/semi.h arm/aout.h arm/arm.h arm/coff.h dbxcoff.h"
- + tmake_file="arm/t-arm arm/t-arm-coff"
- + ;;
- +arm-wrs-vxworks)
- + tm_file="elfos.h arm/elf.h arm/aout.h ${tm_file} vx-common.h vxworks.h arm/vxworks.h"
- + tmake_file="${tmake_file} arm/t-arm arm/t-vxworks"
- + ;;
- +arm*-*-freebsd*)
- + tm_file="dbxelf.h elfos.h ${fbsd_tm_file} arm/elf.h arm/aout.h arm/freebsd.h arm/arm.h"
- + tmake_file="${tmake_file} arm/t-arm arm/t-strongarm-elf"
- + ;;
- +arm*-*-netbsdelf*)
- + tm_file="dbxelf.h elfos.h netbsd.h netbsd-elf.h arm/elf.h arm/aout.h arm/arm.h arm/netbsd-elf.h"
- + tmake_file="${tmake_file} arm/t-arm arm/t-netbsd"
- + ;;
- +arm*-*-netbsd*)
- + tm_file="arm/aout.h arm/arm.h netbsd.h netbsd-aout.h arm/netbsd.h"
- + tmake_file="t-netbsd arm/t-arm arm/t-netbsd"
- + extra_parts=""
- + use_collect2=yes
- + ;;
- +arm*-*-linux*) # ARM GNU/Linux with ELF
- + tm_file="dbxelf.h elfos.h linux.h arm/elf.h arm/linux-gas.h arm/linux-elf.h"
- + case $target in
- + arm*b-*)
- + tm_defines="${tm_defines} TARGET_BIG_ENDIAN_DEFAULT=1"
- + ;;
- + esac
- + tmake_file="${tmake_file} t-linux arm/t-arm"
- + case ${target} in
- + arm*-*-linux-*eabi)
- + tm_file="$tm_file arm/bpabi.h arm/linux-eabi.h"
- + tmake_file="$tmake_file arm/t-arm-elf arm/t-bpabi arm/t-linux-eabi t-slibgcc-libgcc"
- + # The BPABI long long divmod functions return a 128-bit value in
- + # registers r0-r3. Correctly modeling that requires the use of
- + # TImode.
- + need_64bit_hwint=yes
- + # The EABI requires the use of __cxa_atexit.
- + default_use_cxa_atexit=yes
- + ;;
- + *)
- + tmake_file="$tmake_file arm/t-linux"
- + ;;
- + esac
- + tm_file="$tm_file arm/aout.h arm/arm.h"
- + tmake_file="${tmake_file} arm/t-arm-softfp soft-fp/t-softfp"
- + ;;
- +arm*-*-uclinux*) # ARM ucLinux
- + tm_file="dbxelf.h elfos.h arm/unknown-elf.h arm/elf.h arm/linux-gas.h arm/uclinux-elf.h"
- + tmake_file="arm/t-arm arm/t-arm-elf"
- + case ${target} in
- + arm*-*-uclinux*eabi)
- + tm_file="$tm_file arm/bpabi.h arm/uclinux-eabi.h"
- + tmake_file="$tmake_file arm/t-bpabi"
- + # The BPABI long long divmod functions return a 128-bit value in
- + # registers r0-r3. Correctly modeling that requires the use of
- + # TImode.
- + need_64bit_hwint=yes
- + # The EABI requires the use of __cxa_atexit.
- + default_use_cxa_atexit=yes
- + esac
- + tm_file="$tm_file arm/aout.h arm/arm.h"
- + tmake_file="${tmake_file} arm/t-arm-softfp soft-fp/t-softfp"
- + ;;
- +arm*-*-ecos-elf)
- + tm_file="dbxelf.h elfos.h arm/unknown-elf.h arm/elf.h arm/aout.h arm/arm.h arm/ecos-elf.h"
- + tmake_file="arm/t-arm arm/t-arm-elf"
- + tmake_file="${tmake_file} arm/t-arm-softfp soft-fp/t-softfp"
- + ;;
- +arm*-*-eabi* | arm*-*-symbianelf* )
- + # The BPABI long long divmod functions return a 128-bit value in
- + # registers r0-r3. Correctly modeling that requires the use of
- + # TImode.
- + need_64bit_hwint=yes
- + default_use_cxa_atexit=yes
- + tm_file="dbxelf.h elfos.h arm/unknown-elf.h arm/elf.h arm/bpabi.h"
- + tmake_file="arm/t-arm arm/t-arm-elf"
- + case ${target} in
- + arm*-*-eabi*)
- + tm_file="$tm_file arm/eabi.h"
- + tmake_file="${tmake_file} arm/t-bpabi"
- + extra_options="${extra_options} arm/eabi.opt"
- + ;;
- + arm*-*-symbianelf*)
- + tm_file="${tm_file} arm/symbian.h"
- + # We do not include t-bpabi for Symbian OS because the system
- + # provides its own implementation of the BPABI functions.
- + tmake_file="${tmake_file} arm/t-symbian"
- + ;;
- + esac
- + tm_file="${tm_file} arm/aout.h arm/arm.h"
- + tmake_file="${tmake_file} arm/t-arm-softfp soft-fp/t-softfp"
- + ;;
- +arm*-*-rtems*)
- + tm_file="dbxelf.h elfos.h arm/unknown-elf.h arm/elf.h arm/aout.h arm/arm.h arm/rtems-elf.h rtems.h"
- + tmake_file="arm/t-arm arm/t-arm-elf t-rtems arm/t-rtems"
- + tmake_file="${tmake_file} arm/t-arm-softfp soft-fp/t-softfp"
- + ;;
- +arm*-*-elf)
- + tm_file="dbxelf.h elfos.h arm/unknown-elf.h arm/elf.h arm/aout.h arm/arm.h"
- + tmake_file="arm/t-arm arm/t-arm-elf"
- + tmake_file="${tmake_file} arm/t-arm-softfp soft-fp/t-softfp"
- + ;;
- +arm*-wince-pe*)
- + tm_file="arm/semi.h arm/aout.h arm/arm.h arm/coff.h dbxcoff.h arm/pe.h arm/wince-pe.h"
- + tmake_file="arm/t-arm arm/t-wince-pe"
- + extra_options="${extra_options} arm/pe.opt"
- + extra_objs="pe.o"
- + ;;
- +arm-*-pe*)
- + tm_file="arm/semi.h arm/aout.h arm/arm.h arm/coff.h dbxcoff.h arm/pe.h"
- + tmake_file="arm/t-arm arm/t-pe"
- + extra_options="${extra_options} arm/pe.opt"
- + extra_objs="pe.o"
- + ;;
- +avr-*-rtems*)
- + tm_file="avr/avr.h dbxelf.h avr/rtems.h rtems.h"
- + tmake_file="avr/t-avr t-rtems avr/t-rtems"
- + ;;
- +avr-*-*)
- + tm_file="avr/avr.h dbxelf.h"
- + ;;
- +bfin*-elf*)
- + tm_file="${tm_file} dbxelf.h elfos.h bfin/elf.h"
- + tmake_file=bfin/t-bfin-elf
- + use_collect2=no
- + ;;
- +bfin*-uclinux*)
- + tm_file="${tm_file} dbxelf.h elfos.h bfin/elf.h linux.h bfin/uclinux.h"
- + tmake_file=bfin/t-bfin-uclinux
- + tm_defines="${tm_defines} UCLIBC_DEFAULT=1"
- + extra_options="${extra_options} linux.opt"
- + use_collect2=no
- + ;;
- +bfin*-linux-uclibc*)
- + tm_file="${tm_file} dbxelf.h elfos.h bfin/elf.h linux.h bfin/linux.h ./linux-sysroot-suffix.h"
- + tmake_file="t-slibgcc-elf-ver bfin/t-bfin-linux"
- + extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o"
- + use_collect2=no
- + ;;
- +bfin*-rtems*)
- + tm_file="${tm_file} dbxelf.h elfos.h bfin/elf.h bfin/rtems.h rtems.h"
- + tmake_file="bfin/t-bfin t-rtems bfin/t-rtems"
- + ;;
- +bfin*-*)
- + tm_file="${tm_file} dbxelf.h elfos.h bfin/elf.h"
- + tmake_file=bfin/t-bfin
- + use_collect2=no
- + ;;
- +crisv32-*-elf | crisv32-*-none)
- + tm_file="dbxelf.h elfos.h ${tm_file}"
- + tmake_file="cris/t-cris"
- + target_cpu_default=32
- + gas=yes
- + extra_options="${extra_options} cris/elf.opt"
- + ;;
- +cris-*-elf | cris-*-none)
- + tm_file="dbxelf.h elfos.h ${tm_file}"
- + tmake_file="cris/t-cris cris/t-elfmulti"
- + gas=yes
- + extra_options="${extra_options} cris/elf.opt"
- + ;;
- +crisv32-*-linux* | cris-*-linux*)
- + tm_file="dbxelf.h elfos.h svr4.h ${tm_file} linux.h cris/linux.h"
- + # We need to avoid using t-linux, so override default tmake_file
- + tmake_file="cris/t-cris t-slibgcc-elf-ver cris/t-linux"
- + extra_options="${extra_options} cris/linux.opt"
- + case $target in
- + cris-*-*)
- + target_cpu_default=10
- + ;;
- + crisv32-*-*)
- + target_cpu_default=32
- + ;;
- + esac
- + ;;
- +crx-*-elf)
- + tm_file="elfos.h ${tm_file}"
- + extra_parts="crtbegin.o crtend.o"
- + use_collect2=no
- + ;;
- +fr30-*-elf)
- + tm_file="dbxelf.h elfos.h svr4.h ${tm_file}"
- + tmake_file=fr30/t-fr30
- + extra_parts="crti.o crtn.o crtbegin.o crtend.o"
- + ;;
- +frv-*-elf)
- + tm_file="dbxelf.h elfos.h svr4.h ${tm_file} frv/frv-abi.h"
- + tmake_file=frv/t-frv
- + ;;
- +frv-*-*linux*)
- + tm_file="dbxelf.h elfos.h svr4.h ${tm_file} \
- + linux.h frv/linux.h frv/frv-abi.h"
- + tmake_file="${tmake_file} frv/t-frv frv/t-linux"
- + ;;
- +h8300-*-rtems*)
- + tmake_file="h8300/t-h8300 h8300/t-elf t-rtems h8300/t-rtems"
- + tm_file="h8300/h8300.h dbxelf.h elfos.h h8300/elf.h h8300/rtems.h rtems.h"
- + ;;
- +h8300-*-elf*)
- + tmake_file="h8300/t-h8300 h8300/t-elf"
- + tm_file="h8300/h8300.h dbxelf.h elfos.h h8300/elf.h"
- + ;;
- +h8300-*-*)
- + tm_file="h8300/h8300.h dbxcoff.h h8300/coff.h"
- + ;;
- +hppa*64*-*-linux*)
- + target_cpu_default="MASK_PA_11|MASK_PA_20"
- + tm_file="pa/pa64-start.h ${tm_file} dbxelf.h elfos.h svr4.h linux.h \
- + pa/pa-linux.h pa/pa64-regs.h pa/pa-64.h pa/pa64-linux.h"
- + tmake_file="${tmake_file} pa/t-linux64"
- + gas=yes gnu_ld=yes
- + need_64bit_hwint=yes
- + ;;
- +hppa*-*-linux*)
- + target_cpu_default="MASK_PA_11|MASK_NO_SPACE_REGS"
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h linux.h pa/pa-linux.h \
- + pa/pa32-regs.h pa/pa32-linux.h"
- + tmake_file="${tmake_file} pa/t-linux"
- + # Set the libgcc version number
- + if test x$sjlj = x1; then
- + tmake_file="$tmake_file pa/t-slibgcc-sjlj-ver"
- + else
- + tmake_file="$tmake_file pa/t-slibgcc-dwarf-ver"
- + fi
- + ;;
- +# port not yet contributed.
- +#hppa*-*-openbsd*)
- +# target_cpu_default="MASK_PA_11"
- +# ;;
- +hppa[12]*-*-hpux10*)
- + case ${target} in
- + hppa1.1-*-* | hppa2*-*-*)
- + target_cpu_default="MASK_PA_11"
- + ;;
- + esac
- + tm_file="${tm_file} pa/pa32-regs.h dbxelf.h pa/som.h \
- + pa/pa-hpux.h pa/pa-hpux10.h"
- + extra_options="${extra_options} pa/pa-hpux.opt"
- + case ${target} in
- + *-*-hpux10.[1-9]*)
- + tm_file="${tm_file} pa/pa-hpux1010.h"
- + extra_options="${extra_options} pa/pa-hpux1010.opt"
- + ;;
- + esac
- + tmake_file="pa/t-pa-hpux10 pa/t-pa-hpux pa/t-hpux-shlib"
- + case ${enable_threads} in
- + "")
- + if test x$have_pthread_h = xyes ; then
- + tmake_file="${tmake_file} pa/t-dce-thr"
- + fi
- + ;;
- + yes | dce)
- + tmake_file="${tmake_file} pa/t-dce-thr"
- + ;;
- + esac
- + # Set the libgcc version number
- + if test x$sjlj = x1; then
- + tmake_file="$tmake_file pa/t-slibgcc-sjlj-ver"
- + else
- + tmake_file="$tmake_file pa/t-slibgcc-dwarf-ver"
- + fi
- + use_collect2=yes
- + gas=yes
- + ;;
- +hppa*64*-*-hpux11*)
- + target_cpu_default="MASK_PA_11|MASK_PA_20"
- + if test x$gnu_ld = xyes
- + then
- + target_cpu_default="${target_cpu_default}|MASK_GNU_LD"
- + fi
- + tm_file="pa/pa64-start.h ${tm_file} dbxelf.h elfos.h \
- + pa/pa64-regs.h pa/pa-hpux.h pa/pa-hpux1010.h \
- + pa/pa-hpux11.h"
- + case ${target} in
- + *-*-hpux11.[1-9]*)
- + tm_file="${tm_file} pa/pa-hpux1111.h pa/pa-64.h pa/pa64-hpux.h"
- + extra_options="${extra_options} pa/pa-hpux1111.opt"
- + ;;
- + *)
- + tm_file="${tm_file} pa/pa-64.h pa/pa64-hpux.h"
- + ;;
- + esac
- + extra_options="${extra_options} pa/pa-hpux.opt \
- + pa/pa-hpux1010.opt pa/pa64-hpux.opt"
- + need_64bit_hwint=yes
- + tmake_file="pa/t-pa64 pa/t-pa-hpux pa/t-hpux-shlib"
- + # Set the libgcc version number
- + if test x$sjlj = x1; then
- + tmake_file="$tmake_file pa/t-slibgcc-sjlj-ver"
- + else
- + tmake_file="$tmake_file pa/t-slibgcc-dwarf-ver"
- + fi
- + extra_parts="crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o \
- + libgcc_stub.a"
- + case x${enable_threads} in
- + x | xyes | xposix )
- + thread_file=posix
- + ;;
- + esac
- + gas=yes
- + ;;
- +hppa[12]*-*-hpux11*)
- + case ${target} in
- + hppa1.1-*-* | hppa2*-*-*)
- + target_cpu_default="MASK_PA_11"
- + ;;
- + esac
- + tm_file="${tm_file} pa/pa32-regs.h dbxelf.h pa/som.h \
- + pa/pa-hpux.h pa/pa-hpux1010.h pa/pa-hpux11.h"
- + extra_options="${extra_options} pa/pa-hpux.opt pa/pa-hpux1010.opt"
- + case ${target} in
- + *-*-hpux11.[1-9]*)
- + tm_file="${tm_file} pa/pa-hpux1111.h"
- + extra_options="${extra_options} pa/pa-hpux1111.opt"
- + ;;
- + esac
- + tmake_file="pa/t-pa-hpux11 pa/t-pa-hpux pa/t-hpux-shlib"
- + # Set the libgcc version number
- + if test x$sjlj = x1; then
- + tmake_file="$tmake_file pa/t-slibgcc-sjlj-ver"
- + else
- + tmake_file="$tmake_file pa/t-slibgcc-dwarf-ver"
- + fi
- + extra_parts="libgcc_stub.a"
- + case x${enable_threads} in
- + x | xyes | xposix )
- + thread_file=posix
- + ;;
- + esac
- + use_collect2=yes
- + gas=yes
- + ;;
- +i[34567]86-*-darwin*)
- + need_64bit_hwint=yes
- +
- + # This is so that '.../configure && make' doesn't fail due to
- + # config.guess deciding that the configuration is i386-*-darwin* and
- + # then this file using that to set --with-cpu=i386 which has no -m64
- + # support.
- + with_cpu=${with_cpu:-generic}
- + tmake_file="${tmake_file} i386/t-crtpc i386/t-crtfm"
- + ;;
- +x86_64-*-darwin*)
- + with_cpu=${with_cpu:-generic}
- + tmake_file="${tmake_file} t-darwin ${cpu_type}/t-darwin64 t-slibgcc-darwin i386/t-crtpc i386/t-crtfm"
- + tm_file="${tm_file} ${cpu_type}/darwin64.h"
- + ;;
- +i[34567]86-*-elf*)
- + tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h i386/i386elf.h"
- + tmake_file="${tmake_file} i386/t-i386elf t-svr4"
- + ;;
- +x86_64-*-elf*)
- + tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h i386/i386elf.h i386/x86-64.h"
- + tmake_file="${tmake_file} i386/t-i386elf t-svr4"
- + ;;
- +i[34567]86-*-aout*)
- + tm_file="${tm_file} i386/unix.h i386/bsd.h i386/gas.h i386/gstabs.h i386/i386-aout.h"
- + ;;
- +i[34567]86-*-freebsd*)
- + tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h ${fbsd_tm_file} i386/freebsd.h"
- + ;;
- +x86_64-*-freebsd*)
- + tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h ${fbsd_tm_file} i386/x86-64.h i386/freebsd.h i386/freebsd64.h"
- + tmake_file="${tmake_file} i386/t-crtstuff"
- + ;;
- +i[34567]86-*-netbsdelf*)
- + tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h netbsd.h netbsd-elf.h i386/netbsd-elf.h"
- + ;;
- +i[34567]86-*-netbsd*)
- + tm_file="${tm_file} i386/unix.h i386/bsd.h i386/gas.h i386/gstabs.h netbsd.h netbsd-aout.h i386/netbsd.h"
- + tmake_file="${tmake_file} t-netbsd"
- + extra_parts=""
- + use_collect2=yes
- + ;;
- +x86_64-*-netbsd*)
- + tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h netbsd.h netbsd-elf.h i386/x86-64.h i386/netbsd64.h"
- + tmake_file="${tmake_file} i386/t-crtstuff"
- + ;;
- +i[34567]86-*-openbsd2.*|i[34567]86-*openbsd3.[0123])
- + tm_file="i386/i386.h i386/unix.h i386/bsd.h i386/gas.h i386/gstabs.h openbsd-oldgas.h openbsd.h i386/openbsd.h"
- + # needed to unconfuse gdb
- + tmake_file="${tmake_file} t-libc-ok t-openbsd i386/t-openbsd"
- + # we need collect2 until our bug is fixed...
- + use_collect2=yes
- + ;;
- +i[34567]86-*-openbsd*)
- + tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h"
- + tm_file="${tm_file} openbsd.h i386/openbsdelf.h"
- + gas=yes
- + gnu_ld=yes
- + ;;
- +i[34567]86-*-coff*)
- + tm_file="${tm_file} i386/unix.h i386/bsd.h i386/gas.h dbxcoff.h i386/i386-coff.h"
- + ;;
- +i[34567]86-*-linux* | i[34567]86-*-kfreebsd*-gnu | i[34567]86-*-knetbsd*-gnu | i[34567]86-*-gnu* | i[34567]86-*-kopensolaris*-gnu)
- + # Intel 80386's running GNU/*
- + # with ELF format using glibc 2
- + tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h svr4.h linux.h"
- + case ${target} in
- + i[34567]86-*-linux*)
- + if test x$enable_targets = xall; then
- + tm_file="${tm_file} i386/x86-64.h i386/linux64.h"
- + tm_defines="${tm_defines} TARGET_BI_ARCH=1"
- + tmake_file="${tmake_file} i386/t-linux64"
- + need_64bit_hwint=yes
- + case X"${with_cpu}" in
- + Xgeneric|Xcore2|Xnocona|Xx86-64|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx|Xathlon64-sse3|Xk8-sse3|Xopteron-sse3)
- + ;;
- + X)
- + if test x$with_cpu_64 = x; then
- + with_cpu_64=generic
- + fi
- + ;;
- + *)
- + echo "Unsupported CPU used in --with-cpu=$with_cpu, supported values:" 1>&2
- + echo "generic core2 nocona x86-64 amdfam10 barcelona k8 opteron athlon64 athlon-fx athlon64-sse3 k8-sse3 opteron-sse3" 1>&2
- + exit 1
- + ;;
- + esac
- + else
- + tm_file="${tm_file} i386/linux.h"
- + fi
- + ;;
- + i[34567]86-*-knetbsd*-gnu) tm_file="${tm_file} i386/linux.h knetbsd-gnu.h i386/knetbsd-gnu.h" ;;
- + i[34567]86-*-kfreebsd*-gnu) tm_file="${tm_file} i386/linux.h kfreebsd-gnu.h i386/kfreebsd-gnu.h" ;;
- + i[34567]86-*-kopensolaris*-gnu) tm_file="${tm_file} i386/linux.h kopensolaris-gnu.h i386/kopensolaris-gnu.h" ;;
- + i[34567]86-*-gnu*) tm_file="$tm_file i386/linux.h gnu.h i386/gnu.h";;
- + esac
- + tmake_file="${tmake_file} i386/t-crtstuff i386/t-crtpc i386/t-crtfm t-dfprules"
- + ;;
- +x86_64-*-linux* | x86_64-*-kfreebsd*-gnu | x86_64-*-knetbsd*-gnu)
- + tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h svr4.h linux.h \
- + i386/x86-64.h i386/linux64.h"
- + case ${target} in
- + x86_64-*-kfreebsd*-gnu) tm_file="${tm_file} kfreebsd-gnu.h" ;;
- + x86_64-*-knetbsd*-gnu) tm_file="${tm_file} knetbsd-gnu.h" ;;
- + esac
- + tmake_file="${tmake_file} i386/t-linux64 i386/t-crtstuff i386/t-crtpc i386/t-crtfm t-dfprules"
- + ;;
- +i[34567]86-pc-msdosdjgpp*)
- + xm_file=i386/xm-djgpp.h
- + tm_file="dbxcoff.h ${tm_file} i386/unix.h i386/bsd.h i386/gas.h i386/djgpp.h"
- + tmake_file="${tmake_file} i386/t-djgpp"
- + extra_options="${extra_options} i386/djgpp.opt"
- + gnu_ld=yes
- + gas=yes
- + ;;
- +i[34567]86-*-lynxos*)
- + xm_defines=POSIX
- + tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h i386/lynx.h lynx.h"
- + tmake_file="${tmake_file} i386/t-crtstuff t-lynx"
- + extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o"
- + extra_options="${extra_options} lynx.opt"
- + thread_file=lynx
- + gnu_ld=yes
- + gas=yes
- + ;;
- +i[3456x]86-*-netware*)
- + tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h svr4.h tm-dwarf2.h i386/netware.h"
- + tmake_file="${tmake_file} i386/t-netware"
- + extra_objs=netware.o
- + case /${with_ld} in
- + */nwld)
- + extra_objs="$extra_objs nwld.o"
- + tm_file="${tm_file} i386/nwld.h"
- + tmake_file="${tmake_file} i386/t-nwld"
- + extra_parts="crt0.o libgcc.def libc.def libcpre.def posixpre.def"
- + ;;
- + esac
- + case x${enable_threads} in
- + x | xyes | xposix) thread_file='posix';;
- + xnks) thread_file='nks';;
- + xno) ;;
- + *) echo 'Unknown thread configuration for NetWare' >&2; exit 1;;
- + esac
- + ;;
- +i[34567]86-*-nto-qnx*)
- + tm_file="${tm_file} i386/att.h dbxelf.h tm-dwarf2.h elfos.h svr4.h i386/unix.h i386/nto.h"
- + tmake_file="${tmake_file} i386/t-nto"
- + gnu_ld=yes
- + gas=yes
- + ;;
- +i[34567]86-*-rtems*)
- + tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h i386/i386elf.h i386/rtemself.h rtems.h"
- + extra_parts="crtbegin.o crtend.o crti.o crtn.o"
- + tmake_file="${tmake_file} i386/t-rtems-i386 i386/t-crtstuff t-rtems"
- + ;;
- +i[34567]86-*-solaris2*)
- + tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h svr4.h i386/sysv4.h sol2.h"
- + case ${target} in
- + *-*-solaris2.1[0-9]*)
- + tm_file="${tm_file} sol2-10.h"
- + ;;
- + esac
- + tm_file="${tm_file} i386/sol2.h"
- + if test x$gnu_ld = xyes; then
- + tm_file="${tm_file} sol2-gld.h"
- + fi
- + if test x$gas = xyes; then
- + tm_file="${tm_file} i386/sol2-gas.h"
- + fi
- + tmake_file="${tmake_file} t-sol2 t-svr4"
- + c_target_objs="${c_target_objs} sol2-c.o"
- + cxx_target_objs="${cxx_target_objs} sol2-c.o"
- + extra_objs="sol2.o"
- + tm_p_file="${tm_p_file} sol2-protos.h"
- + if test x$gnu_ld = xyes; then
- + tmake_file="$tmake_file t-slibgcc-elf-ver"
- + tm_defines="${tm_defines} TARGET_GNU_LD=1"
- + else
- + tmake_file="$tmake_file t-slibgcc-sld"
- + fi
- + if test x$gas = xyes; then
- + tm_file="usegas.h ${tm_file}"
- + fi
- + tm_file="$tm_file tm-dwarf2.h"
- + case ${target} in
- + *-*-solaris2.1[0-9]*)
- + tm_file="${tm_file} i386/x86-64.h i386/sol2-10.h"
- + tm_defines="${tm_defines} TARGET_BI_ARCH=1"
- + tmake_file="$tmake_file i386/t-sol2-10"
- + # i386/t-crtstuff only affects libgcc. Its inclusion
- + # depends on a runtime test and is thus performed in
- + # libgcc/configure.ac instead.
- + need_64bit_hwint=yes
- + case X"${with_cpu}" in
- + Xgeneric|Xcore2|Xnocona|Xx86-64|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx|Xathlon64-sse3|Xk8-sse3|Xopteron-sse3)
- + ;;
- + X)
- + if test x$with_cpu_64 = x; then
- + with_cpu_64=generic
- + fi
- + ;;
- + *)
- + echo "Unsupported CPU used in --with-cpu=$with_cpu, supported values:" 1>&2
- + echo "generic core2 nocona x86-64 amdfam10 barcelona k8 opteron athlon64 athlon-fx athlon64-sse3 k8-sse3 opteron-sse3" 1>&2
- + exit 1
- + ;;
- + esac
- + ;;
- + esac
- + case ${enable_threads}:${have_pthread_h}:${have_thread_h} in
- + "":yes:* | yes:yes:* )
- + thread_file=posix
- + ;;
- + "":*:yes | yes:*:yes )
- + thread_file=solaris
- + ;;
- + esac
- + ;;
- +i[4567]86-wrs-vxworks|i[4567]86-wrs-vxworksae)
- + tm_file="${tm_file} i386/unix.h i386/att.h elfos.h svr4.h vx-common.h"
- + case ${target} in
- + *-vxworksae*)
- + tm_file="${tm_file} vxworksae.h i386/vx-common.h i386/vxworksae.h"
- + tmake_file="${tmake_file} i386/t-vxworks i386/t-vxworksae"
- + ;;
- + *)
- + tm_file="${tm_file} vxworks.h i386/vx-common.h i386/vxworks.h"
- + tmake_file="${tmake_file} i386/t-vxworks"
- + ;;
- + esac
- + ;;
- +i[34567]86-*-pe | i[34567]86-*-cygwin*)
- + tm_file="${tm_file} i386/unix.h i386/bsd.h i386/gas.h dbxcoff.h i386/cygming.h i386/cygwin.h"
- + xm_file=i386/xm-cygwin.h
- + # This has to match the logic for DWARF2_UNWIND_INFO in gcc/config/i386/cygming.h
- + if test x$sjlj = x0; then
- + tmake_eh_file="i386/t-dw2-eh"
- + else
- + tmake_eh_file="i386/t-sjlj-eh"
- + fi
- + tmake_file="${tmake_file} ${tmake_eh_file} i386/t-cygming i386/t-cygwin"
- + target_gtfiles="\$(srcdir)/config/i386/winnt.c"
- + extra_options="${extra_options} i386/cygming.opt"
- + extra_objs="winnt.o winnt-stubs.o"
- + c_target_objs="${c_target_objs} cygwin2.o msformat-c.o"
- + cxx_target_objs="${cxx_target_objs} cygwin2.o winnt-cxx.o msformat-c.o"
- + extra_gcc_objs=cygwin1.o
- + if test x$enable_threads = xyes; then
- + thread_file='posix'
- + fi
- + ;;
- +i[34567]86-*-mingw* | x86_64-*-mingw*)
- + tm_file="${tm_file} i386/unix.h i386/bsd.h i386/gas.h dbxcoff.h i386/cygming.h i386/mingw32.h"
- + xm_file=i386/xm-mingw32.h
- + case ${target} in
- + x86_64-*-*)
- + need_64bit_hwint=yes
- + ;;
- + *)
- + ;;
- + esac
- + # This has to match the logic for DWARF2_UNWIND_INFO in gcc/config/i386/cygming.h
- + if test x$sjlj = x0; then
- + tmake_eh_file="i386/t-dw2-eh"
- + else
- + tmake_eh_file="i386/t-sjlj-eh"
- + fi
- + tmake_file="${tmake_file} ${tmake_eh_file} i386/t-cygming i386/t-mingw32"
- + target_gtfiles="\$(srcdir)/config/i386/winnt.c"
- + extra_options="${extra_options} i386/cygming.opt i386/mingw.opt"
- + extra_objs="winnt.o winnt-stubs.o"
- + c_target_objs="${c_target_objs} msformat-c.o"
- + cxx_target_objs="${cxx_target_objs} winnt-cxx.o msformat-c.o"
- + default_use_cxa_atexit=yes
- + case ${enable_threads} in
- + "" | yes | win32)
- + thread_file='win32'
- + tmake_file="${tmake_file} i386/t-gthr-win32"
- + ;;
- + esac
- + case ${target} in
- + x86_64-*-mingw*)
- + tmake_file="${tmake_file} i386/t-crtfm"
- + ;;
- + *)
- + ;;
- + esac
- + case ${target} in
- + *mingw32crt*)
- + tm_file="${tm_file} i386/crtdll.h"
- + ;;
- + *mingw32msv* | *mingw*)
- + ;;
- + esac
- + ;;
- +i[34567]86-*-interix3*)
- + tm_file="${tm_file} i386/unix.h i386/bsd.h i386/gas.h i386/i386-interix.h i386/i386-interix3.h interix.h interix3.h"
- + tmake_file="${tmake_file} i386/t-interix"
- + extra_objs=winnt.o
- + target_gtfiles="\$(srcdir)/config/i386/winnt.c"
- + if test x$enable_threads = xyes ; then
- + thread_file='posix'
- + fi
- + if test x$stabs = xyes ; then
- + tm_file="${tm_file} dbxcoff.h"
- + fi
- + ;;
- +ia64*-*-elf*)
- + tm_file="${tm_file} dbxelf.h elfos.h ia64/sysv4.h ia64/elf.h"
- + tmake_file="ia64/t-ia64"
- + target_cpu_default="0"
- + if test x$gas = xyes
- + then
- + target_cpu_default="${target_cpu_default}|MASK_GNU_AS"
- + fi
- + if test x$gnu_ld = xyes
- + then
- + target_cpu_default="${target_cpu_default}|MASK_GNU_LD"
- + fi
- + extra_parts="crtbegin.o crtend.o crtbeginS.o crtendS.o crtfastmath.o"
- + ;;
- +ia64*-*-freebsd*)
- + tm_file="${tm_file} dbxelf.h elfos.h ${fbsd_tm_file} ia64/sysv4.h ia64/freebsd.h"
- + target_cpu_default="MASK_GNU_AS|MASK_GNU_LD"
- + tmake_file="${tmake_file} ia64/t-ia64"
- + extra_parts="crtbegin.o crtend.o crtbeginS.o crtendS.o crtfastmath.o"
- + ;;
- +ia64*-*-linux*)
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h linux.h ia64/sysv4.h ia64/linux.h"
- + tmake_file="${tmake_file} ia64/t-ia64 t-libunwind ia64/t-glibc"
- + if test x$with_system_libunwind != xyes ; then
- + tmake_file="${tmake_file} t-libunwind-elf ia64/t-glibc-libunwind"
- + fi
- + target_cpu_default="MASK_GNU_AS|MASK_GNU_LD"
- + extra_parts="crtbegin.o crtend.o crtbeginS.o crtendS.o crtfastmath.o"
- + ;;
- +ia64*-*-hpux*)
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h ia64/sysv4.h ia64/hpux.h"
- + tmake_file="ia64/t-ia64 ia64/t-hpux"
- + target_cpu_default="MASK_GNU_AS"
- + case x$enable_threads in
- + x | xyes | xposix )
- + thread_file=posix
- + ;;
- + esac
- + use_collect2=no
- + c_target_objs="ia64-c.o"
- + cxx_target_objs="ia64-c.o"
- + extra_options="${extra_options} ia64/ilp32.opt"
- + ;;
- +iq2000*-*-elf*)
- + tm_file="svr4.h elfos.h iq2000/iq2000.h"
- + tmake_file=iq2000/t-iq2000
- + out_file=iq2000/iq2000.c
- + md_file=iq2000/iq2000.md
- + ;;
- +m32r-*-elf*)
- + tm_file="dbxelf.h elfos.h svr4.h ${tm_file}"
- + extra_parts="crtinit.o crtfini.o"
- + ;;
- +m32rle-*-elf*)
- + tm_file="dbxelf.h elfos.h svr4.h m32r/little.h ${tm_file}"
- + extra_parts="crtinit.o crtfini.o m32rx/crtinit.o m32rx/crtfini.o"
- + ;;
- +m32r-*-rtems*)
- + tm_file="dbxelf.h elfos.h svr4.h ${tm_file} m32r/rtems.h rtems.h"
- + tmake_file="m32r/t-m32r t-rtems"
- + extra_parts="crtinit.o crtfini.o"
- + ;;
- +m32r-*-linux*)
- + tm_file="dbxelf.h elfos.h svr4.h linux.h ${tm_file} m32r/linux.h"
- + # We override the tmake_file for linux -- why?
- + tmake_file="t-slibgcc-elf-ver m32r/t-linux"
- + gnu_ld=yes
- + if test x$enable_threads = xyes; then
- + thread_file='posix'
- + fi
- + ;;
- +m32rle-*-linux*)
- + tm_file="dbxelf.h elfos.h svr4.h linux.h m32r/little.h ${tm_file} m32r/linux.h"
- + # We override the tmake_file for linux -- why?
- + tmake_file="t-slibgcc-elf-ver m32r/t-linux"
- + gnu_ld=yes
- + if test x$enable_threads = xyes; then
- + thread_file='posix'
- + fi
- + ;;
- +# m68hc11 and m68hc12 share the same machine description.
- +m68hc11-*-*|m6811-*-*)
- + tm_file="dbxelf.h elfos.h usegas.h m68hc11/m68hc11.h"
- + tm_p_file="m68hc11/m68hc11-protos.h"
- + md_file="m68hc11/m68hc11.md"
- + out_file="m68hc11/m68hc11.c"
- + tmake_file="m68hc11/t-m68hc11"
- + ;;
- +m68hc12-*-*|m6812-*-*)
- + tm_file="m68hc11/m68hc12.h dbxelf.h elfos.h usegas.h m68hc11/m68hc11.h"
- + tm_p_file="m68hc11/m68hc11-protos.h"
- + md_file="m68hc11/m68hc11.md"
- + out_file="m68hc11/m68hc11.c"
- + tmake_file="m68hc11/t-m68hc11"
- + extra_options="${extra_options} m68hc11/m68hc11.opt"
- + ;;
- +m68k-*-aout*)
- + default_m68k_cpu=68020
- + default_cf_cpu=5206
- + tmake_file="m68k/t-floatlib m68k/t-m68kbare m68k/t-mlibs"
- + tm_file="${tm_file} m68k/m68k-none.h m68k/m68kemb.h m68k/m68k-aout.h libgloss.h"
- + ;;
- +m68k-*-coff*)
- + default_m68k_cpu=68020
- + default_cf_cpu=5206
- + tmake_file="m68k/t-floatlib m68k/t-m68kbare m68k/t-mlibs"
- + tm_defines="${tm_defines} MOTOROLA=1"
- + tm_file="${tm_file} m68k/m68k-none.h m68k/m68kemb.h dbxcoff.h m68k/coff.h dbx.h"
- + ;;
- +m68k-*-elf* | fido-*-elf*)
- + case ${target} in
- + fido-*-elf*)
- + # Check that $with_cpu makes sense.
- + case $with_cpu in
- + "" | "fidoa")
- + ;;
- + *)
- + echo "Cannot accept --with-cpu=$with_cpu"
- + exit 1
- + ;;
- + esac
- + with_cpu=fidoa
- + ;;
- + *)
- + default_m68k_cpu=68020
- + default_cf_cpu=5206
- + ;;
- + esac
- + tm_file="${tm_file} m68k/m68k-none.h m68k/m68kelf.h dbxelf.h elfos.h m68k/m68kemb.h m68k/m68020-elf.h"
- + tm_defines="${tm_defines} MOTOROLA=1"
- + tmake_file="m68k/t-floatlib m68k/t-m68kbare m68k/t-m68kelf"
- + # Add multilibs for targets other than fido.
- + case ${target} in
- + fido-*-elf*)
- + ;;
- + *)
- + tmake_file="$tmake_file m68k/t-mlibs"
- + ;;
- + esac
- + extra_parts="crtbegin.o crtend.o"
- + ;;
- +m68k*-*-netbsdelf*)
- + default_m68k_cpu=68020
- + default_cf_cpu=5475
- + tm_file="${tm_file} dbxelf.h elfos.h netbsd.h netbsd-elf.h m68k/netbsd-elf.h"
- + tm_defines="${tm_defines} MOTOROLA=1"
- + ;;
- +m68k*-*-openbsd*)
- + default_m68k_cpu=68020
- + default_cf_cpu=5475
- + # needed to unconfuse gdb
- + tm_defines="${tm_defines} OBSD_OLD_GAS"
- + tm_file="${tm_file} openbsd.h m68k/openbsd.h"
- + tmake_file="t-libc-ok t-openbsd m68k/t-openbsd"
- + # we need collect2 until our bug is fixed...
- + use_collect2=yes
- + ;;
- +m68k-*-uclinuxoldabi*) # Motorola m68k/ColdFire running uClinux
- + # with uClibc, using the original
- + # m68k-elf-based ABI
- + default_m68k_cpu=68020
- + default_cf_cpu=5206
- + tm_file="${tm_file} m68k/m68k-none.h m68k/m68kelf.h dbxelf.h elfos.h m68k/uclinux-oldabi.h"
- + tm_defines="${tm_defines} MOTOROLA=1"
- + tmake_file="m68k/t-floatlib m68k/t-uclinux"
- + ;;
- +m68k-*-uclinux*) # Motorola m68k/ColdFire running uClinux
- + # with uClibc, using the new GNU/Linux-style
- + # ABI.
- + default_m68k_cpu=68020
- + default_cf_cpu=5206
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h linux.h flat.h m68k/linux.h m68k/uclinux.h ./sysroot-suffix.h"
- + tm_defines="${tm_defines} MOTOROLA=1 UCLIBC_DEFAULT=1"
- + extra_options="${extra_options} linux.opt"
- + tmake_file="m68k/t-floatlib m68k/t-uclinux m68k/t-mlibs"
- + ;;
- +m68k-*-linux*) # Motorola m68k's running GNU/Linux
- + # with ELF format using glibc 2
- + # aka the GNU/Linux C library 6.
- + default_m68k_cpu=68020
- + default_cf_cpu=5475
- + with_arch=${with_arch:-m68k}
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h linux.h m68k/linux.h ./sysroot-suffix.h"
- + extra_options="${extra_options} m68k/ieee.opt"
- + tm_defines="${tm_defines} MOTOROLA=1"
- + tmake_file="${tmake_file} m68k/t-floatlib m68k/t-linux m68k/t-mlibs"
- + # if not configured with --enable-sjlj-exceptions, bump the
- + # libgcc version number
- + if test x$sjlj != x1; then
- + tmake_file="$tmake_file m68k/t-slibgcc-elf-ver"
- + fi
- + ;;
- +m68k-*-rtems*)
- + default_m68k_cpu=68020
- + default_cf_cpu=5206
- + tmake_file="m68k/t-floatlib m68k/t-m68kbare m68k/t-crtstuff t-rtems m68k/t-rtems m68k/t-mlibs"
- + tm_file="${tm_file} m68k/m68k-none.h m68k/m68kelf.h dbxelf.h elfos.h m68k/m68kemb.h m68k/m68020-elf.h m68k/rtemself.h rtems.h"
- + tm_defines="${tm_defines} MOTOROLA=1"
- + extra_parts="crtbegin.o crtend.o"
- + ;;
- +mcore-*-elf)
- + tm_file="dbxelf.h elfos.h svr4.h ${tm_file} mcore/mcore-elf.h"
- + tmake_file=mcore/t-mcore
- + inhibit_libc=true
- + ;;
- +mcore-*-pe*)
- + tm_file="svr3.h dbxcoff.h ${tm_file} mcore/mcore-pe.h"
- + tmake_file=mcore/t-mcore-pe
- + inhibit_libc=true
- + ;;
- +mips-sgi-irix[56]*)
- + tm_file="elfos.h ${tm_file} mips/iris.h"
- + tmake_file="mips/t-iris mips/t-slibgcc-irix"
- + target_cpu_default="MASK_ABICALLS"
- + case ${target} in
- + *-*-irix5*)
- + tm_file="${tm_file} mips/iris5.h"
- + ;;
- +
- + *-*-irix6*)
- + tm_file="${tm_file} mips/iris6.h"
- + tmake_file="${tmake_file} mips/t-iris6"
- + tm_defines="${tm_defines} MIPS_ISA_DEFAULT=3 MIPS_ABI_DEFAULT=ABI_N32"
- + ;;
- + esac
- + if test "x$stabs" = xyes
- + then
- + tm_file="${tm_file} dbx.h mips/dbxmdebug.h"
- + fi
- + if test "x$gnu_ld" = xyes
- + then
- + tm_defines="${tm_defines} IRIX_USING_GNU_LD"
- + fi
- + case ${enable_threads}:${have_pthread_h} in
- + "":yes | yes:yes ) thread_file=posix ;;
- + esac
- + ;;
- +mips*-*-netbsd*) # NetBSD/mips, either endian.
- + target_cpu_default="MASK_ABICALLS"
- + tm_file="elfos.h ${tm_file} mips/elf.h netbsd.h netbsd-elf.h mips/netbsd.h"
- + ;;
- +mips64*-*-linux* | mipsisa64*-*-linux*)
- + tm_file="dbxelf.h elfos.h svr4.h linux.h ${tm_file} mips/linux.h mips/linux64.h"
- + tmake_file="${tmake_file} mips/t-linux64 mips/t-libgcc-mips16"
- + tm_defines="${tm_defines} MIPS_ABI_DEFAULT=ABI_N32"
- + case ${target} in
- + mips64el-st-linux-gnu)
- + tm_file="${tm_file} mips/st.h"
- + tmake_file="${tmake_file} mips/t-st"
- + ;;
- + mips64octeon*-*-linux*)
- + tm_defines="${tm_defines} MIPS_CPU_STRING_DEFAULT=\\\"octeon\\\""
- + target_cpu_default=MASK_SOFT_FLOAT_ABI
- + ;;
- + mipsisa64r2*-*-linux*)
- + tm_defines="${tm_defines} MIPS_ISA_DEFAULT=65"
- + ;;
- + esac
- + gnu_ld=yes
- + gas=yes
- + test x$with_llsc != x || with_llsc=yes
- + ;;
- +mips*-*-linux*) # Linux MIPS, either endian.
- + tm_file="dbxelf.h elfos.h svr4.h linux.h ${tm_file} mips/linux.h"
- + tmake_file="${tmake_file} mips/t-libgcc-mips16"
- + case ${target} in
- + mipsisa32r2*)
- + tm_defines="${tm_defines} MIPS_ISA_DEFAULT=33"
- + ;;
- + mipsisa32*)
- + tm_defines="${tm_defines} MIPS_ISA_DEFAULT=32"
- + esac
- + test x$with_llsc != x || with_llsc=yes
- + ;;
- +mips*-*-openbsd*)
- + tm_defines="${tm_defines} OBSD_HAS_DECLARE_FUNCTION_NAME OBSD_HAS_DECLARE_OBJECT OBSD_HAS_CORRECT_SPECS"
- + target_cpu_default="MASK_ABICALLS"
- + tm_file="mips/mips.h openbsd.h mips/openbsd.h mips/sdb.h"
- + case ${target} in
- + mips*el-*-openbsd*)
- + tm_defines="${tm_defines} TARGET_ENDIAN_DEFAULT=0";;
- + *) tm_defines="${tm_defines} TARGET_ENDIAN_DEFAULT=MASK_BIG_ENDIAN";;
- + esac
- + ;;
- +mips*-sde-elf*)
- + tm_file="elfos.h ${tm_file} mips/elf.h mips/sde.h"
- + tmake_file="mips/t-sde mips/t-libgcc-mips16"
- + case "${with_newlib}" in
- + yes)
- + # newlib / libgloss.
- + ;;
- + *)
- + # MIPS toolkit libraries.
- + tm_file="$tm_file mips/sdemtk.h"
- + tmake_file="$tmake_file mips/t-sdemtk"
- + extra_options="$extra_options mips/sdemtk.opt"
- + case ${enable_threads} in
- + "" | yes | mipssde)
- + thread_file='mipssde'
- + ;;
- + esac
- + ;;
- + esac
- + case ${target} in
- + mipsisa32r2*)
- + tm_defines="MIPS_ISA_DEFAULT=33 MIPS_ABI_DEFAULT=ABI_32"
- + ;;
- + mipsisa32*)
- + tm_defines="MIPS_ISA_DEFAULT=32 MIPS_ABI_DEFAULT=ABI_32"
- + ;;
- + mipsisa64r2*)
- + tm_defines="MIPS_ISA_DEFAULT=65 MIPS_ABI_DEFAULT=ABI_N32"
- + ;;
- + mipsisa64*)
- + tm_defines="MIPS_ISA_DEFAULT=64 MIPS_ABI_DEFAULT=ABI_N32"
- + ;;
- + esac
- + ;;
- +mipsisa32-*-elf* | mipsisa32el-*-elf* | \
- +mipsisa32r2-*-elf* | mipsisa32r2el-*-elf* | \
- +mipsisa64-*-elf* | mipsisa64el-*-elf* | \
- +mipsisa64r2-*-elf* | mipsisa64r2el-*-elf*)
- + tm_file="elfos.h ${tm_file} mips/elf.h"
- + tmake_file="mips/t-isa3264 mips/t-libgcc-mips16"
- + case ${target} in
- + mipsisa32r2*)
- + tm_defines="${tm_defines} MIPS_ISA_DEFAULT=33"
- + ;;
- + mipsisa32*)
- + tm_defines="${tm_defines} MIPS_ISA_DEFAULT=32"
- + ;;
- + mipsisa64r2*)
- + tm_defines="${tm_defines} MIPS_ISA_DEFAULT=65"
- + ;;
- + mipsisa64*)
- + tm_defines="${tm_defines} MIPS_ISA_DEFAULT=64"
- + ;;
- + esac
- + case ${target} in
- + mipsisa32*-*-elfoabi*)
- + tm_defines="${tm_defines} MIPS_ABI_DEFAULT=ABI_32"
- + tm_file="${tm_file} mips/elfoabi.h"
- + ;;
- + mipsisa64*-*-elfoabi*)
- + tm_defines="${tm_defines} MIPS_ABI_DEFAULT=ABI_O64"
- + tm_file="${tm_file} mips/elfoabi.h"
- + ;;
- + *-*-elf*)
- + tm_defines="${tm_defines} MIPS_ABI_DEFAULT=ABI_EABI"
- + ;;
- + esac
- + ;;
- +mipsisa64sr71k-*-elf*)
- + tm_file="elfos.h ${tm_file} mips/elf.h"
- + tmake_file=mips/t-sr71k
- + target_cpu_default="MASK_64BIT|MASK_FLOAT64"
- + tm_defines="${tm_defines} MIPS_ISA_DEFAULT=64 MIPS_CPU_STRING_DEFAULT=\\\"sr71000\\\" MIPS_ABI_DEFAULT=ABI_EABI"
- + ;;
- +mipsisa64sb1-*-elf* | mipsisa64sb1el-*-elf*)
- + tm_file="elfos.h ${tm_file} mips/elf.h"
- + tmake_file="mips/t-elf mips/t-libgcc-mips16 mips/t-sb1"
- + target_cpu_default="MASK_64BIT|MASK_FLOAT64"
- + tm_defines="${tm_defines} MIPS_ISA_DEFAULT=64 MIPS_CPU_STRING_DEFAULT=\\\"sb1\\\" MIPS_ABI_DEFAULT=ABI_O64"
- + ;;
- +mips-*-elf* | mipsel-*-elf*)
- + tm_file="elfos.h ${tm_file} mips/elf.h"
- + tmake_file="mips/t-elf mips/t-libgcc-mips16"
- + ;;
- +mips64-*-elf* | mips64el-*-elf*)
- + tm_file="elfos.h ${tm_file} mips/elf.h"
- + tmake_file="mips/t-elf mips/t-libgcc-mips16"
- + target_cpu_default="MASK_64BIT|MASK_FLOAT64"
- + tm_defines="${tm_defines} MIPS_ISA_DEFAULT=3 MIPS_ABI_DEFAULT=ABI_O64"
- + ;;
- +mips64vr-*-elf* | mips64vrel-*-elf*)
- + tm_file="elfos.h ${tm_file} mips/vr.h mips/elf.h"
- + tmake_file=mips/t-vr
- + ;;
- +mips64orion-*-elf* | mips64orionel-*-elf*)
- + tm_file="elfos.h ${tm_file} mips/elforion.h mips/elf.h"
- + tmake_file="mips/t-elf mips/t-libgcc-mips16"
- + target_cpu_default="MASK_64BIT|MASK_FLOAT64"
- + tm_defines="${tm_defines} MIPS_ISA_DEFAULT=3 MIPS_ABI_DEFAULT=ABI_O64"
- + ;;
- +mips*-*-rtems*)
- + tm_file="elfos.h ${tm_file} mips/elf.h mips/rtems.h rtems.h"
- + tmake_file="mips/t-elf mips/t-libgcc-mips16 t-rtems mips/t-rtems"
- + ;;
- +mips-wrs-vxworks)
- + tm_file="elfos.h ${tm_file} svr4.h mips/elf.h vx-common.h vxworks.h mips/vxworks.h"
- + tmake_file="${tmake_file} mips/t-vxworks"
- + ;;
- +mipstx39-*-elf* | mipstx39el-*-elf*)
- + tm_file="elfos.h ${tm_file} mips/r3900.h mips/elf.h"
- + tmake_file="mips/t-r3900 mips/t-libgcc-mips16"
- + ;;
- +mmix-knuth-mmixware)
- + need_64bit_hwint=yes
- + ;;
- +mn10300-*-*)
- + tm_file="dbxelf.h elfos.h svr4.h ${tm_file}"
- + if test x$stabs = xyes
- + then
- + tm_file="${tm_file} dbx.h"
- + fi
- + use_collect2=no
- + ;;
- +pdp11-*-bsd)
- + tm_file="${tm_file} pdp11/2bsd.h"
- + use_fixproto=yes
- + ;;
- +pdp11-*-*)
- + ;;
- +picochip-*)
- + # Nothing special
- + ;;
- +# port not yet contributed
- +#powerpc-*-openbsd*)
- +# tmake_file="${tmake_file} rs6000/t-fprules rs6000/t-fprules-fpbit "
- +# extra_headers=
- +# ;;
- +powerpc64-*-linux*)
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h"
- + test x$with_cpu != x || cpu_is_64bit=yes
- + test x$cpu_is_64bit != xyes || tm_file="${tm_file} rs6000/default64.h"
- + tm_file="rs6000/biarch64.h ${tm_file} rs6000/linux64.h"
- + if test x${enable_secureplt} = xyes; then
- + tm_file="rs6000/secureplt.h ${tm_file}"
- + fi
- + extra_options="${extra_options} rs6000/sysv4.opt rs6000/linux64.opt"
- + tmake_file="t-dfprules rs6000/t-fprules ${tmake_file} rs6000/t-ppccomm rs6000/t-linux64 rs6000/t-fprules-softfp soft-fp/t-softfp"
- + ;;
- +powerpc64-*-gnu*)
- + tm_file="${cpu_type}/${cpu_type}.h elfos.h svr4.h freebsd-spec.h gnu.h rs6000/sysv4.h rs6000/linux64.h rs6000/gnu.h"
- + extra_options="${extra_options} rs6000/sysv4.opt rs6000/linux64.opt"
- + tmake_file="rs6000/t-fprules t-slibgcc-elf-ver t-gnu rs6000/t-linux64 rs6000/t-fprules-softfp soft-fp/t-softfp"
- + ;;
- +powerpc-*-darwin*)
- + extra_options="${extra_options} rs6000/darwin.opt"
- + extra_parts="crt2.o"
- + case ${target} in
- + *-darwin1[0-9]* | *-darwin[8-9]*)
- + tmake_file="${tmake_file} rs6000/t-darwin8"
- + tm_file="${tm_file} rs6000/darwin8.h"
- + ;;
- + *-darwin7*)
- + tm_file="${tm_file} rs6000/darwin7.h"
- + ;;
- + *-darwin[0-6]*)
- + ;;
- + esac
- + extra_headers=altivec.h
- + ;;
- +powerpc64-*-darwin*)
- + tm_file="${tm_file} ${cpu_type}/darwin8.h ${cpu_type}/darwin64.h"
- + extra_options="${extra_options} ${cpu_type}/darwin.opt"
- + # We're omitting t-darwin8 to avoid building any multilibs
- + extra_headers=altivec.h
- + ;;
- +powerpc*-*-freebsd*)
- + tm_file="${tm_file} dbxelf.h elfos.h ${fbsd_tm_file} rs6000/sysv4.h rs6000/freebsd.h"
- + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcos ${tmake_file} rs6000/t-ppccomm"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + ;;
- +powerpc-*-netbsd*)
- + tm_file="${tm_file} dbxelf.h elfos.h netbsd.h netbsd-elf.h freebsd-spec.h rs6000/sysv4.h rs6000/netbsd.h"
- + tmake_file="${tmake_file} rs6000/t-netbsd"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + ;;
- +powerpc-*-eabispe*)
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/eabi.h rs6000/e500.h rs6000/eabispe.h"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + tmake_file="rs6000/t-spe rs6000/t-ppccomm"
- + ;;
- +powerpc-*-eabisimaltivec*)
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/eabi.h rs6000/e500.h rs6000/eabisim.h rs6000/eabialtivec.h"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcendian rs6000/t-ppccomm"
- + ;;
- +powerpc-*-eabisim*)
- + tm_file="${tm_file} dbxelf.h elfos.h usegas.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/eabi.h rs6000/e500.h rs6000/eabisim.h"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcgas rs6000/t-ppccomm"
- + ;;
- +powerpc-*-elf*)
- + tm_file="${tm_file} dbxelf.h elfos.h usegas.h svr4.h freebsd-spec.h rs6000/sysv4.h"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcgas rs6000/t-ppccomm"
- + ;;
- +powerpc-*-eabialtivec*)
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/eabi.h rs6000/e500.h rs6000/eabialtivec.h"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcendian rs6000/t-ppccomm"
- + ;;
- +powerpc-xilinx-eabi*)
- + tm_file="${tm_file} dbxelf.h elfos.h usegas.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/eabi.h rs6000/singlefp.h"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcgas rs6000/t-ppccomm"
- + ;;
- +powerpc-*-eabi*)
- + tm_file="${tm_file} dbxelf.h elfos.h usegas.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/eabi.h rs6000/e500.h"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcgas rs6000/t-ppccomm"
- + ;;
- +powerpc-*-rtems*)
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/eabi.h rs6000/e500.h rs6000/rtems.h rtems.h"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-rtems t-rtems rs6000/t-ppccomm"
- + ;;
- +powerpc-*-linux*altivec*)
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/linux.h rs6000/linuxaltivec.h"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + tmake_file="rs6000/t-fprules rs6000/t-fprules-softfp soft-fp/t-softfp rs6000/t-ppcos ${tmake_file} rs6000/t-ppccomm"
- + ;;
- +powerpc-*-linux*spe*)
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/linux.h rs6000/linuxspe.h rs6000/e500.h"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + tmake_file="t-dfprules rs6000/t-fprules rs6000/t-fprules-softfp soft-fp/t-softfp rs6000/t-ppcos ${tmake_file} rs6000/t-ppccomm"
- + ;;
- +powerpc-*-linux*paired*)
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/linux.h rs6000/750cl.h"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + tmake_file="rs6000/t-fprules rs6000/t-fprules-softfp soft-fp/t-softfp rs6000/t-ppcos ${tmake_file} rs6000/t-ppccomm"
- + ;;
- +powerpc-*-linux*)
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + tmake_file="t-dfprules rs6000/t-fprules rs6000/t-ppcos ${tmake_file} rs6000/t-ppccomm"
- + case ${enable_targets}:${cpu_is_64bit} in
- + *powerpc64* | all:* | *:yes)
- + if test x$cpu_is_64bit = xyes; then
- + tm_file="${tm_file} rs6000/default64.h"
- + fi
- + tm_file="rs6000/biarch64.h ${tm_file} rs6000/linux64.h"
- + tmake_file="$tmake_file rs6000/t-linux64"
- + extra_options="${extra_options} rs6000/linux64.opt"
- + ;;
- + *)
- + tm_file="${tm_file} rs6000/linux.h"
- + ;;
- + esac
- + tmake_file="${tmake_file} rs6000/t-fprules-softfp soft-fp/t-softfp"
- + if test x${enable_secureplt} = xyes; then
- + tm_file="rs6000/secureplt.h ${tm_file}"
- + fi
- + ;;
- +powerpc-*-gnu-gnualtivec*)
- + tm_file="${cpu_type}/${cpu_type}.h elfos.h svr4.h freebsd-spec.h gnu.h rs6000/sysv4.h rs6000/linux.h rs6000/linuxaltivec.h rs6000/gnu.h"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcos t-slibgcc-elf-ver t-gnu rs6000/t-ppccomm"
- + if test x$enable_threads = xyes; then
- + thread_file='posix'
- + fi
- + ;;
- +powerpc-*-gnu*)
- + tm_file="${cpu_type}/${cpu_type}.h elfos.h svr4.h freebsd-spec.h gnu.h rs6000/sysv4.h rs6000/linux.h rs6000/gnu.h"
- + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcos t-slibgcc-elf-ver t-gnu rs6000/t-ppccomm"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + if test x$enable_threads = xyes; then
- + thread_file='posix'
- + fi
- + ;;
- +powerpc-wrs-vxworks|powerpc-wrs-vxworksae)
- + tm_file="${tm_file} elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h"
- + tmake_file="${tmake_file} rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppccomm rs6000/t-vxworks"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + extra_headers=ppc-asm.h
- + case ${target} in
- + *-vxworksae*)
- + tm_file="${tm_file} vx-common.h vxworksae.h rs6000/vxworks.h rs6000/e500.h rs6000/vxworksae.h"
- + tmake_file="${tmake_file} rs6000/t-vxworksae"
- + ;;
- + *-vxworks*)
- + tm_file="${tm_file} vx-common.h vxworks.h rs6000/vxworks.h rs6000/e500.h"
- + ;;
- + esac
- + ;;
- +powerpc-*-lynxos*)
- + xm_defines=POSIX
- + tm_file="${tm_file} dbxelf.h elfos.h rs6000/sysv4.h rs6000/lynx.h lynx.h"
- + tmake_file="t-lynx rs6000/t-lynx"
- + extra_options="${extra_options} rs6000/sysv4.opt lynx.opt"
- + extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o"
- + extra_options="${extra_options} lynx.opt"
- + thread_file=lynx
- + gnu_ld=yes
- + gas=yes
- + ;;
- +powerpcle-*-elf*)
- + tm_file="${tm_file} dbxelf.h elfos.h usegas.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/sysv4le.h"
- + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcgas rs6000/t-ppccomm"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + ;;
- +powerpcle-*-eabisim*)
- + tm_file="${tm_file} dbxelf.h elfos.h usegas.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/sysv4le.h rs6000/eabi.h rs6000/e500.h rs6000/eabisim.h"
- + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcgas rs6000/t-ppccomm"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + ;;
- +powerpcle-*-eabi*)
- + tm_file="${tm_file} dbxelf.h elfos.h usegas.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/sysv4le.h rs6000/eabi.h rs6000/e500.h"
- + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcgas rs6000/t-ppccomm"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + ;;
- +powerpc-xilinx-eabi*)
- + tm_file="${tm_file} dbxelf.h elfos.h usegas.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/eabi.h rs6000/singlefp.h rs6000/xfpu.h"
- + extra_options="${extra_options} rs6000/sysv4.opt"
- + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-ppcgas rs6000/t-ppccomm"
- + ;;
- +rs6000-ibm-aix4.[12]* | powerpc-ibm-aix4.[12]*)
- + tm_file="${tm_file} rs6000/aix.h rs6000/aix41.h rs6000/xcoff.h"
- + tmake_file="rs6000/t-fprules rs6000/t-fprules-fpbit rs6000/t-newas"
- + extra_options="${extra_options} rs6000/aix41.opt"
- + use_collect2=yes
- + extra_headers=
- + use_fixproto=yes
- + ;;
- +rs6000-ibm-aix4.[3456789]* | powerpc-ibm-aix4.[3456789]*)
- + tm_file="rs6000/biarch64.h ${tm_file} rs6000/aix.h rs6000/aix43.h rs6000/xcoff.h"
- + tmake_file=rs6000/t-aix43
- + extra_options="${extra_options} rs6000/aix64.opt"
- + use_collect2=yes
- + thread_file='aix'
- + extra_headers=
- + ;;
- +rs6000-ibm-aix5.1.* | powerpc-ibm-aix5.1.*)
- + tm_file="rs6000/biarch64.h ${tm_file} rs6000/aix.h rs6000/aix51.h rs6000/xcoff.h"
- + extra_options="${extra_options} rs6000/aix64.opt"
- + tmake_file=rs6000/t-aix43
- + use_collect2=yes
- + thread_file='aix'
- + extra_headers=
- + ;;
- +rs6000-ibm-aix5.2.* | powerpc-ibm-aix5.2.*)
- + tm_file="${tm_file} rs6000/aix.h rs6000/aix52.h rs6000/xcoff.h"
- + tmake_file=rs6000/t-aix52
- + extra_options="${extra_options} rs6000/aix64.opt"
- + use_collect2=yes
- + thread_file='aix'
- + extra_headers=
- + ;;
- +rs6000-ibm-aix5.3.* | powerpc-ibm-aix5.3.*)
- + tm_file="${tm_file} rs6000/aix.h rs6000/aix53.h rs6000/xcoff.h"
- + tmake_file=rs6000/t-aix52
- + extra_options="${extra_options} rs6000/aix64.opt"
- + use_collect2=yes
- + thread_file='aix'
- + extra_headers=altivec.h
- + ;;
- +rs6000-ibm-aix[6789].* | powerpc-ibm-aix[6789].*)
- + tm_file="${tm_file} rs6000/aix.h rs6000/aix61.h rs6000/xcoff.h"
- + tmake_file=rs6000/t-aix52
- + extra_options="${extra_options} rs6000/aix64.opt"
- + use_collect2=yes
- + thread_file='aix'
- + extra_headers=altivec.h
- + ;;
- +s390-*-linux*)
- + tm_file="s390/s390.h dbxelf.h elfos.h svr4.h linux.h s390/linux.h"
- + tmake_file="${tmake_file} t-dfprules s390/t-crtstuff s390/t-linux"
- + ;;
- +s390x-*-linux*)
- + tm_file="s390/s390x.h s390/s390.h dbxelf.h elfos.h svr4.h linux.h s390/linux.h"
- + tm_p_file=s390/s390-protos.h
- + md_file=s390/s390.md
- + extra_modes=s390/s390-modes.def
- + out_file=s390/s390.c
- + tmake_file="${tmake_file} t-dfprules s390/t-crtstuff s390/t-linux s390/t-linux64"
- + ;;
- +s390x-ibm-tpf*)
- + tm_file="s390/s390x.h s390/s390.h dbxelf.h elfos.h svr4.h s390/tpf.h"
- + tm_p_file=s390/s390-protos.h
- + md_file=s390/s390.md
- + extra_modes=s390/s390-modes.def
- + out_file=s390/s390.c
- + extra_parts="crtbeginS.o crtendS.o"
- + tmake_file="s390/t-crtstuff s390/t-tpf"
- + thread_file='tpf'
- + extra_options="${extra_options} s390/tpf.opt"
- + ;;
- +score-*-elf)
- + tm_file="dbxelf.h elfos.h score/elf.h score/score.h"
- + tmake_file=score/t-score-elf
- + extra_objs="score7.o score3.o"
- + ;;
- +sh-*-elf* | sh[12346l]*-*-elf* | \
- +sh-*-symbianelf* | sh[12346l]*-*-symbianelf* | \
- + sh-*-linux* | sh[2346lbe]*-*-linux* | \
- + sh-*-netbsdelf* | shl*-*-netbsdelf* | sh5-*-netbsd* | sh5l*-*-netbsd* | \
- + sh64-*-netbsd* | sh64l*-*-netbsd*)
- + tmake_file="${tmake_file} sh/t-sh sh/t-elf"
- + if test x${with_endian} = x; then
- + case ${target} in
- + sh[1234]*be-*-* | sh[1234]*eb-*-*) with_endian=big ;;
- + shbe-*-* | sheb-*-*) with_endian=big,little ;;
- + sh[1234]l* | sh[34]*-*-linux*) with_endian=little ;;
- + shl* | sh64l* | sh*-*-linux* | \
- + sh5l* | sh-superh-elf) with_endian=little,big ;;
- + sh[1234]*-*-*) with_endian=big ;;
- + *) with_endian=big,little ;;
- + esac
- + fi
- + case ${with_endian} in
- + big|little) tmake_file="${tmake_file} sh/t-1e" ;;
- + big,little|little,big) ;;
- + *) echo "with_endian=${with_endian} not supported."; exit 1 ;;
- + esac
- + case ${with_endian} in
- + little*) tm_file="sh/little.h ${tm_file}" ;;
- + esac
- + tm_file="${tm_file} dbxelf.h elfos.h"
- + case ${target} in
- + sh*-*-netbsd*) ;;
- + *) tm_file="${tm_file} svr4.h" ;;
- + esac
- + tm_file="${tm_file} sh/elf.h"
- + case ${target} in
- + sh*-*-linux*) tmake_file="${tmake_file} sh/t-linux"
- + tm_file="${tm_file} linux.h sh/linux.h" ;;
- + sh*-*-netbsd*) tm_file="${tm_file} netbsd.h netbsd-elf.h sh/netbsd-elf.h" ;;
- + sh*-superh-elf) if test x$with_libgloss != xno; then
- + with_libgloss=yes
- + tm_file="${tm_file} sh/newlib.h"
- + fi
- + tm_file="${tm_file} sh/embed-elf.h sh/superh.h"
- + tmake_file="${tmake_file} sh/t-superh"
- + extra_options="${extra_options} sh/superh.opt" ;;
- + *) if test x$with_newlib = xyes \
- + && test x$with_libgloss = xyes; then
- + tm_file="${tm_file} sh/newlib.h"
- + fi
- + tm_file="${tm_file} sh/embed-elf.h" ;;
- + esac
- + case ${target} in
- + sh5*-*-netbsd*)
- + # SHmedia, 32-bit ABI
- + tmake_file="${tmake_file} sh/t-sh64 sh/t-netbsd"
- + ;;
- + sh64*-netbsd*)
- + # SHmedia, 64-bit ABI
- + tmake_file="${tmake_file} sh/t-sh64 sh/t-netbsd sh/t-netbsd-sh5-64"
- + ;;
- + *-*-netbsd)
- + tmake_file="${tmake_file} sh/t-netbsd"
- + ;;
- + sh64*-*-linux*)
- + tmake_file="${tmake_file} sh/t-sh64 sh/t-linux64"
- + tm_file="${tm_file} sh/sh64.h"
- + extra_headers="shmedia.h ushmedia.h sshmedia.h"
- + ;;
- + sh64*)
- + tmake_file="${tmake_file} sh/t-sh64"
- + tm_file="${tm_file} sh/sh64.h"
- + extra_headers="shmedia.h ushmedia.h sshmedia.h"
- + ;;
- + *-*-symbianelf*)
- + tmake_file="sh/t-symbian"
- + tm_file="sh/symbian-pre.h sh/little.h ${tm_file} sh/symbian-post.h"
- + extra_objs="symbian.o"
- + extra_parts="crt1.o crti.o crtn.o crtbegin.o crtend.o crtbeginS.o crtendS.o"
- + ;;
- + esac
- + # sed el/eb endian suffixes away to avoid confusion with sh[23]e
- + case `echo ${target} | sed 's/e[lb]-/-/'` in
- + sh64*-*-netbsd*) sh_cpu_target=sh5-64media ;;
- + sh64* | sh5*-*-netbsd*) sh_cpu_target=sh5-32media ;;
- + sh4a_single_only*) sh_cpu_target=sh4a-single-only ;;
- + sh4a_single*) sh_cpu_target=sh4a-single ;;
- + sh4a_nofpu*) sh_cpu_target=sh4a-nofpu ;;
- + sh4al) sh_cpu_target=sh4al ;;
- + sh4a*) sh_cpu_target=sh4a ;;
- + sh4_single_only*) sh_cpu_target=sh4-single-only ;;
- + sh4_single*) sh_cpu_target=sh4-single ;;
- + sh4_nofpu*) sh_cpu_target=sh4-nofpu ;;
- + sh4* | sh-superh-*) sh_cpu_target=sh4 ;;
- + sh3e*) sh_cpu_target=sh3e ;;
- + sh*-*-netbsd* | sh3*) sh_cpu_target=sh3 ;;
- + sh2a_single_only*) sh_cpu_target=sh2a-single-only ;;
- + sh2a_single*) sh_cpu_target=sh2a-single ;;
- + sh2a_nofpu*) sh_cpu_target=sh2a-nofpu ;;
- + sh2a*) sh_cpu_target=sh2a ;;
- + sh2e*) sh_cpu_target=sh2e ;;
- + sh2*) sh_cpu_target=sh2 ;;
- + *) sh_cpu_target=sh1 ;;
- + esac
- + # did the user say --without-fp ?
- + if test x$with_fp = xno; then
- + case ${sh_cpu_target} in
- + sh5-*media) sh_cpu_target=${sh_cpu_target}-nofpu ;;
- + sh4al | sh1) ;;
- + sh4a* ) sh_cpu_target=sh4a-nofpu ;;
- + sh4*) sh_cpu_target=sh4-nofpu ;;
- + sh3*) sh_cpu_target=sh3 ;;
- + sh2a*) sh_cpu_target=sh2a-nofpu ;;
- + sh2*) sh_cpu_target=sh2 ;;
- + *) echo --without-fp not available for $target: ignored
- + esac
- + tm_defines="$tm_defines STRICT_NOFPU=1"
- + fi
- + sh_cpu_default="`echo $with_cpu|sed s/^m/sh/|tr A-Z_ a-z-`"
- + case $sh_cpu_default in
- + sh5-64media-nofpu | sh5-64media | \
- + sh5-32media-nofpu | sh5-32media | sh5-compact-nofpu | sh5-compact | \
- + sh2a-single-only | sh2a-single | sh2a-nofpu | sh2a | \
- + sh4a-single-only | sh4a-single | sh4a-nofpu | sh4a | sh4al | \
- + sh4-single-only | sh4-single | sh4-nofpu | sh4 | sh4-300 | \
- + sh3e | sh3 | sh2e | sh2 | sh1) ;;
- + "") sh_cpu_default=${sh_cpu_target} ;;
- + *) echo "with_cpu=$with_cpu not supported"; exit 1 ;;
- + esac
- + sh_multilibs=${with_multilib_list}
- + if test x${sh_multilibs} = x ; then
- + case ${target} in
- + sh64-superh-linux* | \
- + sh[1234]*) sh_multilibs=${sh_cpu_target} ;;
- + sh64* | sh5*) sh_multilibs=m5-32media,m5-32media-nofpu,m5-compact,m5-compact-nofpu,m5-64media,m5-64media-nofpu ;;
- + sh-superh-*) sh_multilibs=m4,m4-single,m4-single-only,m4-nofpu ;;
- + sh*-*-linux*) sh_multilibs=m1,m3e,m4 ;;
- + sh*-*-netbsd*) sh_multilibs=m3,m3e,m4 ;;
- + *) sh_multilibs=m1,m2,m2e,m4,m4-single,m4-single-only,m2a,m2a-single ;;
- + esac
- + if test x$with_fp = xno; then
- + sh_multilibs="`echo $sh_multilibs|sed -e s/m4/sh4-nofpu/ -e s/,m4-[^,]*//g -e s/,m[23]e// -e s/m2a,m2a-single/m2a-nofpu/ -e s/m5-..m....,//g`"
- + fi
- + fi
- + target_cpu_default=SELECT_`echo ${sh_cpu_default}|tr abcdefghijklmnopqrstuvwxyz- ABCDEFGHIJKLMNOPQRSTUVWXYZ_`
- + tm_defines=${tm_defines}' SH_MULTILIB_CPU_DEFAULT=\"'`echo $sh_cpu_default|sed s/sh/m/`'\"'
- + sh_multilibs=`echo $sh_multilibs,$sh_cpu_default | sed -e 's/[ ,/][ ,]*/ /g' -e 's/ $//' -e 's/^m/sh/' -e 's/ m/ sh/g' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ_ abcdefghijklmnopqrstuvwxyz-`
- + for sh_multilib in ${sh_multilibs}; do
- + case ${sh_multilib} in
- + sh1 | sh2 | sh2e | sh3 | sh3e | \
- + sh4 | sh4-single | sh4-single-only | sh4-nofpu | sh4-300 |\
- + sh4a | sh4a-single | sh4a-single-only | sh4a-nofpu | sh4al | \
- + sh2a | sh2a-single | sh2a-single-only | sh2a-nofpu | \
- + sh5-64media | sh5-64media-nofpu | \
- + sh5-32media | sh5-32media-nofpu | \
- + sh5-compact | sh5-compact-nofpu)
- + tmake_file="${tmake_file} sh/t-mlib-${sh_multilib}"
- + tm_defines="$tm_defines SUPPORT_`echo $sh_multilib|tr abcdefghijklmnopqrstuvwxyz- ABCDEFGHIJKLMNOPQRSTUVWXYZ_`=1"
- + ;;
- + *)
- + echo "with_multilib_list=${sh_multilib} not supported."
- + exit 1
- + ;;
- + esac
- + done
- + if test x${enable_incomplete_targets} = xyes ; then
- + tm_defines="$tm_defines SUPPORT_SH1=1 SUPPORT_SH2E=1 SUPPORT_SH4=1 SUPPORT_SH4_SINGLE=1 SUPPORT_SH2A=1 SUPPORT_SH2A_SINGLE=1 SUPPORT_SH5_32MEDIA=1 SUPPORT_SH5_32MEDIA_NOFPU=1 SUPPORT_SH5_64MEDIA=1 SUPPORT_SH5_64MEDIA_NOFPU=1"
- + fi
- + ;;
- +sh-*-rtems*)
- + tmake_file="sh/t-sh sh/t-elf t-rtems sh/t-rtems"
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h sh/elf.h sh/embed-elf.h sh/rtemself.h rtems.h"
- + ;;
- +sh-wrs-vxworks)
- + tmake_file="$tmake_file sh/t-sh sh/t-elf sh/t-vxworks"
- + tm_file="${tm_file} elfos.h svr4.h sh/elf.h sh/embed-elf.h vx-common.h vxworks.h sh/vxworks.h"
- + ;;
- +sh-*-*)
- + tm_file="${tm_file} dbxcoff.h sh/coff.h"
- + ;;
- +sparc-*-netbsdelf*)
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h netbsd.h netbsd-elf.h sparc/netbsd-elf.h"
- + extra_options="${extra_options} sparc/long-double-switch.opt"
- + ;;
- +sparc64-*-openbsd*)
- + tm_file="sparc/openbsd1-64.h ${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sparc/sp64-elf.h openbsd.h sparc/openbsd64.h"
- + extra_options="${extra_options} sparc/little-endian.opt"
- + gas=yes gnu_ld=yes
- + with_cpu=ultrasparc
- + ;;
- +sparc-*-elf*)
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sparc/sp-elf.h"
- + tmake_file="sparc/t-elf sparc/t-crtfm"
- + extra_parts="crti.o crtn.o crtbegin.o crtend.o"
- + ;;
- +sparc-*-linux*) # SPARC's running GNU/Linux, libc6
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sparc/gas.h linux.h"
- + extra_options="${extra_options} sparc/long-double-switch.opt"
- + tmake_file="${tmake_file} sparc/t-linux"
- + if test x$enable_targets = xall; then
- + tm_file="sparc/biarch64.h ${tm_file} sparc/linux64.h"
- + tmake_file="${tmake_file} sparc/t-linux64"
- + else
- + tm_file="${tm_file} sparc/linux.h"
- + fi
- + tmake_file="${tmake_file} sparc/t-crtfm"
- + ;;
- +sparc-*-rtems*)
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sparc/sp-elf.h sparc/rtemself.h rtems.h"
- + tmake_file="sparc/t-elf sparc/t-crtfm t-rtems"
- + extra_parts="crti.o crtn.o crtbegin.o crtend.o"
- + ;;
- +sparc64-*-solaris2* | sparcv9-*-solaris2*)
- + tm_file="sparc/biarch64.h ${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sol2.h"
- + case ${target} in
- + *-*-solaris2.1[0-9]*)
- + tm_file="${tm_file} sol2-10.h"
- + ;;
- + esac
- + tm_file="${tm_file} sparc/sol2.h sparc/sol2-64.h sparc/sol2-bi.h"
- + if test x$gnu_ld = xyes; then
- + tm_file="${tm_file} sol2-gld.h sparc/sol2-gld-bi.h"
- + fi
- + if test x$gas = xyes; then
- + tm_file="${tm_file} sparc/sol2-gas.h sparc/sol2-gas-bi.h"
- + fi
- + tm_file="${tm_file} tm-dwarf2.h"
- + tmake_file="t-sol2 sparc/t-sol2 sparc/t-sol2-64 sparc/t-crtfm"
- + if test x$gnu_ld = xyes; then
- + tmake_file="$tmake_file t-slibgcc-elf-ver"
- + else
- + tmake_file="$tmake_file t-slibgcc-sld"
- + fi
- + if test x$gas = xyes; then
- + tm_file="usegas.h ${tm_file}"
- + fi
- + c_target_objs="sol2-c.o"
- + cxx_target_objs="sol2-c.o"
- + extra_objs="sol2.o"
- + tm_p_file="${tm_p_file} sol2-protos.h"
- + extra_parts="crt1.o crti.o crtn.o gcrt1.o crtbegin.o crtend.o"
- + case ${enable_threads}:${have_pthread_h}:${have_thread_h} in
- + "":yes:* | yes:yes:* ) thread_file=posix ;;
- + "":*:yes | yes:*:yes ) thread_file=solaris ;;
- + esac
- + ;;
- +sparc-*-solaris2*)
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sol2.h"
- + case ${target} in
- + *-*-solaris2.1[0-9]*)
- + tm_file="${tm_file} sol2-10.h"
- + ;;
- + esac
- + tm_file="${tm_file} sparc/sol2.h"
- + if test x$gnu_ld = xyes; then
- + tm_file="${tm_file} sol2-gld.h"
- + fi
- + if test x$gas = xyes; then
- + tm_file="${tm_file} sparc/sol2-gas.h"
- + fi
- + tmake_file="t-sol2 sparc/t-sol2 sparc/t-crtfm"
- + if test x$gnu_ld = xyes; then
- + tmake_file="$tmake_file t-slibgcc-elf-ver"
- + else
- + tmake_file="$tmake_file t-slibgcc-sld"
- + fi
- + tm_file="sparc/biarch64.h ${tm_file} sparc/sol2-bi.h"
- + if test x$gnu_ld = xyes; then
- + tm_file="${tm_file} sparc/sol2-gld-bi.h"
- + fi
- + if test x$gas = xyes; then
- + tm_file="${tm_file} sparc/sol2-gas-bi.h"
- + fi
- + if test x$gas = xyes; then
- + tm_file="usegas.h ${tm_file}"
- + fi
- + tm_file="${tm_file} tm-dwarf2.h"
- + tmake_file="$tmake_file sparc/t-sol2-64"
- + test x$with_cpu != x || with_cpu=v9
- + c_target_objs="sol2-c.o"
- + cxx_target_objs="sol2-c.o"
- + extra_objs="sol2.o"
- + tm_p_file="${tm_p_file} sol2-protos.h"
- + extra_parts="crt1.o crti.o crtn.o gcrt1.o gmon.o crtbegin.o crtend.o"
- + case ${enable_threads}:${have_pthread_h}:${have_thread_h} in
- + "":yes:* | yes:yes:* )
- + thread_file=posix
- + ;;
- + "":*:yes | yes:*:yes )
- + thread_file=solaris
- + ;;
- + esac
- + ;;
- +sparc-wrs-vxworks)
- + tm_file="${tm_file} elfos.h svr4.h sparc/sysv4.h vx-common.h vxworks.h sparc/vxworks.h"
- + tmake_file="${tmake_file} sparc/t-vxworks"
- + ;;
- +sparc64-*-elf*)
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sparc/sp64-elf.h"
- + extra_options="${extra_options} sparc/little-endian.opt"
- + tmake_file="${tmake_file} sparc/t-crtfm"
- + extra_parts="crtbegin.o crtend.o"
- + ;;
- +sparc64-*-freebsd*|ultrasparc-*-freebsd*)
- + tm_file="${tm_file} ${fbsd_tm_file} dbxelf.h elfos.h sparc/sysv4.h sparc/freebsd.h"
- + extra_options="${extra_options} sparc/long-double-switch.opt"
- + tmake_file="${tmake_file} sparc/t-crtfm"
- + case "x$with_cpu" in
- + xultrasparc) ;;
- + x) with_cpu=ultrasparc ;;
- + *) echo "$with_cpu not supported for freebsd target"; exit 1 ;;
- + esac
- + ;;
- +sparc64-*-linux*) # 64-bit SPARC's running GNU/Linux
- + tm_file="sparc/biarch64.h ${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sparc/gas.h linux.h sparc/linux64.h"
- + extra_options="${extra_options} sparc/long-double-switch.opt"
- + tmake_file="${tmake_file} sparc/t-linux sparc/t-linux64 sparc/t-crtfm"
- + ;;
- +sparc64-*-netbsd*)
- + tm_file="sparc/biarch64.h ${tm_file}"
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h netbsd.h netbsd-elf.h sparc/netbsd-elf.h"
- + extra_options="${extra_options} sparc/long-double-switch.opt"
- + tmake_file="${tmake_file} sparc/t-netbsd64"
- + ;;
- +spu-*-elf*)
- + tm_file="dbxelf.h elfos.h spu/spu-elf.h spu/spu.h"
- + tmake_file="spu/t-spu-elf"
- + extra_headers="spu_intrinsics.h spu_internals.h vmx2spu.h spu_mfcio.h vec_types.h"
- + extra_modes=spu/spu-modes.def
- + c_target_objs="${c_target_objs} spu-c.o"
- + cxx_target_objs="${cxx_target_objs} spu-c.o"
- + ;;
- +v850e1-*-*)
- + target_cpu_default="TARGET_CPU_v850e1"
- + tm_file="dbxelf.h elfos.h svr4.h v850/v850.h"
- + tm_p_file=v850/v850-protos.h
- + tmake_file=v850/t-v850e
- + md_file=v850/v850.md
- + out_file=v850/v850.c
- + extra_options="${extra_options} v850/v850.opt"
- + if test x$stabs = xyes
- + then
- + tm_file="${tm_file} dbx.h"
- + fi
- + use_collect2=no
- + c_target_objs="v850-c.o"
- + cxx_target_objs="v850-c.o"
- + ;;
- +v850e-*-*)
- + target_cpu_default="TARGET_CPU_v850e"
- + tm_file="dbxelf.h elfos.h svr4.h v850/v850.h"
- + tm_p_file=v850/v850-protos.h
- + tmake_file=v850/t-v850e
- + md_file=v850/v850.md
- + out_file=v850/v850.c
- + extra_options="${extra_options} v850/v850.opt"
- + if test x$stabs = xyes
- + then
- + tm_file="${tm_file} dbx.h"
- + fi
- + use_collect2=no
- + c_target_objs="v850-c.o"
- + cxx_target_objs="v850-c.o"
- + ;;
- +v850-*-*)
- + target_cpu_default="TARGET_CPU_generic"
- + tm_file="dbxelf.h elfos.h svr4.h ${tm_file}"
- + tmake_file=v850/t-v850
- + if test x$stabs = xyes
- + then
- + tm_file="${tm_file} dbx.h"
- + fi
- + use_collect2=no
- + c_target_objs="v850-c.o"
- + cxx_target_objs="v850-c.o"
- + ;;
- +vax-*-netbsdelf*)
- + tm_file="${tm_file} elfos.h netbsd.h netbsd-elf.h vax/elf.h vax/netbsd-elf.h"
- + ;;
- +vax-*-netbsd*)
- + tm_file="${tm_file} netbsd.h netbsd-aout.h vax/netbsd.h"
- + tmake_file=t-netbsd
- + extra_parts=""
- + use_collect2=yes
- + ;;
- +vax-*-openbsd*)
- + tm_file="vax/vax.h vax/openbsd1.h openbsd.h vax/openbsd.h"
- + use_collect2=yes
- + ;;
- +xstormy16-*-elf)
- + # For historical reasons, the target files omit the 'x'.
- + tm_file="dbxelf.h elfos.h svr4.h stormy16/stormy16.h"
- + tm_p_file=stormy16/stormy16-protos.h
- + md_file=stormy16/stormy16.md
- + out_file=stormy16/stormy16.c
- + extra_options=stormy16/stormy16.opt
- + tmake_file="stormy16/t-stormy16"
- + extra_parts="crtbegin.o crtend.o"
- + ;;
- +xtensa*-*-elf*)
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h xtensa/elf.h"
- + tmake_file="xtensa/t-xtensa xtensa/t-elf"
- + ;;
- +xtensa*-*-linux*)
- + tm_file="${tm_file} dbxelf.h elfos.h svr4.h linux.h xtensa/linux.h"
- + tmake_file="${tmake_file} xtensa/t-xtensa xtensa/t-linux"
- + ;;
- +am33_2.0-*-linux*)
- + tm_file="mn10300/mn10300.h dbxelf.h elfos.h linux.h mn10300/linux.h"
- + tmake_file="${tmake_file} mn10300/t-linux"
- + gas=yes gnu_ld=yes
- + extra_parts="crtbegin.o crtend.o crtbeginS.o crtendS.o"
- + use_collect2=no
- + ;;
- +m32c-*-rtems*)
- + tm_file="dbxelf.h elfos.h svr4.h ${tm_file} m32c/rtems.h rtems.h"
- + tmake_file="${tmake_file} t-rtems"
- + c_target_objs="m32c-pragma.o"
- + cxx_target_objs="m32c-pragma.o"
- + ;;
- +m32c-*-elf*)
- + tm_file="dbxelf.h elfos.h svr4.h ${tm_file}"
- + c_target_objs="m32c-pragma.o"
- + cxx_target_objs="m32c-pragma.o"
- + ;;
- +*)
- + echo "*** Configuration ${target} not supported" 1>&2
- + exit 1
- + ;;
- +esac
- +
- +case ${target} in
- +i[34567]86-*-linux* | x86_64-*-linux*)
- + tmake_file="${tmake_file} i386/t-pmm_malloc i386/t-i386"
- + ;;
- +i[34567]86-*-* | x86_64-*-*)
- + tmake_file="${tmake_file} i386/t-gmm_malloc i386/t-i386"
- + ;;
- +esac
- +
- +# Support for --with-cpu and related options (and a few unrelated options,
- +# too).
- +case ${with_cpu} in
- + yes | no)
- + echo "--with-cpu must be passed a value" 1>&2
- + exit 1
- + ;;
- +esac
- +
- +# If there is no $with_cpu option, try to infer one from ${target}.
- +# This block sets nothing except for with_cpu.
- +if test x$with_cpu = x ; then
- + case ${target} in
- + i386-*-*)
- + with_cpu=i386
- + ;;
- + i486-*-*)
- + with_cpu=i486
- + ;;
- + i586-*-*)
- + case ${target_noncanonical} in
- + k6_2-*)
- + with_cpu=k6-2
- + ;;
- + k6_3-*)
- + with_cpu=k6-3
- + ;;
- + k6-*)
- + with_cpu=k6
- + ;;
- + pentium_mmx-*|winchip_c6-*|winchip2-*|c3-*)
- + with_cpu=pentium-mmx
- + ;;
- + *)
- + with_cpu=pentium
- + ;;
- + esac
- + ;;
- + i686-*-* | i786-*-*)
- + case ${target_noncanonical} in
- + amdfam10-*|barcelona-*)
- + with_cpu=amdfam10
- + ;;
- + k8_sse3-*|opteron_sse3-*|athlon64_sse3-*)
- + with_cpu=k8-sse3
- + ;;
- + k8-*|opteron-*|athlon64-*|athlon_fx-*)
- + with_cpu=k8
- + ;;
- + athlon_xp-*|athlon_mp-*|athlon_4-*)
- + with_cpu=athlon-4
- + ;;
- + athlon_tbird-*|athlon-*)
- + with_cpu=athlon
- + ;;
- + geode-*)
- + with_cpu=geode
- + ;;
- + pentium2-*)
- + with_cpu=pentium2
- + ;;
- + pentium3-*|pentium3m-*)
- + with_cpu=pentium3
- + ;;
- + pentium4-*|pentium4m-*)
- + with_cpu=pentium4
- + ;;
- + prescott-*)
- + with_cpu=prescott
- + ;;
- + nocona-*)
- + with_cpu=nocona
- + ;;
- + core2-*)
- + with_cpu=core2
- + ;;
- + pentium_m-*)
- + with_cpu=pentium-m
- + ;;
- + pentiumpro-*)
- + with_cpu=pentiumpro
- + ;;
- + *)
- + with_cpu=generic
- + ;;
- + esac
- + ;;
- + x86_64-*-*)
- + case ${target_noncanonical} in
- + amdfam10-*|barcelona-*)
- + with_cpu=amdfam10
- + ;;
- + k8_sse3-*|opteron_sse3-*|athlon64_sse3-*)
- + with_cpu=k8-sse3
- + ;;
- + k8-*|opteron-*|athlon64-*|athlon_fx-*)
- + with_cpu=k8
- + ;;
- + nocona-*)
- + with_cpu=nocona
- + ;;
- + core2-*)
- + with_cpu=core2
- + ;;
- + *)
- + with_cpu=generic
- + ;;
- + esac
- + ;;
- + alphaev6[78]*-*-*)
- + with_cpu=ev67
- + ;;
- + alphaev6*-*-*)
- + with_cpu=ev6
- + ;;
- + alphapca56*-*-*)
- + with_cpu=pca56
- + ;;
- + alphaev56*-*-*)
- + with_cpu=ev56
- + ;;
- + alphaev5*-*-*)
- + with_cpu=ev5
- + ;;
- + frv-*-*linux* | frv400-*-*linux*)
- + with_cpu=fr400
- + ;;
- + frv550-*-*linux*)
- + with_cpu=fr550
- + ;;
- + m68k*-*-*)
- + case "$with_arch" in
- + "cf")
- + with_cpu=${default_cf_cpu}
- + ;;
- + "" | "m68k")
- + with_cpu=m${default_m68k_cpu}
- + ;;
- + esac
- + ;;
- + mips*-*-vxworks)
- + with_arch=mips2
- + ;;
- + sparc*-*-*)
- + with_cpu="`echo ${target} | sed 's/-.*$//'`"
- + ;;
- + esac
- +
- + # Avoid overriding --with-cpu-32 and --with-cpu-64 values.
- + case ${target} in
- + i[34567]86-*-*|x86_64-*-*)
- + if test x$with_cpu != x; then
- + if test x$with_cpu_32 != x || test x$with_cpu_64 != x; then
- + if test x$with_cpu_32 = x; then
- + with_cpu_32=$with_cpu
- + fi
- + if test x$with_cpu_64 = x; then
- + with_cpu_64=$with_cpu
- + fi
- + with_cpu=
- + fi
- + fi
- + ;;
- + esac
- +fi
- +
- +# Similarly for --with-schedule.
- +if test x$with_schedule = x; then
- + case ${target} in
- + hppa1*)
- + # Override default PA8000 scheduling model.
- + with_schedule=7100LC
- + ;;
- + esac
- +fi
- +
- +# Validate and mark as valid any --with options supported
- +# by this target. In order to use a particular --with option
- +# you must list it in supported_defaults; validating the value
- +# is optional. This case statement should set nothing besides
- +# supported_defaults.
- +
- +supported_defaults=
- +case "${target}" in
- + alpha*-*-*)
- + supported_defaults="cpu tune"
- + for which in cpu tune; do
- + eval "val=\$with_$which"
- + case "$val" in
- + "" \
- + | ev4 | ev45 | 21064 | ev5 | 21164 | ev56 | 21164a \
- + | pca56 | 21164PC | 21164pc | ev6 | 21264 | ev67 \
- + | 21264a)
- + ;;
- + *)
- + echo "Unknown CPU used in --with-$which=$val" 1>&2
- + exit 1
- + ;;
- + esac
- + done
- + ;;
- +
- + arm*-*-*)
- + supported_defaults="arch cpu float tune fpu abi mode"
- + for which in cpu tune; do
- + # See if it matches any of the entries in arm-cores.def
- + eval "val=\$with_$which"
- + if [ x"$val" = x ] \
- + || grep "^ARM_CORE(\"$val\"," \
- + ${srcdir}/config/arm/arm-cores.def \
- + > /dev/null; then
- + # Ok
- + new_val=`grep "^ARM_CORE(\"$val\"," \
- + ${srcdir}/config/arm/arm-cores.def | \
- + sed -e 's/^[^,]*,[ ]*//' | \
- + sed -e 's/,.*$//'`
- + eval "target_${which}_cname=$new_val"
- + echo "For $val real value is $new_val"
- + true
- + else
- + echo "Unknown CPU used in --with-$which=$val" 1>&2
- + exit 1
- + fi
- + done
- +
- + case "$with_arch" in
- + "" \
- + | armv[23456] | armv2a | armv3m | armv4t | armv5t \
- + | armv5te | armv6j |armv6k | armv6z | armv6zk | armv6-m \
- + | armv7 | armv7-a | armv7-r | armv7-m \
- + | iwmmxt | ep9312)
- + # OK
- + ;;
- + *)
- + echo "Unknown arch used in --with-arch=$with_arch" 1>&2
- + exit 1
- + ;;
- + esac
- +
- + case "$with_float" in
- + "" \
- + | soft | hard | softfp)
- + # OK
- + ;;
- + *)
- + echo "Unknown floating point type used in --with-float=$with_float" 1>&2
- + exit 1
- + ;;
- + esac
- +
- + case "$with_fpu" in
- + "" \
- + | fpa | fpe2 | fpe3 | maverick | vfp | vfp3 | vfpv3 | vfpv3-d16 | neon )
- + # OK
- + ;;
- + *)
- + echo "Unknown fpu used in --with-fpu=$with_fpu" 2>&1
- + exit 1
- + ;;
- + esac
- +
- + case "$with_abi" in
- + "" \
- + | apcs-gnu | atpcs | aapcs | iwmmxt | aapcs-linux )
- + #OK
- + ;;
- + *)
- + echo "Unknown ABI used in --with-abi=$with_abi"
- + exit 1
- + ;;
- + esac
- +
- + case "$with_mode" in
- + "" \
- + | arm | thumb )
- + #OK
- + ;;
- + *)
- + echo "Unknown mode used in --with-mode=$with_mode"
- + exit 1
- + ;;
- + esac
- +
- + if test "x$with_arch" != x && test "x$with_cpu" != x; then
- + echo "Warning: --with-arch overrides --with-cpu=$with_cpu" 1>&2
- + fi
- + ;;
- +
- + fr*-*-*linux*)
- + supported_defaults=cpu
- + case "$with_cpu" in
- + fr400) ;;
- + fr550) ;;
- + *)
- + echo "Unknown cpu used in --with-cpu=$with_cpu" 1>&2
- + exit 1
- + ;;
- + esac
- + ;;
- +
- + fido-*-* | m68k*-*-*)
- + supported_defaults="arch cpu"
- + case "$with_arch" in
- + "" | "m68k"| "cf")
- + m68k_arch_family="$with_arch"
- + ;;
- + *)
- + echo "Invalid --with-arch=$with_arch" 1>&2
- + exit 1
- + ;;
- + esac
- +
- + # We always have a $with_cpu setting here.
- + case "$with_cpu" in
- + "m68000" | "m68010" | "m68020" | "m68030" | "m68040" | "m68060")
- + m68k_cpu_ident=$with_cpu
- + ;;
- + "m68020-40")
- + m68k_cpu_ident=m68020
- + tm_defines="$tm_defines M68K_DEFAULT_TUNE=u68020_40"
- + ;;
- + "m68020-60")
- + m68k_cpu_ident=m68020
- + tm_defines="$tm_defines M68K_DEFAULT_TUNE=u68020_60"
- + ;;
- + *)
- + # We need the C identifier rather than the string.
- + m68k_cpu_ident=`awk -v arg="\"$with_cpu\"" \
- + 'BEGIN { FS="[ \t]*[,()][ \t]*" }; \
- + $1 == "M68K_DEVICE" && $2 == arg { print $3 }' \
- + ${srcdir}/config/m68k/m68k-devices.def`
- + if [ x"$m68k_cpu_ident" = x ] ; then
- + echo "Unknown CPU used in --with-cpu=$with_cpu" 1>&2
- + exit 1
- + fi
- + with_cpu="mcpu=$with_cpu"
- + ;;
- + esac
- + ;;
- +
- + hppa*-*-*)
- + supported_defaults="arch schedule"
- +
- + case "$with_arch" in
- + "" | 1.0 | 1.1 | 2.0)
- + # OK
- + ;;
- + *)
- + echo "Unknown architecture used in --with-arch=$with_arch" 1>&2
- + exit 1
- + ;;
- + esac
- +
- + case "$with_schedule" in
- + "" | 700 | 7100 | 7100LC | 7200 | 7300 | 8000)
- + # OK
- + ;;
- + *)
- + echo "Unknown processor used in --with-schedule=$with_schedule." 1>&2
- + exit 1
- + ;;
- + esac
- + ;;
- +
- + i[34567]86-*-* | x86_64-*-*)
- + supported_defaults="arch arch_32 arch_64 cpu cpu_32 cpu_64 tune tune_32 tune_64"
- + for which in arch arch_32 arch_64 cpu cpu_32 cpu_64 tune tune_32 tune_64; do
- + eval "val=\$with_$which"
- + case ${val} in
- + i386 | i486 \
- + | i586 | pentium | pentium-mmx | winchip-c6 | winchip2 \
- + | c3 | c3-2 | i686 | pentiumpro | pentium2 | pentium3 \
- + | pentium4 | k6 | k6-2 | k6-3 | athlon | athlon-tbird \
- + | athlon-4 | athlon-xp | athlon-mp | geode \
- + | prescott | pentium-m | pentium4m | pentium3m)
- + case "${target}" in
- + x86_64-*-*)
- + case "x$which" in
- + *_32)
- + ;;
- + *)
- + echo "CPU given in --with-$which=$val doesn't support 64bit mode." 1>&2
- + exit 1
- + ;;
- + esac
- + ;;
- + esac
- + # OK
- + ;;
- + "" | amdfam10 | barcelona | k8-sse3 | opteron-sse3 | athlon64-sse3 | k8 | opteron | athlon64 | athlon-fx | nocona | core2 | generic)
- + # OK
- + ;;
- + *)
- + echo "Unknown CPU given in --with-$which=$val." 1>&2
- + exit 1
- + ;;
- + esac
- + done
- + ;;
- +
- + mips*-*-*)
- + supported_defaults="abi arch float tune divide llsc mips-plt"
- +
- + case ${with_float} in
- + "" | soft | hard)
- + # OK
- + ;;
- + *)
- + echo "Unknown floating point type used in --with-float=$with_float" 1>&2
- + exit 1
- + ;;
- + esac
- +
- + case ${with_abi} in
- + "" | 32 | o64 | n32 | 64 | eabi)
- + # OK
- + ;;
- + *)
- + echo "Unknown ABI used in --with-abi=$with_abi" 1>&2
- + exit 1
- + ;;
- + esac
- +
- + case ${with_divide} in
- + "" | breaks | traps)
- + # OK
- + ;;
- + *)
- + echo "Unknown division check type use in --with-divide=$with_divide" 1>&2
- + exit 1
- + ;;
- + esac
- +
- + case ${with_llsc} in
- + yes)
- + with_llsc=llsc
- + ;;
- + no)
- + with_llsc="no-llsc"
- + ;;
- + "")
- + # OK
- + ;;
- + *)
- + echo "Unknown llsc type used in --with-llsc" 1>&2
- + exit 1
- + ;;
- + esac
- +
- + case ${with_mips_plt} in
- + yes)
- + with_mips_plt=plt
- + ;;
- + no)
- + with_mips_plt=no-plt
- + ;;
- + "")
- + ;;
- + *)
- + echo "Unknown --with-mips-plt argument: $with_mips_plt" 1>&2
- + exit 1
- + ;;
- + esac
- + ;;
- +
- + powerpc*-*-* | rs6000-*-*)
- + supported_defaults="cpu float tune"
- +
- + for which in cpu tune; do
- + eval "val=\$with_$which"
- + case ${val} in
- + default32 | default64)
- + with_which="with_$which"
- + eval $with_which=
- + ;;
- + 405cr)
- + tm_defines="${tm_defines} CONFIG_PPC405CR"
- + eval "with_$which=405"
- + ;;
- + "" | common \
- + | power | power[234567] | power6x | powerpc | powerpc64 \
- + | rios | rios1 | rios2 | rsc | rsc1 | rs64a \
- + | 401 | 403 | 405 | 405fp | 440 | 440fp | 464 | 464fp \
- + | 505 | 601 | 602 | 603 | 603e | ec603e | 604 \
- + | 604e | 620 | 630 | 740 | 750 | 7400 | 7450 \
- + | e300c[23] | 854[08] | e500mc \
- + | 801 | 821 | 823 | 860 | 970 | G3 | G4 | G5 | cell)
- + # OK
- + ;;
- + *)
- + echo "Unknown cpu used in --with-$which=$val." 1>&2
- + exit 1
- + ;;
- + esac
- + done
- + ;;
- +
- + s390*-*-*)
- + supported_defaults="arch mode tune"
- +
- + for which in arch tune; do
- + eval "val=\$with_$which"
- + case ${val} in
- + "" | g5 | g6 | z900 | z990 | z9-109 | z9-ec | z10)
- + # OK
- + ;;
- + *)
- + echo "Unknown cpu used in --with-$which=$val." 1>&2
- + exit 1
- + ;;
- + esac
- + done
- +
- + case ${with_mode} in
- + "" | esa | zarch)
- + # OK
- + ;;
- + *)
- + echo "Unknown architecture mode used in --with-mode=$with_mode." 1>&2
- + exit 1
- + ;;
- + esac
- + ;;
- +
- + sh[123456ble]-*-* | sh-*-*)
- + supported_defaults="cpu"
- + case "`echo $with_cpu | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ_ abcdefghijklmnopqrstuvwxyz- | sed s/sh/m/`" in
- + "" | m1 | m2 | m2e | m3 | m3e | m4 | m4-single | m4-single-only | m4-nofpu )
- + # OK
- + ;;
- + m2a | m2a-single | m2a-single-only | m2a-nofpu)
- + ;;
- + m4a | m4a-single | m4a-single-only | m4a-nofpu | m4al)
- + ;;
- + *)
- + echo "Unknown CPU used in --with-cpu=$with_cpu, known values:" 1>&2
- + echo "m1 m2 m2e m3 m3e m4 m4-single m4-single-only m4-nofpu" 1>&2
- + echo "m4a m4a-single m4a-single-only m4a-nofpu m4al" 1>&2
- + echo "m2a m2a-single m2a-single-only m2a-nofpu" 1>&2
- + exit 1
- + ;;
- + esac
- + ;;
- + sparc*-*-*)
- + supported_defaults="cpu float tune"
- +
- + for which in cpu tune; do
- + eval "val=\$with_$which"
- + case ${val} in
- + "" | sparc | sparcv9 | sparc64 | sparc86x \
- + | v7 | cypress | v8 | supersparc | sparclite | f930 \
- + | f934 | hypersparc | sparclite86x | sparclet | tsc701 \
- + | v9 | ultrasparc | ultrasparc3 | niagara | niagara2)
- + # OK
- + ;;
- + *)
- + echo "Unknown cpu used in --with-$which=$val" 1>&2
- + exit 1
- + ;;
- + esac
- + done
- +
- + case ${with_float} in
- + "" | soft | hard)
- + # OK
- + ;;
- + *)
- + echo "Unknown floating point type used in --with-float=$with_float" 1>&2
- + exit 1
- + ;;
- + esac
- + ;;
- +
- + spu-*-*)
- + supported_defaults="arch tune"
- +
- + for which in arch tune; do
- + eval "val=\$with_$which"
- + case ${val} in
- + "" | cell | celledp)
- + # OK
- + ;;
- + *)
- + echo "Unknown cpu used in --with-$which=$val." 1>&2
- + exit 1
- + ;;
- + esac
- + done
- + ;;
- +
- + v850*-*-*)
- + supported_defaults=cpu
- + case ${with_cpu} in
- + "" | v850e | v850e1)
- + # OK
- + ;;
- + *)
- + echo "Unknown cpu used in --with-cpu=$with_cpu" 1>&2
- + exit 1
- + ;;
- + esac
- + ;;
- +esac
- +
- +# Set some miscellaneous flags for particular targets.
- +target_cpu_default2=
- +case ${target} in
- + alpha*-*-*)
- + if test x$gas = xyes
- + then
- + target_cpu_default2="MASK_GAS"
- + fi
- + ;;
- +
- + arm*-*-*)
- + if test x$target_cpu_cname = x
- + then
- + target_cpu_default2=TARGET_CPU_generic
- + else
- + target_cpu_default2=TARGET_CPU_$target_cpu_cname
- + fi
- + ;;
- +
- + hppa*-*-*)
- + target_cpu_default2="MASK_BIG_SWITCH"
- + if test x$gas = xyes
- + then
- + target_cpu_default2="${target_cpu_default2}|MASK_GAS|MASK_JUMP_IN_DELAY"
- + fi
- + ;;
- +
- + fido*-*-* | m68k*-*-*)
- + target_cpu_default2=$m68k_cpu_ident
- + if [ x"$m68k_arch_family" != x ]; then
- + tmake_file="m68k/t-$m68k_arch_family $tmake_file"
- + fi
- + ;;
- +
- + i[34567]86-*-darwin* | x86_64-*-darwin*)
- + tmake_file="${tmake_file} i386/t-fprules-softfp soft-fp/t-softfp"
- + ;;
- + i[34567]86-*-linux* | x86_64-*-linux* | i[34567]86-*-kfreebsd*-gnu | x86_64-*-kfreebsd*-gnu)
- + tmake_file="${tmake_file} i386/t-fprules-softfp soft-fp/t-softfp i386/t-linux"
- + ;;
- + ia64*-*-linux*)
- + tmake_file="${tmake_file} ia64/t-fprules-softfp soft-fp/t-softfp"
- + ;;
- +
- + mips*-*-*)
- + if test x$gnu_ld = xyes
- + then
- + target_cpu_default2="MASK_SPLIT_ADDRESSES"
- + fi
- + case ${target} in
- + mips*el-*-*)
- + tm_defines="TARGET_ENDIAN_DEFAULT=0 $tm_defines"
- + ;;
- + esac
- + if test "x$enable_gofast" = xyes
- + then
- + tm_defines="US_SOFTWARE_GOFAST $tm_defines"
- + tmake_file="mips/t-gofast $tmake_file"
- + else
- + tmake_file="mips/t-mips $tmake_file"
- + fi
- + ;;
- +
- + powerpc*-*-* | rs6000-*-*)
- + # FIXME: The PowerPC port uses the value set at compile time,
- + # although it's only cosmetic.
- + if test "x$with_cpu" != x
- + then
- + target_cpu_default2="\\\"$with_cpu\\\""
- + fi
- + out_file=rs6000/rs6000.c
- + c_target_objs="${c_target_objs} rs6000-c.o"
- + cxx_target_objs="${cxx_target_objs} rs6000-c.o"
- + tmake_file="rs6000/t-rs6000 ${tmake_file}"
- +
- + if test x$enable_e500_double = xyes
- + then
- + tm_file="$tm_file rs6000/e500-double.h"
- + fi
- + ;;
- +
- + sh[123456ble]*-*-* | sh-*-*)
- + c_target_objs="${c_target_objs} sh-c.o"
- + cxx_target_objs="${cxx_target_objs} sh-c.o"
- + ;;
- +
- + sparc*-*-*)
- + # Some standard aliases.
- + case x$with_cpu in
- + xsparc)
- + with_cpu=v7
- + ;;
- + xsparcv9 | xsparc64)
- + with_cpu=v9
- + ;;
- + esac
- +
- + # The SPARC port checks this value at compile-time.
- + target_cpu_default2="TARGET_CPU_$with_cpu"
- + ;;
- + v850*-*-*)
- + # FIXME: The v850 is "special" in that it does not support
- + # runtime CPU selection, only --with-cpu.
- + case "x$with_cpu" in
- + x)
- + ;;
- + xv850e)
- + target_cpu_default2="TARGET_CPU_$with_cpu"
- + ;;
- + esac
- + ;;
- +esac
- +
- +t=
- +all_defaults="abi cpu cpu_32 cpu_64 arch arch_32 arch_64 tune tune_32 tune_64 schedule float mode fpu divide llsc mips-plt"
- +for option in $all_defaults
- +do
- + eval "val=\$with_"`echo $option | sed s/-/_/g`
- + if test -n "$val"; then
- + case " $supported_defaults " in
- + *" $option "*)
- + ;;
- + *)
- + echo "This target does not support --with-$option." 2>&1
- + echo "Valid --with options are: $supported_defaults" 2>&1
- + exit 1
- + ;;
- + esac
- +
- + if test "x$t" = x
- + then
- + t="{ \"$option\", \"$val\" }"
- + else
- + t="${t}, { \"$option\", \"$val\" }"
- + fi
- + fi
- +done
- +
- +if test "x$t" = x
- +then
- + configure_default_options="{ { NULL, NULL} }"
- +else
- + configure_default_options="{ ${t} }"
- +fi
- +
- +if test "$target_cpu_default2" != ""
- +then
- + if test "$target_cpu_default" != ""
- + then
- + target_cpu_default="(${target_cpu_default}|${target_cpu_default2})"
- + else
- + target_cpu_default=$target_cpu_default2
- + fi
- +fi
- diff -Nur gcc-4.4.6.orig/gcc/configure.ac gcc-4.4.6/gcc/configure.ac
- --- gcc-4.4.6.orig/gcc/configure.ac 2010-12-13 19:19:43.000000000 +0100
- +++ gcc-4.4.6/gcc/configure.ac 2011-10-22 19:23:08.532581301 +0200
- @@ -2240,10 +2240,9 @@
- as_ver=`$gcc_cv_as --version 2>/dev/null | sed 1q`
- if echo "$as_ver" | grep GNU > /dev/null; then
- changequote(,)dnl
- - as_vers=`echo $as_ver | sed -n \
- - -e 's,^.*[ ]\([0-9][0-9]*\.[0-9][0-9]*.*\)$,\1,p'`
- - as_major=`expr "$as_vers" : '\([0-9]*\)'`
- - as_minor=`expr "$as_vers" : '[0-9]*\.\([0-9]*\)'`
- + as_ver=`echo $as_ver | sed -e 's/GNU assembler\( (GNU Binutils)\)\? \([0-9.][0-9.]*\).*/\2/'`
- + as_major=`echo $as_ver | sed 's/\..*//'`
- + as_minor=`echo $as_ver | sed 's/[^.]*\.\([0-9]*\).*/\1/'`
- changequote([,])dnl
- if test $as_major -eq 2 && test $as_minor -lt 11
- then :
- @@ -3308,7 +3307,7 @@
- i?86*-*-* | mips*-*-* | alpha*-*-* | powerpc*-*-* | sparc*-*-* | m68*-*-* \
- | x86_64*-*-* | hppa*-*-* | arm*-*-* \
- | xstormy16*-*-* | cris-*-* | crisv32-*-* | xtensa*-*-* | bfin-*-* | score*-*-* \
- - | spu-*-* | fido*-*-* | m32c-*-*)
- + | spu-*-* | fido*-*-* | m32c-*-* | avr32-*-*)
- insn="nop"
- ;;
- ia64*-*-* | s390*-*-*)
- diff -Nur gcc-4.4.6.orig/gcc/doc/extend.texi gcc-4.4.6/gcc/doc/extend.texi
- --- gcc-4.4.6.orig/gcc/doc/extend.texi 2011-03-23 22:45:18.000000000 +0100
- +++ gcc-4.4.6/gcc/doc/extend.texi 2011-10-22 19:23:08.532581301 +0200
- @@ -2397,7 +2397,7 @@
-
- @item interrupt
- @cindex interrupt handler functions
- -Use this attribute on the ARM, AVR, CRX, M32C, M32R/D, m68k,
- +Use this attribute on the ARM, AVR, AVR32, CRX, M32C, M32R/D, m68k,
- and Xstormy16 ports to indicate that the specified function is an
- interrupt handler. The compiler will generate function entry and exit
- sequences suitable for use in an interrupt handler when this attribute
- @@ -2417,6 +2417,15 @@
-
- Permissible values for this parameter are: IRQ, FIQ, SWI, ABORT and UNDEF@.
-
- +Note that for the AVR32 you can specify which banking scheme is used by
- +the interrupt mode this interrupt handler runs in, like this:
- +
- +@smallexample
- +void f () __attribute__ ((interrupt ("FULL")));
- +@end smallexample
- +
- +Permissible values for this parameter are: FULL, HALF, NONE and UNDEF.
- +
- On ARMv7-M the interrupt type is ignored, and the attribute means the function
- may be called with a word aligned stack pointer.
-
- @@ -4188,6 +4197,23 @@
-
- @end table
-
- +@subsection AVR32 Variable Attributes
- +
- +One attribute is currently defined for AVR32 configurations:
- +@code{rmw_addressable}
- +
- +@table @code
- +@item rmw_addressable
- +@cindex @code{rmw_addressable} attribute
- +
- +This attribute can be used to signal that a variable can be accessed
- +with the addressing mode of the AVR32 Atomic Read-Modify-Write memory
- +instructions and hence make it possible for gcc to generate these
- +instructions without using built-in functions or inline assembly statements.
- +Variables used within the AVR32 Atomic Read-Modify-Write built-in
- +functions will automatically get the @code{rmw_addressable} attribute.
- +@end table
- +
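As an illustration of the attribute just documented, a minimal C sketch follows; whether the compiler actually emits the atomic RMW instructions for these expressions depends on the selected part and options, so treat it as a sketch of intent rather than guaranteed code generation.

/* A flag word tagged with the AVR32 rmw_addressable attribute, so
   ordinary |=, &= and ^= expressions on it become candidates for the
   atomic read-modify-write instructions.  */
static int status_flags __attribute__ ((rmw_addressable));

void
set_error_flag (void)
{
  status_flags |= (1 << 4);   /* may compile to a single "mems" */
}

void
clear_error_flag (void)
{
  status_flags &= ~(1 << 4);  /* may compile to a single "memc" */
}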
- @subsection AVR Variable Attributes
-
- @table @code
- @@ -7042,6 +7068,7 @@
- * Alpha Built-in Functions::
- * ARM iWMMXt Built-in Functions::
- * ARM NEON Intrinsics::
- +* AVR32 Built-in Functions::
- * Blackfin Built-in Functions::
- * FR-V Built-in Functions::
- * X86 Built-in Functions::
- @@ -7284,6 +7311,7 @@
- long long __builtin_arm_wzero ()
- @end smallexample
-
- +
- @node ARM NEON Intrinsics
- @subsection ARM NEON Intrinsics
-
- @@ -7292,6 +7320,74 @@
-
- @include arm-neon-intrinsics.texi
-
- +@node AVR32 Built-in Functions
- +@subsection AVR32 Built-in Functions
- +
- +Built-in functions for atomic memory (RMW) instructions. Note that these
- +built-ins will fail for targets where the RMW instructions are not
- +implemented. Also note that these instructions only accept a Ks15 << 2
- +memory address and will therefore not work with any runtime-computed
- +memory addresses. The user is responsible for making sure that any
- +pointers used within these functions point to a valid memory address.
- +
- +@smallexample
- +void __builtin_mems(int */*ptr*/, int /*bit*/)
- +void __builtin_memc(int */*ptr*/, int /*bit*/)
- +void __builtin_memt(int */*ptr*/, int /*bit*/)
- +@end smallexample
- +
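A short usage sketch of these built-ins, assuming a statically allocated flag word so that its address satisfies the Ks15 << 2 restriction noted above; it only builds with an AVR32 toolchain that provides these built-ins.

/* Statically allocated, so its address is a link-time constant.  */
static int device_flags;

void mark_busy (void)       { __builtin_mems (&device_flags, 0); }  /* atomically set bit 0    */
void mark_idle (void)       { __builtin_memc (&device_flags, 0); }  /* atomically clear bit 0  */
void flip_status_bit (void) { __builtin_memt (&device_flags, 1); }  /* atomically toggle bit 1 */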
- +Built-in functions for DSP instructions. Note that these built-ins will
- +fail for targets where the DSP instructions are not implemented.
- +
- +@smallexample
- +int __builtin_sats (int /*Rd*/,int /*sa*/, int /*bn*/)
- +int __builtin_satu (int /*Rd*/,int /*sa*/, int /*bn*/)
- +int __builtin_satrnds (int /*Rd*/,int /*sa*/, int /*bn*/)
- +int __builtin_satrndu (int /*Rd*/,int /*sa*/, int /*bn*/)
- +short __builtin_mulsathh_h (short, short)
- +int __builtin_mulsathh_w (short, short)
- +short __builtin_mulsatrndhh_h (short, short)
- +int __builtin_mulsatrndwh_w (int, short)
- +int __builtin_mulsatwh_w (int, short)
- +int __builtin_macsathh_w (int, short, short)
- +short __builtin_satadd_h (short, short)
- +short __builtin_satsub_h (short, short)
- +int __builtin_satadd_w (int, int)
- +int __builtin_satsub_w (int, int)
- +long long __builtin_mulwh_d(int, short)
- +long long __builtin_mulnwh_d(int, short)
- +long long __builtin_macwh_d(long long, int, short)
- +long long __builtin_machh_d(long long, short, short)
- +@end smallexample
- +
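To make the signatures above concrete, a hedged sketch of a saturating multiply-accumulate step; the precise fractional and rounding semantics are those of the underlying DSP instructions and are not restated here, and the built-ins are only available on parts that implement the DSP instruction set.

/* Saturating multiply-accumulate of 16-bit samples using the DSP
   built-ins listed above.  */
int
accumulate_sample (int acc, short coeff, short sample)
{
  int product = __builtin_mulsathh_w (coeff, sample);  /* saturated halfword x halfword multiply, word result */
  return __builtin_satadd_w (acc, product);            /* saturating word addition */
}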
- +Other built-in functions for instructions that cannot easily be
- +generated by the compiler.
- +
- +@smallexample
- +void __builtin_ssrf(int);
- +void __builtin_csrf(int);
- +void __builtin_musfr(int);
- +int __builtin_mustr(void);
- +int __builtin_mfsr(int /*Status Register Address*/)
- +void __builtin_mtsr(int /*Status Register Address*/, int /*Value*/)
- +int __builtin_mfdr(int /*Debug Register Address*/)
- +void __builtin_mtdr(int /*Debug Register Address*/, int /*Value*/)
- +void __builtin_cache(void * /*Address*/, int /*Cache Operation*/)
- +void __builtin_sync(int /*Sync Operation*/)
- +void __builtin_tlbr(void)
- +void __builtin_tlbs(void)
- +void __builtin_tlbw(void)
- +void __builtin_breakpoint(void)
- +int __builtin_xchg(void * /*Address*/, int /*Value*/ )
- +short __builtin_bswap_16(short)
- +int __builtin_bswap_32(int)
- +void __builtin_cop(int/*cpnr*/, int/*crd*/, int/*crx*/, int/*cry*/, int/*op*/)
- +int __builtin_mvcr_w(int/*cpnr*/, int/*crs*/)
- +void __builtin_mvrc_w(int/*cpnr*/, int/*crd*/, int/*value*/)
- +long long __builtin_mvcr_d(int/*cpnr*/, int/*crs*/)
- +void __builtin_mvrc_d(int/*cpnr*/, int/*crd*/, long long/*value*/)
- +@end smallexample
- +
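And a sketch of the system-register built-ins above; the SYSREG_COUNT value is a placeholder assumed to be the AVR32 cycle-counter register offset and should be checked against the part's header files rather than taken from this sketch.

#define SYSREG_COUNT 0x108   /* assumed cycle-counter offset; verify against the part's headers */

static inline unsigned int
read_cycle_counter (void)
{
  return (unsigned int) __builtin_mfsr (SYSREG_COUNT);  /* mfsr: move from system register */
}

static inline void
reset_cycle_counter (void)
{
  __builtin_mtsr (SYSREG_COUNT, 0);                      /* mtsr: move to system register */
}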
- @node Blackfin Built-in Functions
- @subsection Blackfin Built-in Functions
-
- diff -Nur gcc-4.4.6.orig/gcc/doc/invoke.texi gcc-4.4.6/gcc/doc/invoke.texi
- --- gcc-4.4.6.orig/gcc/doc/invoke.texi 2011-03-23 23:02:12.000000000 +0100
- +++ gcc-4.4.6/gcc/doc/invoke.texi 2011-10-22 19:23:08.536581300 +0200
- @@ -195,7 +195,7 @@
- -fvisibility-ms-compat @gol
- -Wabi -Wctor-dtor-privacy @gol
- -Wnon-virtual-dtor -Wreorder @gol
- --Weffc++ -Wstrict-null-sentinel @gol
- +-Weffc++ -Wno-deprecated @gol
- -Wno-non-template-friend -Wold-style-cast @gol
- -Woverloaded-virtual -Wno-pmf-conversions @gol
- -Wsign-promo}
- @@ -641,6 +641,12 @@
- -mauto-incdec -minmax -mlong-calls -mshort @gol
- -msoft-reg-count=@var{count}}
-
- +@emph{AVR32 Options}
- +@gccoptlist{-muse-rodata-section -mhard-float -msoft-float -mrelax @gol
- +-mforce-double-align -mno-init-got -mmd-reorg-opt -masm-addr-pseudos @gol
- +-mpart=@var{part} -mcpu=@var{cpu} -march=@var{arch} @gol
- +-mfast-float -mimm-in-const-pool}
- +
- @emph{MCore Options}
- @gccoptlist{-mhardlit -mno-hardlit -mdiv -mno-div -mrelax-immediates @gol
- -mno-relax-immediates -mwide-bitfields -mno-wide-bitfields @gol
- @@ -3256,13 +3262,11 @@
- If you want to warn about code which uses the uninitialized value of the
- variable in its own initializer, use the @option{-Winit-self} option.
-
- -These warnings occur for individual uninitialized or clobbered
- -elements of structure, union or array variables as well as for
- -variables which are uninitialized or clobbered as a whole. They do
- -not occur for variables or elements declared @code{volatile}. Because
- -these warnings depend on optimization, the exact variables or elements
- -for which there are warnings will depend on the precise optimization
- -options and version of GCC used.
- +These warnings occur only for variables that are candidates for
- +register allocation. Therefore, they do not occur for a variable that
- +is declared @code{volatile}, or whose address is taken, or whose size
- +is other than 1, 2, 4 or 8 bytes. Also, they do not occur for
- +structures, unions or arrays, even when they are in registers.
-
- Note that there may be no warning about a variable that is used only
- to compute a value that itself is never used, because such
- @@ -7445,10 +7449,6 @@
- we always try to remove unnecessary ivs from the set during its
- optimization when a new iv is added to the set.
-
- -@item scev-max-expr-size
- -Bound on size of expressions used in the scalar evolutions analyzer.
- -Large expressions slow the analyzer.
- -
- @item omega-max-vars
- The maximum number of variables in an Omega constraint system.
- The default value is 128.
- @@ -8844,6 +8844,7 @@
- * ARC Options::
- * ARM Options::
- * AVR Options::
- +* AVR32 Options::
- * Blackfin Options::
- * CRIS Options::
- * CRX Options::
- @@ -9332,6 +9333,145 @@
- size.
- @end table
-
- +@node AVR32 Options
- +@subsection AVR32 Options
- +@cindex AVR32 Options
- +
- +These options are defined for AVR32 implementations:
- +
- +@table @gcctabopt
- +@item -muse-rodata-section
- +@opindex muse-rodata-section
- +Use section @samp{.rodata} for read-only data instead of @samp{.text}.
- +
- +@item -mhard-float
- +@opindex mhard-float
- +Use floating point coprocessor instructions.
- +
- +@item -msoft-float
- +@opindex msoft-float
- +Use software floating-point library for floating-point operations.
- +
- +@item -mforce-double-align
- +@opindex mforce-double-align
- +Force double-word alignment for double-word memory accesses.
- +
- +@item -masm-addr-pseudos
- +@opindex masm-addr-pseudos
- +Use assembler pseudo-instructions lda.w and call for handling direct
- +addresses. (Enabled by default)
- +
- +@item -mno-init-got
- +@opindex mno-init-got
- +Do not initialize the GOT register before using it when compiling PIC
- +code.
- +
- +@item -mrelax
- +@opindex mrelax
- +Let the invoked assembler and linker perform relaxing
- +(enabled by default when the optimization level is above 1).
- +This means that when the addresses of symbols are known at link time,
- +the linker can optimize @samp{icall} and @samp{mcall}
- +instructions into an @samp{rcall} instruction where possible.
- +Loading the address of a symbol can also be optimized.
- +
- +@item -mmd-reorg-opt
- +@opindex mmd-reorg-opt
- +Perform machine-dependent optimizations in the reorg stage.
- +
- +@item -mpart=@var{part}
- +@opindex mpart
- +Generate code for the specified part. Permissible parts are:
- +@samp{ap7000},
- +@samp{ap7001},
- +@samp{ap7002},
- +@samp{ap7200},
- +@samp{uc3a0128},
- +@samp{uc3a0256},
- +@samp{uc3a0512},
- +@samp{uc3a0512es},
- +@samp{uc3a1128},
- +@samp{uc3a1256},
- +@samp{uc3a1512},
- +@samp{uc3a1512es},
- +@samp{uc3a3revd},
- +@samp{uc3a364},
- +@samp{uc3a364s},
- +@samp{uc3a3128},
- +@samp{uc3a3128s},
- +@samp{uc3a3256},
- +@samp{uc3a3256s},
- +@samp{uc3a464},
- +@samp{uc3a464s},
- +@samp{uc3a4128},
- +@samp{uc3a4128s},
- +@samp{uc3a4256},
- +@samp{uc3a4256s},
- +@samp{uc3b064},
- +@samp{uc3b0128},
- +@samp{uc3b0256},
- +@samp{uc3b0256es},
- +@samp{uc3b0512},
- +@samp{uc3b0512revc},
- +@samp{uc3b164},
- +@samp{uc3b1128},
- +@samp{uc3b1256},
- +@samp{uc3b1256es},
- +@samp{uc3b1512},
- +@samp{uc3b1512revc},
- +@samp{uc64d3},
- +@samp{uc128d3},
- +@samp{uc64d4},
- +@samp{uc128d4},
- +@samp{uc3c0512crevc},
- +@samp{uc3c1512crevc},
- +@samp{uc3c2512crevc},
- +@samp{uc3l0256},
- +@samp{uc3l0128},
- +@samp{uc3l064},
- +@samp{uc3l032},
- +@samp{uc3l016},
- +@samp{uc3l064revb},
- +@samp{uc64l3u},
- +@samp{uc128l3u},
- +@samp{uc256l3u},
- +@samp{uc64l4u},
- +@samp{uc128l4u},
- +@samp{uc256l4u},
- +@samp{uc3c064c},
- +@samp{uc3c0128c},
- +@samp{uc3c0256c},
- +@samp{uc3c0512c},
- +@samp{uc3c164c},
- +@samp{uc3c1128c},
- +@samp{uc3c1256c},
- +@samp{uc3c1512c},
- +@samp{uc3c264c},
- +@samp{uc3c2128c},
- +@samp{uc3c2256c},
- +@samp{uc3c2512c},
- +@samp{mxt768e}.
- +
- +@item -mcpu=@var{cpu-type}
- +@opindex mcpu
- +Same as -mpart. Obsolete.
- +
- +@item -march=@var{arch}
- +@opindex march
- +Generate code for the specified architecture. Permissible architectures are:
- +@samp{ap}, @samp{uc} and @samp{ucr2}.
- +
- +@item -mfast-float
- +@opindex mfast-float
- +Enable the fast floating-point library, which does not conform to IEEE 754 but is still good enough
- +for most applications. The fast floating-point library rounds away from zero instead of to the
- +nearest even value. Enabled by default if the @option{-funsafe-math-optimizations} switch is specified.
- +
- +@item -mimm-in-const-pool
- +@opindex mimm-in-const-pool
- +Put large immediates in the constant pool. This is enabled by default for architectures with an instruction cache.
- +@end table
- +
- @node Blackfin Options
- @subsection Blackfin Options
- @cindex Blackfin Options
- @@ -9387,29 +9527,12 @@
- contain speculative loads after jump instructions. If this option is used,
- @code{__WORKAROUND_SPECULATIVE_LOADS} is defined.
-
- -@item -mno-specld-anomaly
- -@opindex mno-specld-anomaly
- -Don't generate extra code to prevent speculative loads from occurring.
- -
- @item -mcsync-anomaly
- @opindex mcsync-anomaly
- When enabled, the compiler will ensure that the generated code does not
- contain CSYNC or SSYNC instructions too soon after conditional branches.
- If this option is used, @code{__WORKAROUND_SPECULATIVE_SYNCS} is defined.
-
- -@item -mno-csync-anomaly
- -@opindex mno-csync-anomaly
- -Don't generate extra code to prevent CSYNC or SSYNC instructions from
- -occurring too soon after a conditional branch.
- -
- -@item -mlow-64k
- -@opindex mlow-64k
- -When enabled, the compiler is free to take advantage of the knowledge that
- -the entire program fits into the low 64k of memory.
- -
- -@item -mno-low-64k
- -@opindex mno-low-64k
- -Assume that the program is arbitrarily large. This is the default.
-
- @item -mstack-check-l1
- @opindex mstack-check-l1
- @@ -9423,11 +9546,6 @@
- without virtual memory management. This option implies @option{-fPIC}.
- With a @samp{bfin-elf} target, this option implies @option{-msim}.
-
- -@item -mno-id-shared-library
- -@opindex mno-id-shared-library
- -Generate code that doesn't assume ID based shared libraries are being used.
- -This is the default.
- -
- @item -mleaf-id-shared-library
- @opindex mleaf-id-shared-library
- Generate code that supports shared libraries via the library ID method,
- @@ -9469,11 +9587,6 @@
- will lie outside of the 24 bit addressing range of the offset based
- version of subroutine call instruction.
-
- -This feature is not enabled by default. Specifying
- -@option{-mno-long-calls} will restore the default behavior. Note these
- -switches have no effect on how the compiler generates code to handle
- -function calls via function pointers.
- -
- @item -mfast-fp
- @opindex mfast-fp
- Link with the fast floating-point library. This library relaxes some of
- diff -Nur gcc-4.4.6.orig/gcc/doc/md.texi gcc-4.4.6/gcc/doc/md.texi
- --- gcc-4.4.6.orig/gcc/doc/md.texi 2009-05-07 10:14:55.000000000 +0200
- +++ gcc-4.4.6/gcc/doc/md.texi 2011-10-22 19:23:08.548581303 +0200
- @@ -4,6 +4,7 @@
- @c This is part of the GCC manual.
- @c For copying conditions, see the file gcc.texi.
-
- +
- @ifset INTERNALS
- @node Machine Desc
- @chapter Machine Descriptions
- @@ -1685,6 +1686,58 @@
- A memory reference suitable for the ARMv4 ldrsb instruction.
- @end table
-
- +@item AVR32 family---@file{avr32.h}
- +@table @code
- +@item f
- +Floating-point registers (f0 to f15)
- +
- +@item Ku@var{bits}
- +Unsigned constant representable with @var{bits} bits (@var{bits} must be
- +two digits). For example, an unsigned 8-bit constant is written as @samp{Ku08}.
- +
- +@item Ks@var{bits}
- +Signed constant representable with @var{bits} bits (@var{bits} must be
- +two digits). For example, a signed 12-bit constant is written as @samp{Ks12}.
- +
- +@item Is@var{bits}
- +The negated range of a signed constant representable with @var{bits}
- +bits, i.e., the same as @samp{Ks@var{bits}} with the range negated.
- +This means that the constant must be in the range @math{-2^{bits-1}+1} to @math{2^{bits-1}}.
- +
- +@item G
- +A single/double precision floating-point immediate or 64-bit integer
- +immediate where both the least and most significant words can be
- +loaded with a move instruction. That is, the integer values of both
- +the least and most significant words are in the range
- +@math{-2^{20}} to @math{2^{20}-1}.
- +
- +@item RKs@var{bits}
- +A memory reference where the address consists of a base register
- +plus a signed immediate displacement whose range is given by @samp{Ks@var{bits}},
- +which has the same format as the signed immediate integer constraint
- +described above.
- +
- +@item RKu@var{bits}
- +A memory reference where the address consists of a base register
- +plus an unsigned immediate displacement whose range is given by @samp{Ku@var{bits}},
- +which has the same format as the unsigned immediate integer constraint
- +described above.
- +
- +@item S
- +A memory reference with an immediate or register offset
- +
- +@item T
- +A memory reference to a constant pool entry
- +
- +@item W
- +A valid operand for use in the @samp{lda.w} instruction macro when
- +relaxing is enabled
- +
- +@item Z
- +A memory reference valid for coprocessor memory instructions
- +
- +@end table
- +
- @item AVR family---@file{config/avr/constraints.md}
- @table @code
- @item l
- diff -Nur gcc-4.4.6.orig/gcc/expmed.c gcc-4.4.6/gcc/expmed.c
- --- gcc-4.4.6.orig/gcc/expmed.c 2010-08-06 09:52:04.000000000 +0200
- +++ gcc-4.4.6/gcc/expmed.c 2011-10-22 19:23:08.548581303 +0200
- @@ -472,9 +472,9 @@
- ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
- || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
- && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
- - : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
- + : ( (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
- || (offset * BITS_PER_UNIT % bitsize == 0
- - && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
- + && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0)))))
- {
- if (MEM_P (op0))
- op0 = adjust_address (op0, fieldmode, offset);
- diff -Nur gcc-4.4.6.orig/gcc/expr.c gcc-4.4.6/gcc/expr.c
- --- gcc-4.4.6.orig/gcc/expr.c 2011-01-16 23:56:10.000000000 +0100
- +++ gcc-4.4.6/gcc/expr.c 2011-10-22 19:23:08.552581303 +0200
- @@ -52,6 +52,7 @@
- #include "tree-flow.h"
- #include "target.h"
- #include "timevar.h"
- +#include "c-common.h"
- #include "df.h"
- #include "diagnostic.h"
-
- @@ -3647,16 +3648,17 @@
- }
- else
- {
- + emit_move_insn (stack_pointer_rtx,
- + expand_binop (Pmode,
- #ifdef STACK_GROWS_DOWNWARD
- - /* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC. */
- - dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
- - GEN_INT (-(HOST_WIDE_INT) rounded_size));
- + sub_optab,
- #else
- - /* ??? This seems wrong if STACK_PUSH_CODE == POST_INC. */
- - dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
- - GEN_INT (rounded_size));
- + add_optab,
- #endif
- - dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr);
- + stack_pointer_rtx,
- + GEN_INT (rounded_size),
- + NULL_RTX, 0, OPTAB_LIB_WIDEN));
- + dest_addr = stack_pointer_rtx;
- }
-
- dest = gen_rtx_MEM (mode, dest_addr);
- @@ -5775,7 +5777,8 @@
- is a bit field, we cannot use addressing to access it.
- Use bit-field techniques or SUBREG to store in it. */
-
- - if (mode == VOIDmode
- + if (
- + mode == VOIDmode
- || (mode != BLKmode && ! direct_store[(int) mode]
- && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
- && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
- @@ -5932,7 +5935,18 @@
- {
- tree field = TREE_OPERAND (exp, 1);
- size_tree = DECL_SIZE (field);
- - if (!DECL_BIT_FIELD (field))
- + if (!DECL_BIT_FIELD (field)
- + /* Added for AVR32:
- + Bitfields with a size equal to a target storage
- + type might not cause DECL_BIT_FIELD to return
- + true since they can be optimized into a normal array
- + access operation. But for volatile bitfields we do
- + not allow this when targetm.narrow_volatile_bitfield ()
- + is false. We can use DECL_C_BIT_FIELD to check if this
- + really is a C bit-field. */
- + && !(TREE_THIS_VOLATILE (exp)
- + && !targetm.narrow_volatile_bitfield ()
- + && DECL_C_BIT_FIELD (field)) )
- mode = DECL_MODE (field);
- else if (DECL_MODE (field) == BLKmode)
- blkmode_bitfield = true;
- @@ -7915,7 +7929,8 @@
- by doing the extract into an object as wide as the field
- (which we know to be the width of a basic mode), then
- storing into memory, and changing the mode to BLKmode. */
- - if (mode1 == VOIDmode
- + if (
- + mode1 == VOIDmode
- || REG_P (op0) || GET_CODE (op0) == SUBREG
- || (mode1 != BLKmode && ! direct_load[(int) mode1]
- && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
- diff -Nur gcc-4.4.6.orig/gcc/function.c gcc-4.4.6/gcc/function.c
- --- gcc-4.4.6.orig/gcc/function.c 2010-08-16 22:24:54.000000000 +0200
- +++ gcc-4.4.6/gcc/function.c 2011-10-22 19:23:08.552581303 +0200
- @@ -2810,7 +2810,11 @@
- assign_parm_remove_parallels (data);
-
- /* Copy the value into the register. */
- - if (data->nominal_mode != data->passed_mode
- + if ( (data->nominal_mode != data->passed_mode
- + /* Added for AVR32: If passed_mode is equal
- + to the promoted nominal mode, why should we convert?
- + The conversion should make no difference. */
- + && data->passed_mode != promoted_nominal_mode)
- || promoted_nominal_mode != data->promoted_mode)
- {
- int save_tree_used;
- diff -Nur gcc-4.4.6.orig/gcc/genemit.c gcc-4.4.6/gcc/genemit.c
- --- gcc-4.4.6.orig/gcc/genemit.c 2009-02-20 16:20:38.000000000 +0100
- +++ gcc-4.4.6/gcc/genemit.c 2011-10-22 19:23:08.552581303 +0200
- @@ -121,6 +121,24 @@
- }
-
- static void
- +gen_vararg_prologue(int operands)
- +{
- + int i;
- +
- + if (operands > 1)
- + {
- + for (i = 1; i < operands; i++)
- + printf(" rtx operand%d ATTRIBUTE_UNUSED;\n", i);
- +
- + printf(" va_list args;\n\n");
- + printf(" va_start(args, operand0);\n");
- + for (i = 1; i < operands; i++)
- + printf(" operand%d = va_arg(args, rtx);\n", i);
- + printf(" va_end(args);\n\n");
- + }
- +}
- +
- +static void
- print_code (RTX_CODE code)
- {
- const char *p1;
- @@ -406,18 +424,16 @@
- fatal ("match_dup operand number has no match_operand");
-
- /* Output the function name and argument declarations. */
- - printf ("rtx\ngen_%s (", XSTR (insn, 0));
- + printf ("rtx\ngen_%s ", XSTR (insn, 0));
- +
- if (operands)
- - for (i = 0; i < operands; i++)
- - if (i)
- - printf (",\n\trtx operand%d ATTRIBUTE_UNUSED", i);
- + printf("(rtx operand0 ATTRIBUTE_UNUSED, ...)\n");
- else
- - printf ("rtx operand%d ATTRIBUTE_UNUSED", i);
- - else
- - printf ("void");
- - printf (")\n");
- + printf("(void)\n");
- printf ("{\n");
-
- + gen_vararg_prologue(operands);
- +
- /* Output code to construct and return the rtl for the instruction body. */
-
- if (XVECLEN (insn, 1) == 1)
- @@ -461,16 +477,12 @@
- operands = max_operand_vec (expand, 1);
-
- /* Output the function name and argument declarations. */
- - printf ("rtx\ngen_%s (", XSTR (expand, 0));
- + printf ("rtx\ngen_%s ", XSTR (expand, 0));
- if (operands)
- - for (i = 0; i < operands; i++)
- - if (i)
- - printf (",\n\trtx operand%d", i);
- - else
- - printf ("rtx operand%d", i);
- + printf("(rtx operand0 ATTRIBUTE_UNUSED, ...)\n");
- else
- - printf ("void");
- - printf (")\n");
- + printf("(void)\n");
- +
- printf ("{\n");
-
- /* If we don't have any C code to write, only one insn is being written,
- @@ -480,6 +492,8 @@
- && operands > max_dup_opno
- && XVECLEN (expand, 1) == 1)
- {
- + gen_vararg_prologue(operands);
- +
- printf (" return ");
- gen_exp (XVECEXP (expand, 1, 0), DEFINE_EXPAND, NULL);
- printf (";\n}\n\n");
- @@ -493,6 +507,7 @@
- for (; i <= max_scratch_opno; i++)
- printf (" rtx operand%d ATTRIBUTE_UNUSED;\n", i);
- printf (" rtx _val = 0;\n");
- + gen_vararg_prologue(operands);
- printf (" start_sequence ();\n");
-
- /* The fourth operand of DEFINE_EXPAND is some code to be executed
- diff -Nur gcc-4.4.6.orig/gcc/genflags.c gcc-4.4.6/gcc/genflags.c
- --- gcc-4.4.6.orig/gcc/genflags.c 2007-07-26 10:37:01.000000000 +0200
- +++ gcc-4.4.6/gcc/genflags.c 2011-10-22 19:23:08.552581303 +0200
- @@ -127,7 +127,6 @@
- gen_proto (rtx insn)
- {
- int num = num_operands (insn);
- - int i;
- const char *name = XSTR (insn, 0);
- int truth = maybe_eval_c_test (XSTR (insn, 2));
-
- @@ -158,12 +157,7 @@
- if (num == 0)
- fputs ("void", stdout);
- else
- - {
- - for (i = 1; i < num; i++)
- - fputs ("rtx, ", stdout);
- -
- - fputs ("rtx", stdout);
- - }
- + fputs("rtx, ...", stdout);
-
- puts (");");
-
- @@ -173,12 +167,7 @@
- {
- printf ("static inline rtx\ngen_%s", name);
- if (num > 0)
- - {
- - putchar ('(');
- - for (i = 0; i < num-1; i++)
- - printf ("rtx ARG_UNUSED (%c), ", 'a' + i);
- - printf ("rtx ARG_UNUSED (%c))\n", 'a' + i);
- - }
- + puts("(rtx ARG_UNUSED(a), ...)");
- else
- puts ("(void)");
- puts ("{\n return 0;\n}");
- diff -Nur gcc-4.4.6.orig/gcc/genoutput.c gcc-4.4.6/gcc/genoutput.c
- --- gcc-4.4.6.orig/gcc/genoutput.c 2009-02-20 16:20:38.000000000 +0100
- +++ gcc-4.4.6/gcc/genoutput.c 2011-10-22 19:23:08.552581303 +0200
- @@ -386,7 +386,7 @@
- }
-
- if (d->name && d->name[0] != '*')
- - printf (" (insn_gen_fn) gen_%s,\n", d->name);
- + printf (" gen_%s,\n", d->name);
- else
- printf (" 0,\n");
-
- diff -Nur gcc-4.4.6.orig/gcc/ifcvt.c gcc-4.4.6/gcc/ifcvt.c
- --- gcc-4.4.6.orig/gcc/ifcvt.c 2010-01-07 15:59:59.000000000 +0100
- +++ gcc-4.4.6/gcc/ifcvt.c 2011-10-22 19:23:08.552581303 +0200
- @@ -84,7 +84,7 @@
- static int num_updated_if_blocks;
-
- /* # of changes made. */
- -static int num_true_changes;
- +int num_true_changes;
-
- /* Whether conditional execution changes were made. */
- static int cond_exec_changed_p;
- @@ -290,6 +290,9 @@
- if (must_be_last)
- return FALSE;
-
- +#ifdef IFCVT_ALLOW_MODIFY_TEST_IN_INSN
- + if ( !IFCVT_ALLOW_MODIFY_TEST_IN_INSN )
- +#endif
- if (modified_in_p (test, insn))
- {
- if (!mod_ok)
- @@ -570,15 +573,18 @@
- IFCVT_MODIFY_FINAL (ce_info);
- #endif
-
- + /* Merge the blocks! */
- + if ( reload_completed ){
- /* Conversion succeeded. */
- if (dump_file)
- fprintf (dump_file, "%d insn%s converted to conditional execution.\n",
- n_insns, (n_insns == 1) ? " was" : "s were");
-
- - /* Merge the blocks! */
- merge_if_block (ce_info);
- cond_exec_changed_p = TRUE;
- return TRUE;
- + }
- + return FALSE;
-
- fail:
- #ifdef IFCVT_MODIFY_CANCEL
- @@ -1087,7 +1093,11 @@
- != UNKNOWN))
- {
- rtx cond = if_info->cond;
- - enum rtx_code code = reversed_comparison_code (cond, if_info->jump);
- + /* This generates wrong code for AVR32. The cond code need not be reversed
- + since the addmodecc patterns add if the condition is NOT met. */
- + /* enum rtx_code code = reversed_comparison_code (cond, if_info->jump);*/
- + enum rtx_code code = GET_CODE(cond);
- +
-
- /* First try to use addcc pattern. */
- if (general_operand (XEXP (cond, 0), VOIDmode)
- @@ -3039,7 +3049,12 @@
- && noce_find_if_block (test_bb, then_edge, else_edge, pass))
- goto success;
-
- - if (HAVE_conditional_execution && reload_completed
- + if (HAVE_conditional_execution &&
- +#ifdef IFCVT_COND_EXEC_BEFORE_RELOAD
- + (reload_completed || IFCVT_COND_EXEC_BEFORE_RELOAD)
- +#else
- + reload_completed
- +#endif
- && cond_exec_find_if_block (&ce_info))
- goto success;
-
- @@ -3154,7 +3169,11 @@
-
- /* We only ever should get here after reload,
- and only if we have conditional execution. */
- +#ifdef IFCVT_COND_EXEC_BEFORE_RELOAD
- + gcc_assert (HAVE_conditional_execution && (reload_completed||IFCVT_COND_EXEC_BEFORE_RELOAD));
- +#else
- gcc_assert (HAVE_conditional_execution && reload_completed);
- +#endif
-
- /* Discover if any fall through predecessors of the current test basic block
- were && tests (which jump to the else block) or || tests (which jump to
- @@ -4259,6 +4278,14 @@
- static unsigned int
- rest_of_handle_if_after_reload (void)
- {
- + /* Hack for the AVR32 experimental ifcvt processing before reload.
- + The AVR32 specific ifcvt code needs to know when ifcvt after reload
- + has begun. */
- +#ifdef IFCVT_COND_EXEC_BEFORE_RELOAD
- + if ( IFCVT_COND_EXEC_BEFORE_RELOAD )
- + cfun->machine->ifcvt_after_reload = 1;
- +#endif
- +
- if_convert ();
- return 0;
- }
- diff -Nur gcc-4.4.6.orig/gcc/longlong.h gcc-4.4.6/gcc/longlong.h
- --- gcc-4.4.6.orig/gcc/longlong.h 2009-08-12 00:36:56.000000000 +0200
- +++ gcc-4.4.6/gcc/longlong.h 2011-10-22 19:23:08.552581303 +0200
- @@ -250,6 +250,41 @@
- #define COUNT_LEADING_ZEROS_0 32
- #endif
-
- +#if defined (__avr32__) && W_TYPE_SIZE == 32
- +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
- + __asm__ ("add\t%1, %4, %5\n\tadc\t%0, %2, %3" \
- + : "=r" ((USItype) (sh)), \
- + "=&r" ((USItype) (sl)) \
- + : "r" ((USItype) (ah)), \
- + "r" ((USItype) (bh)), \
- + "r" ((USItype) (al)), \
- + "r" ((USItype) (bl)) __CLOBBER_CC)
- +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
- + __asm__ ("sub\t%1, %4, %5\n\tsbc\t%0, %2, %3" \
- + : "=r" ((USItype) (sh)), \
- + "=&r" ((USItype) (sl)) \
- + : "r" ((USItype) (ah)), \
- + "r" ((USItype) (bh)), \
- + "r" ((USItype) (al)), \
- + "r" ((USItype) (bl)) __CLOBBER_CC)
- +
- +#if !defined (__AVR32_NO_MUL__)
- +#define __umulsidi3(a,b) ((UDItype)(a) * (UDItype)(b))
- +
- +#define umul_ppmm(w1, w0, u, v) \
- +{ \
- + DWunion __w; \
- + __w.ll = __umulsidi3 (u, v); \
- + w1 = __w.s.high; \
- + w0 = __w.s.low; \
- +}
- +#endif
- +
- +#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clz (X))
- +#define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctz (X))
- +#define COUNT_LEADING_ZEROS_0 32
- +#endif
- +
- #if defined (__CRIS__) && __CRIS_arch_version >= 3
- #define count_leading_zeros(COUNT, X) ((COUNT) = __builtin_clz (X))
- #if __CRIS_arch_version >= 8
- diff -Nur gcc-4.4.6.orig/gcc/optabs.h gcc-4.4.6/gcc/optabs.h
- --- gcc-4.4.6.orig/gcc/optabs.h 2008-08-07 09:35:51.000000000 +0200
- +++ gcc-4.4.6/gcc/optabs.h 2011-10-22 19:23:08.556581301 +0200
- @@ -603,7 +603,7 @@
- extern optab code_to_optab[NUM_RTX_CODE + 1];
-
-
- -typedef rtx (*rtxfun) (rtx);
- +typedef rtx (*rtxfun) (rtx, ...);
-
- /* Indexed by the rtx-code for a conditional (e.g. EQ, LT,...)
- gives the gen_function to make a branch to test that condition. */
- diff -Nur gcc-4.4.6.orig/gcc/regrename.c gcc-4.4.6/gcc/regrename.c
- --- gcc-4.4.6.orig/gcc/regrename.c 2009-02-20 16:20:38.000000000 +0100
- +++ gcc-4.4.6/gcc/regrename.c 2011-10-22 19:23:08.556581301 +0200
- @@ -1582,6 +1582,9 @@
- bool changed = false;
- rtx insn;
-
- + rtx prev_pred_test;
- + int prev_pred_insn_skipped = 0;
- +
- for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
- {
- int n_ops, i, alt, predicated;
- @@ -1621,6 +1624,58 @@
- recog_data.operand_type[i] = OP_INOUT;
- }
-
- +
- + /* Added for targets (AVR32) which support modifying the test operands
- + in a cond_exec instruction. For these targets we cannot make a change to
- + the test operands if one of the test operands is an output operand. This is because
- + changing the test operands might require inserting a new test
- + insn in the middle of a sequence of cond_exec insns, and if the test operands
- + are modified these tests will fail.
- + */
- + if ( IFCVT_ALLOW_MODIFY_TEST_IN_INSN
- + && predicated )
- + {
- + int insn_skipped = 0;
- + rtx test = COND_EXEC_TEST (PATTERN (insn));
- +
- + /* Check if the previous insn was a skipped predicated insn with the same
- + test as this predicated insn. If so we cannot make any modification to
- + this insn either, since we cannot emit the test insn because the operands
- + are clobbered. */
- + if ( prev_pred_insn_skipped
- + && (rtx_equal_p (test, prev_pred_test)
- + || rtx_equal_p (test, reversed_condition (prev_pred_test))) )
- + {
- + insn_skipped = 1;
- + }
- + else
- + {
- + /* Check if the output operand is used in the test expression. */
- + for (i = 0; i < n_ops; ++i)
- + if ( recog_data.operand_type[i] == OP_INOUT
- + && reg_mentioned_p (recog_data.operand[i], test) )
- + {
- + insn_skipped = 1;
- + break;
- + }
- +
- + }
- +
- + prev_pred_test = test;
- + prev_pred_insn_skipped = insn_skipped;
- + if ( insn_skipped )
- + {
- + if (insn == BB_END (bb))
- + break;
- + else
- + continue;
- + }
- + }
- + else
- + {
- + prev_pred_insn_skipped = 0;
- + }
- +
- /* For each earlyclobber operand, zap the value data. */
- for (i = 0; i < n_ops; i++)
- if (recog_op_alt[i][alt].earlyclobber)
- diff -Nur gcc-4.4.6.orig/gcc/sched-deps.c gcc-4.4.6/gcc/sched-deps.c
- --- gcc-4.4.6.orig/gcc/sched-deps.c 2010-08-24 10:53:11.000000000 +0200
- +++ gcc-4.4.6/gcc/sched-deps.c 2011-10-22 19:23:08.556581301 +0200
- @@ -1473,7 +1473,14 @@
-
- prev_nonnote = prev_nonnote_insn (insn);
- if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
- - && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
- + /* Modification for AVR32 by RP: Why is this here? This will
- + cause the instruction to have no dependencies, which might
- + cause it to be moved anywhere. For the AVR32 we try to keep
- + a group of conditionals together even if they are mutually exclusive.
- + */
- + && (! sched_insns_conditions_mutex_p (insn, prev_nonnote)
- + || GET_CODE (PATTERN (insn)) == COND_EXEC )
- + )
- add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
- }
-
- @@ -2230,8 +2237,29 @@
-
- if (code == COND_EXEC)
- {
- +#ifdef IFCVT_ALLOW_MODIFY_TEST_IN_INSN
- + if (IFCVT_ALLOW_MODIFY_TEST_IN_INSN)
- + {
- + /* Check if we have a group of conditional instructions with the same test.
- + If so we must make sure that they are not scheduled apart, both to
- + avoid unnecessary tests and, if one of the registers in the test is modified
- + in the instruction, to ensure correct code. */
- + if ( prev_nonnote_insn (insn)
- + && INSN_P (prev_nonnote_insn (insn))
- + && GET_CODE (PATTERN (prev_nonnote_insn (insn))) == COND_EXEC
- + && rtx_equal_p (XEXP(COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn))), 0), XEXP (COND_EXEC_TEST (x), 0))
- + && rtx_equal_p (XEXP(COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn))), 1), XEXP (COND_EXEC_TEST (x), 1))
- + && ( GET_CODE (COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn)))) == GET_CODE (COND_EXEC_TEST (x))
- + || GET_CODE (COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn)))) == reversed_comparison_code (COND_EXEC_TEST (x), insn)))
- + {
- + SCHED_GROUP_P (insn) = 1;
- + //CANT_MOVE (prev_nonnote_insn (insn)) = 1;
- + }
- + }
- +#endif
- sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
-
- +
- /* ??? Should be recording conditions so we reduce the number of
- false dependencies. */
- x = COND_EXEC_CODE (x);
- diff -Nur gcc-4.4.6.orig/gcc/testsuite/gcc.dg/sibcall-3.c gcc-4.4.6/gcc/testsuite/gcc.dg/sibcall-3.c
- --- gcc-4.4.6.orig/gcc/testsuite/gcc.dg/sibcall-3.c 2009-01-08 18:56:52.000000000 +0100
- +++ gcc-4.4.6/gcc/testsuite/gcc.dg/sibcall-3.c 2011-10-22 19:23:08.556581301 +0200
- @@ -5,7 +5,7 @@
- Copyright (C) 2002 Free Software Foundation Inc.
- Contributed by Hans-Peter Nilsson <hp@bitrange.com> */
-
- -/* { dg-do run { xfail { { arc-*-* avr-*-* cris-*-* crisv32-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa*-*-* } || { arm*-*-* && { ! arm32 } } } } } */
- +/* { dg-do run { xfail { { arc-*-* avr-*-* avr32-*-* cris-*-* crisv32-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa*-*-* } || { arm*-*-* && { ! arm32 } } } } } */
- /* -mlongcall disables sibcall patterns. */
- /* { dg-skip-if "" { powerpc*-*-* } { "-mlongcall" } { "" } } */
- /* { dg-options "-O2 -foptimize-sibling-calls" } */
- diff -Nur gcc-4.4.6.orig/gcc/testsuite/gcc.dg/sibcall-4.c gcc-4.4.6/gcc/testsuite/gcc.dg/sibcall-4.c
- --- gcc-4.4.6.orig/gcc/testsuite/gcc.dg/sibcall-4.c 2009-01-08 18:56:52.000000000 +0100
- +++ gcc-4.4.6/gcc/testsuite/gcc.dg/sibcall-4.c 2011-10-22 19:23:08.556581301 +0200
- @@ -5,7 +5,7 @@
- Copyright (C) 2002 Free Software Foundation Inc.
- Contributed by Hans-Peter Nilsson <hp@bitrange.com> */
-
- -/* { dg-do run { xfail { { arc-*-* avr-*-* cris-*-* crisv32-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa*-*-* } || { arm*-*-* && { ! arm32 } } } } } */
- +/* { dg-do run { xfail { { arc-*-* avr-*-* avr32-*-* cris-*-* crisv32-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa*-*-* } || { arm*-*-* && { ! arm32 } } } } } */
- /* -mlongcall disables sibcall patterns. */
- /* { dg-skip-if "" { powerpc*-*-* } { "-mlongcall" } { "" } } */
- /* { dg-options "-O2 -foptimize-sibling-calls" } */
- diff -Nur gcc-4.4.6.orig/gcc/testsuite/gcc.dg/trampoline-1.c gcc-4.4.6/gcc/testsuite/gcc.dg/trampoline-1.c
- --- gcc-4.4.6.orig/gcc/testsuite/gcc.dg/trampoline-1.c 2008-05-12 23:52:38.000000000 +0200
- +++ gcc-4.4.6/gcc/testsuite/gcc.dg/trampoline-1.c 2011-10-22 19:23:08.556581301 +0200
- @@ -47,6 +47,8 @@
-
- int main (void)
- {
- +#ifndef NO_TRAMPOLINES
- foo ();
- +#endif
- return 0;
- }
- diff -Nur gcc-4.4.6.orig/libgcc/config.host gcc-4.4.6/libgcc/config.host
- --- gcc-4.4.6.orig/libgcc/config.host 2009-04-17 13:58:41.000000000 +0200
- +++ gcc-4.4.6/libgcc/config.host 2011-10-22 19:23:08.556581301 +0200
- @@ -218,6 +218,13 @@
- ;;
- arm-*-pe*)
- ;;
- +avr32-*-linux*)
- + # No need to build crtbeginT.o on uClibc systems. Should probably be
- + # moved to the OS specific section above.
- + extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o"
- + ;;
- +avr32-*-*)
- + ;;
- avr-*-rtems*)
- ;;
- avr-*-*)
- diff -Nur gcc-4.4.6.orig/libstdc++-v3/Makefile.in gcc-4.4.6/libstdc++-v3/Makefile.in
- --- gcc-4.4.6.orig/libstdc++-v3/Makefile.in 2010-04-29 17:03:38.000000000 +0200
- +++ gcc-4.4.6/libstdc++-v3/Makefile.in 2011-10-22 19:23:08.556581301 +0200
- @@ -36,6 +36,7 @@
- build_triplet = @build@
- host_triplet = @host@
- target_triplet = @target@
- +LIBOBJDIR =
- DIST_COMMON = $(top_srcdir)/fragment.am $(srcdir)/../config.guess \
- $(srcdir)/../config.sub README ChangeLog $(srcdir)/Makefile.in \
- $(srcdir)/Makefile.am $(top_srcdir)/configure \
- diff -Nur gcc-4.4.6.orig/libstdc++-v3/config/os/gnu-linux/ctype_base.h gcc-4.4.6/libstdc++-v3/config/os/gnu-linux/ctype_base.h
- --- gcc-4.4.6.orig/libstdc++-v3/config/os/gnu-linux/ctype_base.h 2009-04-10 01:23:07.000000000 +0200
- +++ gcc-4.4.6/libstdc++-v3/config/os/gnu-linux/ctype_base.h 2011-10-22 19:23:08.556581301 +0200
- @@ -26,6 +26,8 @@
- //
- // ISO C++ 14882: 22.1 Locales
- //
- +#include <features.h>
- +#include <ctype.h>
-
- /** @file ctype_base.h
- * This is an internal header file, included by other library headers.
- @@ -40,7 +42,11 @@
- struct ctype_base
- {
- // Non-standard typedefs.
- +#ifdef __UCLIBC__
- + typedef const __ctype_touplow_t* __to_type;
- +#else
- typedef const int* __to_type;
- +#endif
-
- // NB: Offsets into ctype<char>::_M_table force a particular size
- // on the mask type. Because of this, we don't use an enum.
- diff -Nur gcc-4.4.6.orig/libstdc++-v3/include/Makefile.in gcc-4.4.6/libstdc++-v3/include/Makefile.in
- --- gcc-4.4.6.orig/libstdc++-v3/include/Makefile.in 2009-05-13 02:24:16.000000000 +0200
- +++ gcc-4.4.6/libstdc++-v3/include/Makefile.in 2011-10-22 19:23:08.556581301 +0200
- @@ -36,6 +36,7 @@
- build_triplet = @build@
- host_triplet = @host@
- target_triplet = @target@
- +LIBOBJDIR =
- DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
- $(top_srcdir)/fragment.am
- subdir = include
- diff -Nur gcc-4.4.6.orig/libstdc++-v3/libsupc++/Makefile.in gcc-4.4.6/libstdc++-v3/libsupc++/Makefile.in
- --- gcc-4.4.6.orig/libstdc++-v3/libsupc++/Makefile.in 2009-01-15 21:02:11.000000000 +0100
- +++ gcc-4.4.6/libstdc++-v3/libsupc++/Makefile.in 2011-10-22 19:23:08.556581301 +0200
- @@ -38,6 +38,7 @@
- build_triplet = @build@
- host_triplet = @host@
- target_triplet = @target@
- +LIBOBJDIR =
- DIST_COMMON = $(glibcxxinstall_HEADERS) $(srcdir)/Makefile.am \
- $(srcdir)/Makefile.in $(top_srcdir)/fragment.am
- subdir = libsupc++
- diff -Nur gcc-4.4.6.orig/libstdc++-v3/po/Makefile.in gcc-4.4.6/libstdc++-v3/po/Makefile.in
- --- gcc-4.4.6.orig/libstdc++-v3/po/Makefile.in 2009-01-15 21:02:11.000000000 +0100
- +++ gcc-4.4.6/libstdc++-v3/po/Makefile.in 2011-10-22 19:23:08.556581301 +0200
- @@ -36,6 +36,7 @@
- build_triplet = @build@
- host_triplet = @host@
- target_triplet = @target@
- +LIBOBJDIR =
- DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
- $(top_srcdir)/fragment.am
- subdir = po
- diff -Nur gcc-4.4.6.orig/libstdc++-v3/src/Makefile.in gcc-4.4.6/libstdc++-v3/src/Makefile.in
- --- gcc-4.4.6.orig/libstdc++-v3/src/Makefile.in 2009-08-26 21:04:11.000000000 +0200
- +++ gcc-4.4.6/libstdc++-v3/src/Makefile.in 2011-10-22 19:23:08.556581301 +0200
- @@ -37,6 +37,7 @@
- build_triplet = @build@
- host_triplet = @host@
- target_triplet = @target@
- +LIBOBJDIR =
- DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
- $(top_srcdir)/fragment.am
- subdir = src
|