430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793207942079520796207972079820799208002080120802208032080420805208062080720808208092081020811208122081320814208152081620817208182081920820208212082220823208242082520826208272082820829208302083120832208332083420835208362083720838208392084020841208422084320844208452084620847208482084920850208512085220853208542085520856208572085820859208602086120862208632086420865208662086720868208692087020871208722087320874208752087620877208782087920880208812088220883208842088520886208872088820889208902089120892208932089420895208962089720898208992090020901209022090320904209052090620907209082090920910209112091220913209142091520916209172091820919209202092120922209232092420925209262092720928209292093020931209322093320934209352093620937209382093920940209412094220943209442094520946209472094820949209502095120952209532095420955209562095720958209592096020961209622096320964209652096620967209682096920970209712097220973209742097520976209772097820979209802098120982209832098420985209862098720988209892099020991209922099320994209952099620997209982099921000210012100221003210042100521006210072100821009210102101121012210132101421015210162101721018210192102021021210222102321024210252102621027210282102921030210312103221033210342103521036210372103821039210402104121042210432104421045210462104721048210492105021051210522105321054210552105621057210582105921060210612106221063210642106521066210672106821069210702107121072210732107421075210762107721078210792108021081210822108321084210852108621087210882108921090210912109221093210942109521096210972109821099211002110121102211032110421105211062110721108211092111021111211122111321114211152111621117211182111921120211212112221123211242112521126211272112821129211302113121132211332113421135211362113721138211392114021141211422114321144211452114621147211482114921150211512115221153211542115521156211572115821159211602116121162211632116421165211662116721168211692117021171211722117321174211752117621177211782117921180211812118221183211842118521186211872118821189211902119121192211932119421195211962119721198211992120021201212022120321204212052120621207212082120921210212112121221213212142121521216212172121821219212202122121222212232122421225212262122721228212292123021231212322123321234212352123621237212382123921240212412124221243212442124521246212472124821249212502125121252212532125421255212562125721258212592126021261212622126321264212652126621267212682126921270212712127221273212742127521276212772127821279212802128121282212832128421285212862128721288212892129021291212922129321294212952129621297212982129921300213012130221303213042130521306213072130821309213102131121312213132131421315213162131721318213192132021321213222132321324213252132621327213282132921330213312133221333213342133521336213372133821339213402134121342213432134421345213462134721348213492135021351213522135321354213552135621357213582135921360213612136221363213642136521366213672136821369213702137121372213732137421375213762137721378213792138021381213822138321384213852138621387213882138921390213912139221393213942139521396213972139821399214002140121402214032140421405214062140721408214092141021411214122141321414214152141621417214182
141921420214212142221423214242142521426214272142821429214302143121432214332143421435214362143721438214392144021441214422144321444214452144621447214482144921450214512145221453214542145521456214572145821459214602146121462214632146421465214662146721468214692147021471214722147321474214752147621477214782147921480214812148221483214842148521486214872148821489214902149121492214932149421495214962149721498214992150021501215022150321504215052150621507215082150921510215112151221513215142151521516215172151821519215202152121522215232152421525215262152721528215292153021531215322153321534215352153621537215382153921540215412154221543215442154521546215472154821549215502155121552215532155421555215562155721558215592156021561215622156321564215652156621567215682156921570215712157221573215742157521576215772157821579215802158121582215832158421585215862158721588215892159021591215922159321594215952159621597215982159921600216012160221603216042160521606216072160821609216102161121612216132161421615216162161721618216192162021621216222162321624216252162621627216282162921630216312163221633216342163521636216372163821639216402164121642216432164421645216462164721648216492165021651216522165321654216552165621657216582165921660216612166221663216642166521666216672166821669216702167121672216732167421675216762167721678216792168021681216822168321684216852168621687216882168921690216912169221693216942169521696216972169821699217002170121702217032170421705217062170721708217092171021711217122171321714217152171621717217182171921720217212172221723217242172521726217272172821729217302173121732217332173421735217362173721738217392174021741217422174321744217452174621747217482174921750217512175221753217542175521756217572175821759217602176121762217632176421765217662176721768217692177021771217722177321774217752177621777217782177921780217812178221783217842178521786217872178821789217902179121792217932179421795217962179721798217992180021801218022180321804218052180621807218082180921810218112181221813218142181521816218172181821819218202182121822218232182421825218262182721828218292183021831218322183321834218352183621837218382183921840218412184221843218442184521846218472184821849218502185121852218532185421855218562185721858218592186021861218622186321864218652186621867218682186921870218712187221873218742187521876218772187821879218802188121882218832188421885218862188721888218892189021891218922189321894218952189621897218982189921900219012190221903219042190521906219072190821909219102191121912219132191421915219162191721918219192192021921219222192321924219252192621927219282192921930219312193221933219342193521936219372193821939219402194121942219432194421945219462194721948219492195021951219522195321954219552195621957219582195921960219612196221963219642196521966219672196821969219702197121972219732197421975219762197721978219792198021981219822198321984219852198621987219882198921990219912199221993219942199521996219972199821999220002200122002220032200422005220062200722008220092201022011220122201322014220152201622017220182201922020220212202222023220242202522026220272202822029220302203122032220332203422035220362203722038220392204022041220422204322044220452204622047220482204922050220512205222053220542205522056220572205822059220602206122062220632206422065220662206722068220692207022071220722207322074220752207622077220782207922080220812208222083220842208522086220872208822089220902209122092220932209422095220962209722098220992210022101221022210322104221052210622107221082210922110221112211222113221142211522116221172211822119221202212122122221232212422125221262212722128221292
2130221312213222133221342213522136221372213822139221402214122142221432214422145221462214722148221492215022151221522215322154221552215622157221582215922160221612216222163221642216522166221672216822169221702217122172221732217422175221762217722178221792218022181221822218322184221852218622187221882218922190221912219222193221942219522196221972219822199222002220122202222032220422205222062220722208222092221022211222122221322214222152221622217222182221922220222212222222223222242222522226222272222822229222302223122232222332223422235222362223722238222392224022241222422224322244222452224622247222482224922250222512225222253222542225522256222572225822259222602226122262222632226422265222662226722268222692227022271222722227322274222752227622277222782227922280222812228222283222842228522286222872228822289222902229122292222932229422295222962229722298222992230022301223022230322304223052230622307223082230922310223112231222313223142231522316223172231822319223202232122322223232232422325223262232722328223292233022331223322233322334223352233622337223382233922340223412234222343223442234522346223472234822349223502235122352223532235422355223562235722358223592236022361223622236322364223652236622367223682236922370223712237222373223742237522376223772237822379223802238122382223832238422385223862238722388223892239022391223922239322394223952239622397223982239922400224012240222403224042240522406224072240822409224102241122412224132241422415224162241722418224192242022421224222242322424224252242622427224282242922430224312243222433224342243522436224372243822439224402244122442224432244422445224462244722448224492245022451224522245322454224552245622457224582245922460224612246222463224642246522466224672246822469224702247122472224732247422475224762247722478224792248022481224822248322484224852248622487224882248922490224912249222493224942249522496224972249822499225002250122502225032250422505225062250722508225092251022511225122251322514225152251622517225182251922520225212252222523225242252522526225272252822529225302253122532225332253422535225362253722538225392254022541225422254322544225452254622547225482254922550225512255222553225542255522556225572255822559225602256122562225632256422565225662256722568225692257022571225722257322574225752257622577225782257922580225812258222583225842258522586225872258822589225902259122592225932259422595225962259722598225992260022601226022260322604226052260622607226082260922610226112261222613226142261522616226172261822619226202262122622226232262422625226262262722628226292263022631226322263322634226352263622637226382263922640226412264222643226442264522646226472264822649226502265122652226532265422655226562265722658226592266022661226622266322664226652266622667226682266922670226712267222673226742267522676226772267822679226802268122682226832268422685226862268722688226892269022691226922269322694226952269622697226982269922700227012270222703227042270522706 |
- --- a/gcc/builtins.c
- +++ b/gcc/builtins.c
- @@ -11108,7 +11108,7 @@ validate_gimple_arglist (const_gimple ca
-
- do
- {
- - code = va_arg (ap, enum tree_code);
- + code = va_arg (ap, int);
- switch (code)
- {
- case 0:
- --- a/gcc/calls.c
- +++ b/gcc/calls.c
- @@ -3447,7 +3447,7 @@ emit_library_call_value_1 (int retval, r
- for (; count < nargs; count++)
- {
- rtx val = va_arg (p, rtx);
- - enum machine_mode mode = va_arg (p, enum machine_mode);
- + enum machine_mode mode = va_arg (p, int);
-
- /* We cannot convert the arg value to the mode the library wants here;
- must do it earlier where we know the signedness of the arg. */
- --- /dev/null
- +++ b/gcc/config/avr32/avr32.c
- @@ -0,0 +1,8060 @@
- +/*
- + Target hooks and helper functions for AVR32.
- + Copyright 2003,2004,2005,2006,2007,2008,2009,2010 Atmel Corporation.
- +
- + This file is part of GCC.
- +
- + This program is free software; you can redistribute it and/or modify
- + it under the terms of the GNU General Public License as published by
- + the Free Software Foundation; either version 2 of the License, or
- + (at your option) any later version.
- +
- + This program is distributed in the hope that it will be useful,
- + but WITHOUT ANY WARRANTY; without even the implied warranty of
- + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- + GNU General Public License for more details.
- +
- + You should have received a copy of the GNU General Public License
- + along with this program; if not, write to the Free Software
- + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
- +
- +#include "config.h"
- +#include "system.h"
- +#include "coretypes.h"
- +#include "tm.h"
- +#include "rtl.h"
- +#include "tree.h"
- +#include "obstack.h"
- +#include "regs.h"
- +#include "hard-reg-set.h"
- +#include "real.h"
- +#include "insn-config.h"
- +#include "conditions.h"
- +#include "output.h"
- +#include "insn-attr.h"
- +#include "flags.h"
- +#include "reload.h"
- +#include "function.h"
- +#include "expr.h"
- +#include "optabs.h"
- +#include "toplev.h"
- +#include "recog.h"
- +#include "ggc.h"
- +#include "except.h"
- +#include "c-pragma.h"
- +#include "integrate.h"
- +#include "tm_p.h"
- +#include "langhooks.h"
- +#include "hooks.h"
- +#include "df.h"
- +
- +#include "target.h"
- +#include "target-def.h"
- +
- +#include <ctype.h>
- +
- +
- +
- +/* Global variables. */
- +typedef struct minipool_node Mnode;
- +typedef struct minipool_fixup Mfix;
- +
- +/* Obstack for minipool constant handling. */
- +static struct obstack minipool_obstack;
- +static char *minipool_startobj;
- +static rtx minipool_vector_label;
- +
- +/* True if we are currently building a constant table. */
- +int making_const_table;
- +
- +tree fndecl_attribute_args = NULL_TREE;
- +
- +
- +/* Function prototypes. */
- +static unsigned long avr32_isr_value (tree);
- +static unsigned long avr32_compute_func_type (void);
- +static tree avr32_handle_isr_attribute (tree *, tree, tree, int, bool *);
- +static tree avr32_handle_acall_attribute (tree *, tree, tree, int, bool *);
- +static tree avr32_handle_fndecl_attribute (tree * node, tree name, tree args,
- + int flags, bool * no_add_attrs);
- +static void avr32_reorg (void);
- +bool avr32_return_in_msb (tree type);
- +bool avr32_vector_mode_supported (enum machine_mode mode);
- +static void avr32_init_libfuncs (void);
- +static void avr32_file_end (void);
- +static void flashvault_decl_list_add (unsigned int vector_num, const char *name);
- +
- +
- +
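- + /* Initialize the obstack used for minipool (constant table) handling. */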
- +static void
- +avr32_add_gc_roots (void)
- +{
- + gcc_obstack_init (&minipool_obstack);
- + minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
- +}
- +
- +
- +/* List of all known AVR32 parts */
- +static const struct part_type_s avr32_part_types[] = {
- + /* name, part_type, architecture type, macro */
- + {"none", PART_TYPE_AVR32_NONE, ARCH_TYPE_AVR32_AP, "__AVR32__"},
- + {"ap7000", PART_TYPE_AVR32_AP7000, ARCH_TYPE_AVR32_AP, "__AVR32_AP7000__"},
- + {"ap7001", PART_TYPE_AVR32_AP7001, ARCH_TYPE_AVR32_AP, "__AVR32_AP7001__"},
- + {"ap7002", PART_TYPE_AVR32_AP7002, ARCH_TYPE_AVR32_AP, "__AVR32_AP7002__"},
- + {"ap7200", PART_TYPE_AVR32_AP7200, ARCH_TYPE_AVR32_AP, "__AVR32_AP7200__"},
- + {"uc3a0128", PART_TYPE_AVR32_UC3A0128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A0128__"},
- + {"uc3a0256", PART_TYPE_AVR32_UC3A0256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A0256__"},
- + {"uc3a0512", PART_TYPE_AVR32_UC3A0512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A0512__"},
- + {"uc3a0512es", PART_TYPE_AVR32_UC3A0512ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3A0512ES__"},
- + {"uc3a1128", PART_TYPE_AVR32_UC3A1128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A1128__"},
- + {"uc3a1256", PART_TYPE_AVR32_UC3A1256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A1256__"},
- + {"uc3a1512", PART_TYPE_AVR32_UC3A1512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A1512__"},
- + {"uc3a1512es", PART_TYPE_AVR32_UC3A1512ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3A1512ES__"},
- + {"uc3a3revd", PART_TYPE_AVR32_UC3A3REVD, ARCH_TYPE_AVR32_UCR2NOMUL, "__AVR32_UC3A3256S__"},
- + {"uc3a364", PART_TYPE_AVR32_UC3A364, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A364__"},
- + {"uc3a364s", PART_TYPE_AVR32_UC3A364S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A364S__"},
- + {"uc3a3128", PART_TYPE_AVR32_UC3A3128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3128__"},
- + {"uc3a3128s", PART_TYPE_AVR32_UC3A3128S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3128S__"},
- + {"uc3a3256", PART_TYPE_AVR32_UC3A3256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3256__"},
- + {"uc3a3256s", PART_TYPE_AVR32_UC3A3256S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3256S__"},
- + {"uc3a464", PART_TYPE_AVR32_UC3A464, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A464__"},
- + {"uc3a464s", PART_TYPE_AVR32_UC3A464S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A464S__"},
- + {"uc3a4128", PART_TYPE_AVR32_UC3A4128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A4128__"},
- + {"uc3a4128s", PART_TYPE_AVR32_UC3A4128S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A4128S__"},
- + {"uc3a4256", PART_TYPE_AVR32_UC3A4256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A4256__"},
- + {"uc3a4256s", PART_TYPE_AVR32_UC3A4256S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A4256S__"},
- + {"uc3b064", PART_TYPE_AVR32_UC3B064, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B064__"},
- + {"uc3b0128", PART_TYPE_AVR32_UC3B0128, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B0128__"},
- + {"uc3b0256", PART_TYPE_AVR32_UC3B0256, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B0256__"},
- + {"uc3b0256es", PART_TYPE_AVR32_UC3B0256ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B0256ES__"},
- + {"uc3b0512", PART_TYPE_AVR32_UC3B0512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B0512__"},
- + {"uc3b0512revc", PART_TYPE_AVR32_UC3B0512REVC, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B0512REVC__"},
- + {"uc3b164", PART_TYPE_AVR32_UC3B164, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B164__"},
- + {"uc3b1128", PART_TYPE_AVR32_UC3B1128, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B1128__"},
- + {"uc3b1256", PART_TYPE_AVR32_UC3B1256, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B1256__"},
- + {"uc3b1256es", PART_TYPE_AVR32_UC3B1256ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B1256ES__"},
- + {"uc3b1512", PART_TYPE_AVR32_UC3B1512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B1512__"},
- + {"uc3b1512revc", PART_TYPE_AVR32_UC3B1512REVC, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B1512REVC__"},
- + {"uc64d3", PART_TYPE_AVR32_UC64D3, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC64D3__"},
- + {"uc128d3", PART_TYPE_AVR32_UC128D3, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC128D3__"},
- + {"uc64d4", PART_TYPE_AVR32_UC64D4, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC64D4__"},
- + {"uc128d4", PART_TYPE_AVR32_UC128D4, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC128D4__"},
- + {"uc3c0512crevc", PART_TYPE_AVR32_UC3C0512CREVC, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C0512CREVC__"},
- + {"uc3c1512crevc", PART_TYPE_AVR32_UC3C1512CREVC, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C1512CREVC__"},
- + {"uc3c2512crevc", PART_TYPE_AVR32_UC3C2512CREVC, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C2512CREVC__"},
- + {"uc3l0256", PART_TYPE_AVR32_UC3L0256, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L0256__"},
- + {"uc3l0128", PART_TYPE_AVR32_UC3L0128, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L0128__"},
- + {"uc3l064", PART_TYPE_AVR32_UC3L064, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L064__"},
- + {"uc3l032", PART_TYPE_AVR32_UC3L032, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L032__"},
- + {"uc3l016", PART_TYPE_AVR32_UC3L016, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L016__"},
- + {"uc3l064revb", PART_TYPE_AVR32_UC3L064REVB, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L064REVB__"},
- + {"uc64l3u", PART_TYPE_AVR32_UC64L3U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC64L3U__"},
- + {"uc128l3u", PART_TYPE_AVR32_UC128L3U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC128L3U__"},
- + {"uc256l3u", PART_TYPE_AVR32_UC256L3U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC256L3U__"},
- + {"uc64l4u", PART_TYPE_AVR32_UC64L4U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC64L4U__"},
- + {"uc128l4u", PART_TYPE_AVR32_UC128L4U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC128L4U__"},
- + {"uc256l4u", PART_TYPE_AVR32_UC256L4U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC256L4U__"},
- + {"uc3c064c", PART_TYPE_AVR32_UC3C064C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C064C__"},
- + {"uc3c0128c", PART_TYPE_AVR32_UC3C0128C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C0128C__"},
- + {"uc3c0256c", PART_TYPE_AVR32_UC3C0256C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C0256C__"},
- + {"uc3c0512c", PART_TYPE_AVR32_UC3C0512C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C0512C__"},
- + {"uc3c164c", PART_TYPE_AVR32_UC3C164C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C164C__"},
- + {"uc3c1128c", PART_TYPE_AVR32_UC3C1128C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C1128C__"},
- + {"uc3c1256c", PART_TYPE_AVR32_UC3C1256C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C1256C__"},
- + {"uc3c1512c", PART_TYPE_AVR32_UC3C1512C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C1512C__"},
- + {"uc3c264c", PART_TYPE_AVR32_UC3C264C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C264C__"},
- + {"uc3c2128c", PART_TYPE_AVR32_UC3C2128C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C2128C__"},
- + {"uc3c2256c", PART_TYPE_AVR32_UC3C2256C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C2256C__"},
- + {"uc3c2512c", PART_TYPE_AVR32_UC3C2512C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C2512C__"},
- + {"mxt768e", PART_TYPE_AVR32_MXT768E, ARCH_TYPE_AVR32_UCR3, "__AVR32_MXT768E__"},
- + {NULL, 0, 0, NULL}
- +};
- +
- +/* List of all known AVR32 architectures */
- +static const struct arch_type_s avr32_arch_types[] = {
- + /* name, architecture type, microarchitecture type, feature flags, macro */
- + {"ap", ARCH_TYPE_AVR32_AP, UARCH_TYPE_AVR32B,
- + (FLAG_AVR32_HAS_DSP
- + | FLAG_AVR32_HAS_SIMD
- + | FLAG_AVR32_HAS_UNALIGNED_WORD
- + | FLAG_AVR32_HAS_BRANCH_PRED | FLAG_AVR32_HAS_RETURN_STACK
- + | FLAG_AVR32_HAS_CACHES),
- + "__AVR32_AP__"},
- + {"ucr1", ARCH_TYPE_AVR32_UCR1, UARCH_TYPE_AVR32A,
- + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW),
- + "__AVR32_UC__=1"},
- + {"ucr2", ARCH_TYPE_AVR32_UCR2, UARCH_TYPE_AVR32A,
- + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
- + | FLAG_AVR32_HAS_V2_INSNS),
- + "__AVR32_UC__=2"},
- + {"ucr2nomul", ARCH_TYPE_AVR32_UCR2NOMUL, UARCH_TYPE_AVR32A,
- + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
- + | FLAG_AVR32_HAS_V2_INSNS | FLAG_AVR32_HAS_NO_MUL_INSNS),
- + "__AVR32_UC__=2"},
- + {"ucr3", ARCH_TYPE_AVR32_UCR3, UARCH_TYPE_AVR32A,
- + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
- + | FLAG_AVR32_HAS_V2_INSNS),
- + "__AVR32_UC__=3"},
- + {"ucr3fp", ARCH_TYPE_AVR32_UCR3FP, UARCH_TYPE_AVR32A,
- + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW | FLAG_AVR32_HAS_FPU
- + | FLAG_AVR32_HAS_V2_INSNS),
- + "__AVR32_UC__=3"},
- + {NULL, 0, 0, 0, NULL}
- +};
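- +
- +/* Illustrative example: the part and arch tables above are selected
- + through the -mpart= and -march= command line options, e.g.
- +
- + avr32-gcc -march=ucr2 -c foo.c selects the "ucr2" arch entry
- + avr32-gcc -mpart=uc3l064 -c foo.c selects the part; its arch_type
- + field then implies arch "ucr3"
- +
- + and the corresponding macro column is predefined for the translation
- + unit, e.g. __AVR32_UC__=2 for ucr2. */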
- +
- +/* Default arch name */
- +const char *avr32_arch_name = "none";
- +const char *avr32_part_name = "none";
- +
- +const struct part_type_s *avr32_part;
- +const struct arch_type_s *avr32_arch;
- +
- +
- +/* FIXME: needs to use GC. */
- +struct flashvault_decl_list
- +{
- + struct flashvault_decl_list *next;
- + unsigned int vector_num;
- + const char *name;
- +};
- +
- +static struct flashvault_decl_list *flashvault_decl_list_head = NULL;
- +
- +
- +/* Set default target_flags. */
- +#undef TARGET_DEFAULT_TARGET_FLAGS
- +#define TARGET_DEFAULT_TARGET_FLAGS \
- + (MASK_HAS_ASM_ADDR_PSEUDOS | MASK_MD_REORG_OPTIMIZATION | MASK_COND_EXEC_BEFORE_RELOAD)
- +
- +void
- +avr32_optimization_options (int level, int size)
- +{
- + if (AVR32_ALWAYS_PIC)
- + flag_pic = 1;
- +
- + /* Enable section anchors if optimization is enabled. */
- + if (level > 0 || size)
- + flag_section_anchors = 2;
- +}
- +
- +
- +/* Override command line options */
- +void
- +avr32_override_options (void)
- +{
- + const struct part_type_s *part;
- + const struct arch_type_s *arch;
- +
- + /* Add backward compatibility. */
- + if (strcmp ("uc", avr32_arch_name) == 0)
- + {
- + fprintf (stderr, "Warning: Deprecated arch `%s' specified. "
- + "Please use '-march=ucr1' instead. "
- + "Converting to arch 'ucr1'\n",
- + avr32_arch_name);
- + avr32_arch_name = "ucr1";
- + }
- +
- + /* Check if arch type is set. */
- + for (arch = avr32_arch_types; arch->name; arch++)
- + {
- + if (strcmp (arch->name, avr32_arch_name) == 0)
- + break;
- + }
- + avr32_arch = arch;
- +
- + if (!arch->name && strcmp("none", avr32_arch_name) != 0)
- + {
- + fprintf (stderr, "Unknown arch `%s' specified\n"
- + "Known arch names:\n"
- + "\tuc (deprecated)\n",
- + avr32_arch_name);
- + for (arch = avr32_arch_types; arch->name; arch++)
- + fprintf (stderr, "\t%s\n", arch->name);
- + avr32_arch = &avr32_arch_types[ARCH_TYPE_AVR32_AP];
- + }
- +
- + /* Check if part type is set. */
- + for (part = avr32_part_types; part->name; part++)
- + if (strcmp (part->name, avr32_part_name) == 0)
- + break;
- +
- + avr32_part = part;
- + if (!part->name)
- + {
- + fprintf (stderr, "Unknown part `%s' specified\nKnown part names:\n",
- + avr32_part_name);
- + for (part = avr32_part_types; part->name; part++)
- + {
- + if (strcmp("none", part->name) != 0)
- + fprintf (stderr, "\t%s\n", part->name);
- + }
- + /* Set default to NONE. */
- + avr32_part = &avr32_part_types[PART_TYPE_AVR32_NONE];
- + }
- +
- + /* NB! The option -march= overrides the option -mpart
- + * if both are used at the same time. */
- + if (!arch->name)
- + avr32_arch = &avr32_arch_types[avr32_part->arch_type];
- +
- + /* If optimization level is two or greater, then align start of loops to a
- + word boundary since this will allow folding the first insn of the loop.
- + Do this only for targets supporting branch prediction. */
- + if (optimize >= 2 && TARGET_BRANCH_PRED)
- + align_loops = 2;
- +
- +
- + /* Enable fast-float library if unsafe math optimizations
- + are used. */
- + if (flag_unsafe_math_optimizations)
- + target_flags |= MASK_FAST_FLOAT;
- +
- + /* Check if we should set avr32_imm_in_const_pool
- + based on whether caches are present or not. */
- + if ( avr32_imm_in_const_pool == -1 )
- + {
- + if ( TARGET_CACHES )
- + avr32_imm_in_const_pool = 1;
- + else
- + avr32_imm_in_const_pool = 0;
- + }
- +
- + if (TARGET_NO_PIC)
- + flag_pic = 0;
- + avr32_add_gc_roots ();
- +}
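- +
- +/* Illustrative precedence example for the override logic above: given
- +
- + avr32-gcc -march=ucr1 -mpart=uc3l064 -c foo.c
- +
- + the explicit -march=ucr1 wins over the "ucr3" arch implied by the
- + part table entry for uc3l064. */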
- +
- +
- +/*
- +If defined, a function that outputs the assembler code for entry to a
- +function. The prologue is responsible for setting up the stack frame,
- +initializing the frame pointer register, saving registers that must be
- +saved, and allocating size additional bytes of storage for the
- +local variables. size is an integer. file is a stdio
- +stream to which the assembler code should be output.
- +
- +The label for the beginning of the function need not be output by this
- +macro. That has already been done when the macro is run.
- +
- +To determine which registers to save, the macro can refer to the array
- +regs_ever_live: element r is nonzero if hard register
- +r is used anywhere within the function. This implies the function
- +prologue should save register r, provided it is not one of the
- +call-used registers. (TARGET_ASM_FUNCTION_EPILOGUE must likewise use
- +regs_ever_live.)
- +
- +On machines that have ``register windows'', the function entry code does
- +not save on the stack the registers that are in the windows, even if
- +they are supposed to be preserved by function calls; instead it takes
- +appropriate steps to ``push'' the register stack, if any non-call-used
- +registers are used in the function.
- +
- +On machines where functions may or may not have frame-pointers, the
- +function entry code must vary accordingly; it must set up the frame
- +pointer if one is wanted, and not otherwise. To determine whether a
- +frame pointer is wanted, the macro can refer to the variable
- +frame_pointer_needed. The variable's value will be 1 at run
- +time in a function that needs a frame pointer. (see Elimination).
- +
- +The function entry code is responsible for allocating any stack space
- +required for the function. This stack space consists of the regions
- +listed below. In most cases, these regions are allocated in the
- +order listed, with the last listed region closest to the top of the
- +stack (the lowest address if STACK_GROWS_DOWNWARD is defined, and
- +the highest address if it is not defined). You can use a different order
- +for a machine if doing so is more convenient or required for
- +compatibility reasons. Except in cases where required by standard
- +or by a debugger, there is no reason why the stack layout used by GCC
- +need agree with that used by other compilers for a machine.
- +*/
- +
- +#undef TARGET_ASM_FUNCTION_PROLOGUE
- +#define TARGET_ASM_FUNCTION_PROLOGUE avr32_target_asm_function_prologue
- +
- +#undef TARGET_ASM_FILE_END
- +#define TARGET_ASM_FILE_END avr32_file_end
- +
- +#undef TARGET_DEFAULT_SHORT_ENUMS
- +#define TARGET_DEFAULT_SHORT_ENUMS hook_bool_void_false
- +
- +#undef TARGET_PROMOTE_FUNCTION_ARGS
- +#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
- +
- +#undef TARGET_PROMOTE_FUNCTION_RETURN
- +#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
- +
- +#undef TARGET_PROMOTE_PROTOTYPES
- +#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
- +
- +#undef TARGET_MUST_PASS_IN_STACK
- +#define TARGET_MUST_PASS_IN_STACK avr32_must_pass_in_stack
- +
- +#undef TARGET_PASS_BY_REFERENCE
- +#define TARGET_PASS_BY_REFERENCE avr32_pass_by_reference
- +
- +#undef TARGET_STRICT_ARGUMENT_NAMING
- +#define TARGET_STRICT_ARGUMENT_NAMING avr32_strict_argument_naming
- +
- +#undef TARGET_VECTOR_MODE_SUPPORTED_P
- +#define TARGET_VECTOR_MODE_SUPPORTED_P avr32_vector_mode_supported
- +
- +#undef TARGET_RETURN_IN_MEMORY
- +#define TARGET_RETURN_IN_MEMORY avr32_return_in_memory
- +
- +#undef TARGET_RETURN_IN_MSB
- +#define TARGET_RETURN_IN_MSB avr32_return_in_msb
- +
- +#undef TARGET_ENCODE_SECTION_INFO
- +#define TARGET_ENCODE_SECTION_INFO avr32_encode_section_info
- +
- +#undef TARGET_ARG_PARTIAL_BYTES
- +#define TARGET_ARG_PARTIAL_BYTES avr32_arg_partial_bytes
- +
- +#undef TARGET_STRIP_NAME_ENCODING
- +#define TARGET_STRIP_NAME_ENCODING avr32_strip_name_encoding
- +
- +#define streq(string1, string2) (strcmp (string1, string2) == 0)
- +
- +#undef TARGET_NARROW_VOLATILE_BITFIELD
- +#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
- +
- +#undef TARGET_ATTRIBUTE_TABLE
- +#define TARGET_ATTRIBUTE_TABLE avr32_attribute_table
- +
- +#undef TARGET_COMP_TYPE_ATTRIBUTES
- +#define TARGET_COMP_TYPE_ATTRIBUTES avr32_comp_type_attributes
- +
- +
- +#undef TARGET_RTX_COSTS
- +#define TARGET_RTX_COSTS avr32_rtx_costs
- +
- +#undef TARGET_CANNOT_FORCE_CONST_MEM
- +#define TARGET_CANNOT_FORCE_CONST_MEM avr32_cannot_force_const_mem
- +
- +#undef TARGET_ASM_INTEGER
- +#define TARGET_ASM_INTEGER avr32_assemble_integer
- +
- +#undef TARGET_FUNCTION_VALUE
- +#define TARGET_FUNCTION_VALUE avr32_function_value
- +
- +#undef TARGET_MIN_ANCHOR_OFFSET
- +#define TARGET_MIN_ANCHOR_OFFSET (0)
- +
- +#undef TARGET_MAX_ANCHOR_OFFSET
- +#define TARGET_MAX_ANCHOR_OFFSET ((1 << 15) - 1)
- +#undef TARGET_SECONDARY_RELOAD
- +#define TARGET_SECONDARY_RELOAD avr32_secondary_reload
- +
- +
- +/*
- + * Define the option -mlist-devices, which lists the devices supported
- + * by gcc. This option is meant to be used together with --target-help
- + * to list all the supported devices.
- + */
- +#undef TARGET_HELP
- +#define TARGET_HELP avr32_target_help
- +
- +void
- +avr32_target_help (void)
- +{
- + if (avr32_list_supported_parts)
- + {
- + const struct part_type_s *list;
- + fprintf (stdout, "List of parts supported by avr32-gcc:\n");
- + for (list = avr32_part_types; list->name; list++)
- + {
- + if (strcmp("none", list->name) != 0)
- + fprintf (stdout, "%-20s%s\n", list->name, list->macro);
- + }
- + fprintf (stdout, "\n\n");
- + }
- +}
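- +
- +/* Illustrative invocation of the help hook above:
- +
- + avr32-gcc --target-help -mlist-devices
- +
- + prints one line per supported part, e.g.
- +
- + uc3l064 __AVR32_UC3L064__
- +
- + as produced by the "%-20s%s" format string. */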
- +
- +enum reg_class
- +avr32_secondary_reload (bool in_p, rtx x, enum reg_class class,
- + enum machine_mode mode, secondary_reload_info *sri)
- +{
- +
- + if ( avr32_rmw_memory_operand (x, mode) )
- + {
- + if (!in_p)
- + sri->icode = CODE_FOR_reload_out_rmw_memory_operand;
- + else
- + sri->icode = CODE_FOR_reload_in_rmw_memory_operand;
- + }
- + return NO_REGS;
- +
- +}
- +/*
- + * Switches to the appropriate section for output of constant pool
- + * entry x in mode. You can assume that x is some kind of constant in
- + * RTL. The argument mode is redundant except in the case of a
- + * const_int rtx. Select the section by calling readonly_data_section
- + * or one of the alternatives for other sections. align is the
- + * constant alignment in bits.
- + *
- + * The default version of this function takes care of putting symbolic
- + * constants in flag_pic mode in data_section and everything else in
- + * readonly_data_section.
- + */
- +//#undef TARGET_ASM_SELECT_RTX_SECTION
- +//#define TARGET_ASM_SELECT_RTX_SECTION avr32_select_rtx_section
- +
- +
- +/*
- + * If non-null, this hook performs a target-specific pass over the
- + * instruction stream. The compiler will run it at all optimization
- + * levels, just before the point at which it normally does
- + * delayed-branch scheduling.
- + *
- + * The exact purpose of the hook varies from target to target. Some
- + * use it to do transformations that are necessary for correctness,
- + * such as laying out in-function constant pools or avoiding hardware
- + * hazards. Others use it as an opportunity to do some
- + * machine-dependent optimizations.
- + *
- + * You need not implement the hook if it has nothing to do. The
- + * default definition is null.
- + */
- +#undef TARGET_MACHINE_DEPENDENT_REORG
- +#define TARGET_MACHINE_DEPENDENT_REORG avr32_reorg
- +
- +/* Target hook for assembling integer objects.
- + Need to handle integer vectors */
- +static bool
- +avr32_assemble_integer (rtx x, unsigned int size, int aligned_p)
- +{
- + if (avr32_vector_mode_supported (GET_MODE (x)))
- + {
- + int i, units;
- +
- + if (GET_CODE (x) != CONST_VECTOR)
- + abort ();
- +
- + units = CONST_VECTOR_NUNITS (x);
- +
- + switch (GET_MODE (x))
- + {
- + case V2HImode:
- + size = 2;
- + break;
- + case V4QImode:
- + size = 1;
- + break;
- + default:
- + abort ();
- + }
- +
- + for (i = 0; i < units; i++)
- + {
- + rtx elt;
- +
- + elt = CONST_VECTOR_ELT (x, i);
- + assemble_integer (elt, size, i == 0 ? 32 : size * BITS_PER_UNIT, 1);
- + }
- +
- + return true;
- + }
- +
- + return default_assemble_integer (x, size, aligned_p);
- +}
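- +
- +/* Illustrative effect of the hook above: a V2HImode constant vector
- + such as {1, 2} is assembled as two 16-bit integers, the first
- + element at 32-bit alignment and the second at its natural 16-bit
- + alignment; scalar constants fall through to the default handler. */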
- +
- +
- +/*
- + * This target hook describes the relative costs of RTL expressions.
- + *
- + * The cost may depend on the precise form of the expression, which is
- + * available for examination in x, and the rtx code of the expression
- + * in which it is contained, found in outer_code. code is the
- + * expression code--redundant, since it can be obtained with GET_CODE
- + * (x).
- + *
- + * In implementing this hook, you can use the construct COSTS_N_INSNS
- + * (n) to specify a cost equal to n fast instructions.
- + *
- + * On entry to the hook, *total contains a default estimate for the
- + * cost of the expression. The hook should modify this value as
- + * necessary. Traditionally, the default costs are COSTS_N_INSNS (5)
- + * for multiplications, COSTS_N_INSNS (7) for division and modulus
- + * operations, and COSTS_N_INSNS (1) for all other operations.
- + *
- + * When optimizing for code size, i.e. when optimize_size is non-zero,
- + * this target hook should be used to estimate the relative size cost
- + * of an expression, again relative to COSTS_N_INSNS.
- + *
- + * The hook returns true when all subexpressions of x have been
- + * processed, and false when rtx_cost should recurse.
- + */
- +
- +/* Worker routine for avr32_rtx_costs. */
- +static inline int
- +avr32_rtx_costs_1 (rtx x, enum rtx_code code ATTRIBUTE_UNUSED,
- + enum rtx_code outer ATTRIBUTE_UNUSED)
- +{
- + enum machine_mode mode = GET_MODE (x);
- +
- + switch (GET_CODE (x))
- + {
- + case MEM:
- + /* Using pre decrement / post increment memory operations on the
- + avr32_uc architecture means that two writebacks must be performed
- + and hence two cycles are needed. */
- + if (!optimize_size
- + && GET_MODE_SIZE (mode) <= 2 * UNITS_PER_WORD
- + && TARGET_ARCH_UC
- + && (GET_CODE (XEXP (x, 0)) == PRE_DEC
- + || GET_CODE (XEXP (x, 0)) == POST_INC))
- + return COSTS_N_INSNS (5);
- +
- + /* Memory costs quite a lot for the first word, but subsequent words
- + load at the equivalent of a single insn each. */
- + if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
- + return COSTS_N_INSNS (3 + (GET_MODE_SIZE (mode) / UNITS_PER_WORD));
- +
- + return COSTS_N_INSNS (4);
- + case SYMBOL_REF:
- + case CONST:
- + /* These are valid for the pseudo insns lda.w and call, which operate
- + on direct addresses. We assume that the cost of a lda.w is the same
- + as the cost of a ld.w insn. */
- + return (outer == SET) ? COSTS_N_INSNS (4) : COSTS_N_INSNS (1);
- + case DIV:
- + case MOD:
- + case UDIV:
- + case UMOD:
- + return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
- +
- + case ROTATE:
- + case ROTATERT:
- + if (mode == TImode)
- + return COSTS_N_INSNS (100);
- +
- + if (mode == DImode)
- + return COSTS_N_INSNS (10);
- + return COSTS_N_INSNS (4);
- + case ASHIFT:
- + case LSHIFTRT:
- + case ASHIFTRT:
- + case NOT:
- + if (mode == TImode)
- + return COSTS_N_INSNS (10);
- +
- + if (mode == DImode)
- + return COSTS_N_INSNS (4);
- + return COSTS_N_INSNS (1);
- + case PLUS:
- + case MINUS:
- + case NEG:
- + case COMPARE:
- + case ABS:
- + if (GET_MODE_CLASS (mode) == MODE_FLOAT)
- + return COSTS_N_INSNS (100);
- +
- + if (mode == TImode)
- + return COSTS_N_INSNS (50);
- +
- + if (mode == DImode)
- + return COSTS_N_INSNS (2);
- + return COSTS_N_INSNS (1);
- +
- + case MULT:
- + {
- + if (GET_MODE_CLASS (mode) == MODE_FLOAT)
- + return COSTS_N_INSNS (300);
- +
- + if (mode == TImode)
- + return COSTS_N_INSNS (16);
- +
- + if (mode == DImode)
- + return COSTS_N_INSNS (4);
- +
- + if (mode == HImode)
- + return COSTS_N_INSNS (2);
- +
- + return COSTS_N_INSNS (3);
- + }
- + case IF_THEN_ELSE:
- + if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
- + return COSTS_N_INSNS (4);
- + return COSTS_N_INSNS (1);
- + case SIGN_EXTEND:
- + case ZERO_EXTEND:
- + /* Sign/zero extensions of registers are quite costly since these
- + instructions only take one register operand, which means that gcc
- + often must insert some move instructions. */
- + if (mode == QImode || mode == HImode)
- + return (COSTS_N_INSNS (GET_CODE (XEXP (x, 0)) == MEM ? 0 : 1));
- + return COSTS_N_INSNS (4);
- + case UNSPEC:
- + /* divmod operations */
- + if (XINT (x, 1) == UNSPEC_UDIVMODSI4_INTERNAL
- + || XINT (x, 1) == UNSPEC_DIVMODSI4_INTERNAL)
- + {
- + return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
- + }
- + /* Fallthrough */
- + default:
- + return COSTS_N_INSNS (1);
- + }
- +}
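- +
- +/* Reading the cost table above (illustrative): a SImode shift is a
- + single fast instruction, a DImode shift is costed as four, and a
- + divide is made to look cheap when optimizing for size, presumably so
- + that the compact divide insn is preferred over expanded code. */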
- +
- +
- +static bool
- +avr32_rtx_costs (rtx x, int code, int outer_code, int *total)
- +{
- + *total = avr32_rtx_costs_1 (x, code, outer_code);
- + return true;
- +}
- +
- +
- +bool
- +avr32_cannot_force_const_mem (rtx x ATTRIBUTE_UNUSED)
- +{
- + /* Do not want symbols in the constant pool when compiling pic or if using
- + address pseudo instructions. */
- + return ((flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
- + && avr32_find_symbol (x) != NULL_RTX);
- +}
- +
- +
- +/* Table of machine attributes. */
- +const struct attribute_spec avr32_attribute_table[] = {
- + /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
- + /* Interrupt Service Routines have special prologue and epilogue
- + requirements. */
- + {"isr", 0, 1, false, false, false, avr32_handle_isr_attribute},
- + {"interrupt", 0, 1, false, false, false, avr32_handle_isr_attribute},
- + {"acall", 0, 1, false, true, true, avr32_handle_acall_attribute},
- + {"naked", 0, 0, true, false, false, avr32_handle_fndecl_attribute},
- + {"rmw_addressable", 0, 0, true, false, false, NULL},
- + {"flashvault", 0, 1, true, false, false, avr32_handle_fndecl_attribute},
- + {"flashvault_impl", 0, 1, true, false, false, avr32_handle_fndecl_attribute},
- + {NULL, 0, 0, false, false, false, NULL}
- +};
- +
- +
- +typedef struct
- +{
- + const char *const arg;
- + const unsigned long return_value;
- +}
- +isr_attribute_arg;
- +
- +
- +static const isr_attribute_arg isr_attribute_args[] = {
- + {"FULL", AVR32_FT_ISR_FULL},
- + {"full", AVR32_FT_ISR_FULL},
- + {"HALF", AVR32_FT_ISR_HALF},
- + {"half", AVR32_FT_ISR_HALF},
- + {"NONE", AVR32_FT_ISR_NONE},
- + {"none", AVR32_FT_ISR_NONE},
- + {"UNDEF", AVR32_FT_ISR_NONE},
- + {"undef", AVR32_FT_ISR_NONE},
- + {"SWI", AVR32_FT_ISR_NONE},
- + {"swi", AVR32_FT_ISR_NONE},
- + {NULL, AVR32_FT_ISR_NONE}
- +};
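- +
- +/* Illustrative user code matched against the table above; the handler
- + name is a placeholder:
- +
- + void __attribute__ ((interrupt ("FULL"))) tc_irq_handler (void);
- +
- + The argument strings are accepted in both upper and lower case, as
- + listed. */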
- +
- +
- +/* Returns the (interrupt) function type of the current
- + function, or AVR32_FT_UNKNOWN if the type cannot be determined. */
- +static unsigned long
- +avr32_isr_value (tree argument)
- +{
- + const isr_attribute_arg *ptr;
- + const char *arg;
- +
- + /* No argument - default to ISR_NONE. */
- + if (argument == NULL_TREE)
- + return AVR32_FT_ISR_NONE;
- +
- + /* Get the value of the argument. */
- + if (TREE_VALUE (argument) == NULL_TREE
- + || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
- + return AVR32_FT_UNKNOWN;
- +
- + arg = TREE_STRING_POINTER (TREE_VALUE (argument));
- +
- + /* Check it against the list of known arguments. */
- + for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
- + if (streq (arg, ptr->arg))
- + return ptr->return_value;
- +
- + /* An unrecognized interrupt type. */
- + return AVR32_FT_UNKNOWN;
- +}
- +
- +
- +/*
- +These hooks specify assembly directives for creating certain kinds
- +of integer object. The TARGET_ASM_BYTE_OP directive creates a
- +byte-sized object, the TARGET_ASM_ALIGNED_HI_OP one creates an
- +aligned two-byte object, and so on. Any of the hooks may be
- +NULL, indicating that no suitable directive is available.
- +
- +The compiler will print these strings at the start of a new line,
- +followed immediately by the object's initial value. In most cases,
- +the string should contain a tab, a pseudo-op, and then another tab.
- +*/
- +#undef TARGET_ASM_BYTE_OP
- +#define TARGET_ASM_BYTE_OP "\t.byte\t"
- +#undef TARGET_ASM_ALIGNED_HI_OP
- +#define TARGET_ASM_ALIGNED_HI_OP "\t.align 1\n\t.short\t"
- +#undef TARGET_ASM_ALIGNED_SI_OP
- +#define TARGET_ASM_ALIGNED_SI_OP "\t.align 2\n\t.int\t"
- +#undef TARGET_ASM_ALIGNED_DI_OP
- +#define TARGET_ASM_ALIGNED_DI_OP NULL
- +#undef TARGET_ASM_ALIGNED_TI_OP
- +#define TARGET_ASM_ALIGNED_TI_OP NULL
- +#undef TARGET_ASM_UNALIGNED_HI_OP
- +#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
- +#undef TARGET_ASM_UNALIGNED_SI_OP
- +#define TARGET_ASM_UNALIGNED_SI_OP "\t.int\t"
- +#undef TARGET_ASM_UNALIGNED_DI_OP
- +#define TARGET_ASM_UNALIGNED_DI_OP NULL
- +#undef TARGET_ASM_UNALIGNED_TI_OP
- +#define TARGET_ASM_UNALIGNED_TI_OP NULL
- +
- +#undef TARGET_ASM_OUTPUT_MI_THUNK
- +#define TARGET_ASM_OUTPUT_MI_THUNK avr32_output_mi_thunk
- +
- +#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
- +#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
- +
- +
- +static void
- +avr32_output_mi_thunk (FILE * file,
- + tree thunk ATTRIBUTE_UNUSED,
- + HOST_WIDE_INT delta,
- + HOST_WIDE_INT vcall_offset, tree function)
- + {
- + int mi_delta = delta;
- + int this_regno =
- + (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function) ?
- + INTERNAL_REGNUM (11) : INTERNAL_REGNUM (12));
- +
- +
- + if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
- + || vcall_offset)
- + {
- + fputs ("\tpushm\tlr\n", file);
- + }
- +
- +
- + if (mi_delta != 0)
- + {
- + if (avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21"))
- + {
- + fprintf (file, "\tsub\t%s, %d\n", reg_names[this_regno], -mi_delta);
- + }
- + else
- + {
- + /* The immediate is larger than k21, so we must make ourselves a
- + temp register by pushing a register to the stack. */
- + fprintf (file, "\tmov\tlr, lo(%d)\n", mi_delta);
- + fprintf (file, "\torh\tlr, hi(%d)\n", mi_delta);
- + fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]);
- + }
- + }
- +
- +
- + if (vcall_offset != 0)
- + {
- + fprintf (file, "\tld.w\tlr, %s[0]\n", reg_names[this_regno]);
- + fprintf (file, "\tld.w\tlr, lr[%i]\n", (int) vcall_offset);
- + fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]);
- + }
- +
- +
- + if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
- + || vcall_offset)
- + {
- + fputs ("\tpopm\tlr\n", file);
- + }
- +
- + /* Jump to the function. We assume that we can use an rjmp since the
- + function to jump to is local and probably not too far away from
- + the thunk. If this assumption proves to be wrong we could implement
- + this jump by calculating the offset between the jump source and
- + destination, putting this in the constant pool, and then performing
- + an add to pc.
- + This would also be legitimate PIC code. But for now we hope that an rjmp
- + will be sufficient...
- + */
- + fputs ("\trjmp\t", file);
- + assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
- + fputc ('\n', file);
- + }
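- +
- +/* Illustrative thunk output for delta == 4, vcall_offset == 0 and a
- + non-aggregate return value (this pointer in r12); "target" stands
- + for the real function name:
- +
- + sub r12, -4
- + rjmp target
- +*/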
- +
- +
- +/* Implements target hook vector_mode_supported. */
- +bool
- +avr32_vector_mode_supported (enum machine_mode mode)
- +{
- + if ((mode == V2HImode) || (mode == V4QImode))
- + return true;
- +
- + return false;
- +}
- +
- +
- +#undef TARGET_INIT_LIBFUNCS
- +#define TARGET_INIT_LIBFUNCS avr32_init_libfuncs
- +
- +#undef TARGET_INIT_BUILTINS
- +#define TARGET_INIT_BUILTINS avr32_init_builtins
- +
- +#undef TARGET_EXPAND_BUILTIN
- +#define TARGET_EXPAND_BUILTIN avr32_expand_builtin
- +
- +tree int_ftype_int, int_ftype_void, short_ftype_short, void_ftype_int_int,
- + void_ftype_ptr_int;
- +tree void_ftype_int, void_ftype_ulong, void_ftype_void, int_ftype_ptr_int;
- +tree short_ftype_short, int_ftype_int_short, int_ftype_short_short,
- + short_ftype_short_short;
- +tree int_ftype_int_int, longlong_ftype_int_short, longlong_ftype_short_short;
- +tree void_ftype_int_int_int_int_int, void_ftype_int_int_int;
- +tree longlong_ftype_int_int, void_ftype_int_int_longlong;
- +tree int_ftype_int_int_int, longlong_ftype_longlong_int_short;
- +tree longlong_ftype_longlong_short_short, int_ftype_int_short_short;
- +
- +#define def_builtin(NAME, TYPE, CODE) \
- + add_builtin_function ((NAME), (TYPE), (CODE), \
- + BUILT_IN_MD, NULL, NULL_TREE)
- +
- +#define def_mbuiltin(MASK, NAME, TYPE, CODE) \
- + do \
- + { \
- + if ((MASK)) \
- + add_builtin_function ((NAME), (TYPE), (CODE), \
- + BUILT_IN_MD, NULL, NULL_TREE); \
- + } \
- + while (0)
- +
- +struct builtin_description
- +{
- + const unsigned int mask;
- + const enum insn_code icode;
- + const char *const name;
- + const int code;
- + const enum rtx_code comparison;
- + const unsigned int flag;
- + const tree *ftype;
- +};
- +
- +static const struct builtin_description bdesc_2arg[] = {
- +
- +#define DSP_BUILTIN(code, builtin, ftype) \
- + { 1, CODE_FOR_##code, "__builtin_" #code , \
- + AVR32_BUILTIN_##builtin, 0, 0, ftype }
- +
- + DSP_BUILTIN (mulsathh_h, MULSATHH_H, &short_ftype_short_short),
- + DSP_BUILTIN (mulsathh_w, MULSATHH_W, &int_ftype_short_short),
- + DSP_BUILTIN (mulsatrndhh_h, MULSATRNDHH_H, &short_ftype_short_short),
- + DSP_BUILTIN (mulsatrndwh_w, MULSATRNDWH_W, &int_ftype_int_short),
- + DSP_BUILTIN (mulsatwh_w, MULSATWH_W, &int_ftype_int_short),
- + DSP_BUILTIN (satadd_h, SATADD_H, &short_ftype_short_short),
- + DSP_BUILTIN (satsub_h, SATSUB_H, &short_ftype_short_short),
- + DSP_BUILTIN (satadd_w, SATADD_W, &int_ftype_int_int),
- + DSP_BUILTIN (satsub_w, SATSUB_W, &int_ftype_int_int),
- + DSP_BUILTIN (mulwh_d, MULWH_D, &longlong_ftype_int_short),
- + DSP_BUILTIN (mulnwh_d, MULNWH_D, &longlong_ftype_int_short)
- +};
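- +
- +/* Illustrative use of one of the two-operand DSP builtins declared
- + above, a saturating halfword multiply returning a word; the variable
- + names are placeholders:
- +
- + short a, b;
- + int w = __builtin_mulsathh_w (a, b);
- +*/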
- +
- +
- +void
- +avr32_init_builtins (void)
- +{
- + unsigned int i;
- + const struct builtin_description *d;
- + tree endlink = void_list_node;
- + tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
- + tree longlong_endlink =
- + tree_cons (NULL_TREE, long_long_integer_type_node, endlink);
- + tree short_endlink =
- + tree_cons (NULL_TREE, short_integer_type_node, endlink);
- + tree void_endlink = tree_cons (NULL_TREE, void_type_node, endlink);
- +
- + /* int func (int) */
- + int_ftype_int = build_function_type (integer_type_node, int_endlink);
- +
- + /* short func (short) */
- + short_ftype_short
- + = build_function_type (short_integer_type_node, short_endlink);
- +
- + /* short func (short, short) */
- + short_ftype_short_short
- + = build_function_type (short_integer_type_node,
- + tree_cons (NULL_TREE, short_integer_type_node,
- + short_endlink));
- +
- + /* long long func (long long, short, short) */
- + longlong_ftype_longlong_short_short
- + = build_function_type (long_long_integer_type_node,
- + tree_cons (NULL_TREE, long_long_integer_type_node,
- + tree_cons (NULL_TREE,
- + short_integer_type_node,
- + short_endlink)));
- +
- + /* long long func (short, short) */
- + longlong_ftype_short_short
- + = build_function_type (long_long_integer_type_node,
- + tree_cons (NULL_TREE, short_integer_type_node,
- + short_endlink));
- +
- + /* int func (int, int) */
- + int_ftype_int_int
- + = build_function_type (integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + int_endlink));
- +
- + /* long long func (int, int) */
- + longlong_ftype_int_int
- + = build_function_type (long_long_integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + int_endlink));
- +
- + /* long long int func (long long, int, short) */
- + longlong_ftype_longlong_int_short
- + = build_function_type (long_long_integer_type_node,
- + tree_cons (NULL_TREE, long_long_integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + short_endlink)));
- +
- + /* long long int func (int, short) */
- + longlong_ftype_int_short
- + = build_function_type (long_long_integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + short_endlink));
- +
- + /* int func (int, short, short) */
- + int_ftype_int_short_short
- + = build_function_type (integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + tree_cons (NULL_TREE,
- + short_integer_type_node,
- + short_endlink)));
- +
- + /* int func (short, short) */
- + int_ftype_short_short
- + = build_function_type (integer_type_node,
- + tree_cons (NULL_TREE, short_integer_type_node,
- + short_endlink));
- +
- + /* int func (int, short) */
- + int_ftype_int_short
- + = build_function_type (integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + short_endlink));
- +
- + /* void func (int, int) */
- + void_ftype_int_int
- + = build_function_type (void_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + int_endlink));
- +
- + /* void func (int, int, int) */
- + void_ftype_int_int_int
- + = build_function_type (void_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + int_endlink)));
- +
- + /* void func (int, int, long long) */
- + void_ftype_int_int_longlong
- + = build_function_type (void_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + longlong_endlink)));
- +
- + /* void func (int, int, int, int, int) */
- + void_ftype_int_int_int_int_int
- + = build_function_type (void_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + tree_cons (NULL_TREE,
- + integer_type_node,
- + tree_cons
- + (NULL_TREE,
- + integer_type_node,
- + int_endlink)))));
- +
- + /* void func (void *, int) */
- + void_ftype_ptr_int
- + = build_function_type (void_type_node,
- + tree_cons (NULL_TREE, ptr_type_node, int_endlink));
- +
- + /* void func (int) */
- + void_ftype_int = build_function_type (void_type_node, int_endlink);
- +
- + /* void func (ulong) */
- + void_ftype_ulong = build_function_type_list (void_type_node,
- + long_unsigned_type_node, NULL_TREE);
- +
- + /* void func (void) */
- + void_ftype_void = build_function_type (void_type_node, void_endlink);
- +
- + /* int func (void) */
- + int_ftype_void = build_function_type (integer_type_node, void_endlink);
- +
- + /* int func (void *, int) */
- + int_ftype_ptr_int
- + = build_function_type (integer_type_node,
- + tree_cons (NULL_TREE, ptr_type_node, int_endlink));
- +
- + /* int func (int, int, int) */
- + int_ftype_int_int_int
- + = build_function_type (integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + tree_cons (NULL_TREE, integer_type_node,
- + int_endlink)));
- +
- + /* Initialize avr32 builtins. */
- + def_builtin ("__builtin_mfsr", int_ftype_int, AVR32_BUILTIN_MFSR);
- + def_builtin ("__builtin_mtsr", void_ftype_int_int, AVR32_BUILTIN_MTSR);
- + def_builtin ("__builtin_mfdr", int_ftype_int, AVR32_BUILTIN_MFDR);
- + def_builtin ("__builtin_mtdr", void_ftype_int_int, AVR32_BUILTIN_MTDR);
- + def_builtin ("__builtin_cache", void_ftype_ptr_int, AVR32_BUILTIN_CACHE);
- + def_builtin ("__builtin_sync", void_ftype_int, AVR32_BUILTIN_SYNC);
- + def_builtin ("__builtin_ssrf", void_ftype_int, AVR32_BUILTIN_SSRF);
- + def_builtin ("__builtin_csrf", void_ftype_int, AVR32_BUILTIN_CSRF);
- + def_builtin ("__builtin_tlbr", void_ftype_void, AVR32_BUILTIN_TLBR);
- + def_builtin ("__builtin_tlbs", void_ftype_void, AVR32_BUILTIN_TLBS);
- + def_builtin ("__builtin_tlbw", void_ftype_void, AVR32_BUILTIN_TLBW);
- + def_builtin ("__builtin_breakpoint", void_ftype_void,
- + AVR32_BUILTIN_BREAKPOINT);
- + def_builtin ("__builtin_xchg", int_ftype_ptr_int, AVR32_BUILTIN_XCHG);
- + def_builtin ("__builtin_ldxi", int_ftype_ptr_int, AVR32_BUILTIN_LDXI);
- + def_builtin ("__builtin_bswap_16", short_ftype_short,
- + AVR32_BUILTIN_BSWAP16);
- + def_builtin ("__builtin_bswap_32", int_ftype_int, AVR32_BUILTIN_BSWAP32);
- + def_builtin ("__builtin_cop", void_ftype_int_int_int_int_int,
- + AVR32_BUILTIN_COP);
- + def_builtin ("__builtin_mvcr_w", int_ftype_int_int, AVR32_BUILTIN_MVCR_W);
- + def_builtin ("__builtin_mvrc_w", void_ftype_int_int_int,
- + AVR32_BUILTIN_MVRC_W);
- + def_builtin ("__builtin_mvcr_d", longlong_ftype_int_int,
- + AVR32_BUILTIN_MVCR_D);
- + def_builtin ("__builtin_mvrc_d", void_ftype_int_int_longlong,
- + AVR32_BUILTIN_MVRC_D);
- + def_builtin ("__builtin_sats", int_ftype_int_int_int, AVR32_BUILTIN_SATS);
- + def_builtin ("__builtin_satu", int_ftype_int_int_int, AVR32_BUILTIN_SATU);
- + def_builtin ("__builtin_satrnds", int_ftype_int_int_int,
- + AVR32_BUILTIN_SATRNDS);
- + def_builtin ("__builtin_satrndu", int_ftype_int_int_int,
- + AVR32_BUILTIN_SATRNDU);
- + def_builtin ("__builtin_musfr", void_ftype_int, AVR32_BUILTIN_MUSFR);
- + def_builtin ("__builtin_mustr", int_ftype_void, AVR32_BUILTIN_MUSTR);
- + def_builtin ("__builtin_macsathh_w", int_ftype_int_short_short,
- + AVR32_BUILTIN_MACSATHH_W);
- + def_builtin ("__builtin_macwh_d", longlong_ftype_longlong_int_short,
- + AVR32_BUILTIN_MACWH_D);
- + def_builtin ("__builtin_machh_d", longlong_ftype_longlong_short_short,
- + AVR32_BUILTIN_MACHH_D);
- + def_builtin ("__builtin_mems", void_ftype_ptr_int, AVR32_BUILTIN_MEMS);
- + def_builtin ("__builtin_memt", void_ftype_ptr_int, AVR32_BUILTIN_MEMT);
- + def_builtin ("__builtin_memc", void_ftype_ptr_int, AVR32_BUILTIN_MEMC);
- + def_builtin ("__builtin_sleep", void_ftype_int, AVR32_BUILTIN_SLEEP);
- + def_builtin ("__builtin_avr32_delay_cycles", void_ftype_int, AVR32_BUILTIN_DELAY_CYCLES);
- +
- + /* Add all builtins that are more or less simple operations on two
- + operands. */
- + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
- + {
- + /* Use one of the operands; the target can have a different mode for
- + mask-generating compares. */
- +
- + if (d->name == 0)
- + continue;
- +
- + def_mbuiltin (d->mask, d->name, *(d->ftype), d->code);
- + }
- +}
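- +
- +/* Illustrative use of two of the builtins registered above; the system
- + register number 0 is a placeholder:
- +
- + int sr = __builtin_mfsr (0);
- + __builtin_mtsr (0, sr);
- +
- + Both require the register number to be a compile time constant, as
- + enforced in avr32_expand_builtin below. */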
- +
- +
- +/* Subroutine of avr32_expand_builtin to take care of binop insns. */
- +static rtx
- +avr32_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
- +{
- + rtx pat;
- + tree arg0 = CALL_EXPR_ARG (exp,0);
- + tree arg1 = CALL_EXPR_ARG (exp,1);
- + rtx op0 = expand_normal (arg0);
- + rtx op1 = expand_normal (arg1);
- + enum machine_mode tmode = insn_data[icode].operand[0].mode;
- + enum machine_mode mode0 = insn_data[icode].operand[1].mode;
- + enum machine_mode mode1 = insn_data[icode].operand[2].mode;
- +
- + if (!target
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- +
- + /* In case the insn wants input operands in modes different from the
- + result, abort. */
- + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
- + {
- + /* If op0 is already a reg we must cast it to the correct mode. */
- + if (REG_P (op0))
- + op0 = convert_to_mode (mode0, op0, 1);
- + else
- + op0 = copy_to_mode_reg (mode0, op0);
- + }
- + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
- + {
- + /* If op1 is already a reg we must cast it to the correct mode. */
- + if (REG_P (op1))
- + op1 = convert_to_mode (mode1, op1, 1);
- + else
- + op1 = copy_to_mode_reg (mode1, op1);
- + }
- + pat = GEN_FCN (icode) (target, op0, op1);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return target;
- +}
- +
- +
- +/* Expand an expression EXP that calls a built-in function,
- + with result going to TARGET if that's convenient
- + (and in mode MODE if that's convenient).
- + SUBTARGET may be used as the target for computing one of EXP's operands.
- + IGNORE is nonzero if the value is to be ignored. */
- +rtx
- +avr32_expand_builtin (tree exp,
- + rtx target,
- + rtx subtarget ATTRIBUTE_UNUSED,
- + enum machine_mode mode ATTRIBUTE_UNUSED,
- + int ignore ATTRIBUTE_UNUSED)
- +{
- + const struct builtin_description *d;
- + unsigned int i;
- + enum insn_code icode = 0;
- + tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
- + tree arg0, arg1, arg2;
- + rtx op0, op1, op2, pat;
- + enum machine_mode tmode, mode0, mode1;
- + enum machine_mode arg0_mode;
- + int fcode = DECL_FUNCTION_CODE (fndecl);
- +
- + switch (fcode)
- + {
- + default:
- + break;
- +
- + case AVR32_BUILTIN_SATS:
- + case AVR32_BUILTIN_SATU:
- + case AVR32_BUILTIN_SATRNDS:
- + case AVR32_BUILTIN_SATRNDU:
- + {
- + const char *fname;
- + switch (fcode)
- + {
- + default:
- + case AVR32_BUILTIN_SATS:
- + icode = CODE_FOR_sats;
- + fname = "sats";
- + break;
- + case AVR32_BUILTIN_SATU:
- + icode = CODE_FOR_satu;
- + fname = "satu";
- + break;
- + case AVR32_BUILTIN_SATRNDS:
- + icode = CODE_FOR_satrnds;
- + fname = "satrnds";
- + break;
- + case AVR32_BUILTIN_SATRNDU:
- + icode = CODE_FOR_satrndu;
- + fname = "satrndu";
- + break;
- + }
- +
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + arg2 = CALL_EXPR_ARG (exp,2);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- + op2 = expand_normal (arg2);
- +
- + tmode = insn_data[icode].operand[0].mode;
- +
- +
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- +
- +
- + if (!(*insn_data[icode].operand[0].predicate) (op0, GET_MODE (op0)))
- + {
- + op0 = copy_to_mode_reg (insn_data[icode].operand[0].mode, op0);
- + }
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
- + {
- + error ("Parameter 2 to __builtin_%s should be a constant number.",
- + fname);
- + return NULL_RTX;
- + }
- +
- + if (!(*insn_data[icode].operand[2].predicate) (op2, SImode))
- + {
- + error ("Parameter 3 to __builtin_%s should be a constant number.",
- + fname);
- + return NULL_RTX;
- + }
- +
- + emit_move_insn (target, op0);
- + pat = GEN_FCN (icode) (target, op1, op2);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- +
- + return target;
- + }
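- +
- + /* Illustrative call expanded by the case above; the constant
- + arguments are placeholders and must be literal numbers:
- +
- + int r = __builtin_sats (x, 4, 2);
- + */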
- + case AVR32_BUILTIN_MUSTR:
- + icode = CODE_FOR_mustr;
- + tmode = insn_data[icode].operand[0].mode;
- +
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- + pat = GEN_FCN (icode) (target);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return target;
- +
- + case AVR32_BUILTIN_MFSR:
- + icode = CODE_FOR_mfsr;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + op0 = expand_normal (arg0);
- + tmode = insn_data[icode].operand[0].mode;
- + mode0 = insn_data[icode].operand[1].mode;
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
- + {
- + error ("Parameter 1 to __builtin_mfsr must be a constant number");
- + }
- +
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- + pat = GEN_FCN (icode) (target, op0);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return target;
- + case AVR32_BUILTIN_MTSR:
- + icode = CODE_FOR_mtsr;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- + mode0 = insn_data[icode].operand[0].mode;
- + mode1 = insn_data[icode].operand[1].mode;
- +
- + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
- + {
- + error ("Parameter 1 to __builtin_mtsr must be a constant number");
- + return gen_reg_rtx (mode0);
- + }
- + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
- + op1 = copy_to_mode_reg (mode1, op1);
- + pat = GEN_FCN (icode) (op0, op1);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return NULL_RTX;
- + case AVR32_BUILTIN_MFDR:
- + icode = CODE_FOR_mfdr;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + op0 = expand_normal (arg0);
- + tmode = insn_data[icode].operand[0].mode;
- + mode0 = insn_data[icode].operand[1].mode;
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
- + {
- + error ("Parameter 1 to __builtin_mfdr must be a constant number");
- + }
- +
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- + pat = GEN_FCN (icode) (target, op0);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return target;
- + case AVR32_BUILTIN_MTDR:
- + icode = CODE_FOR_mtdr;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- + mode0 = insn_data[icode].operand[0].mode;
- + mode1 = insn_data[icode].operand[1].mode;
- +
- + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
- + {
- + error ("Parameter 1 to __builtin_mtdr must be a constant number");
- + return gen_reg_rtx (mode0);
- + }
- + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
- + op1 = copy_to_mode_reg (mode1, op1);
- + pat = GEN_FCN (icode) (op0, op1);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return NULL_RTX;
- + case AVR32_BUILTIN_CACHE:
- + icode = CODE_FOR_cache;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- + mode0 = insn_data[icode].operand[0].mode;
- + mode1 = insn_data[icode].operand[1].mode;
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
- + {
- + error ("Parameter 2 to __builtin_cache must be a constant number");
- + return gen_reg_rtx (mode1);
- + }
- +
- + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
- + op0 = copy_to_mode_reg (mode0, op0);
- +
- + pat = GEN_FCN (icode) (op0, op1);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return NULL_RTX;
- + case AVR32_BUILTIN_SYNC:
- + case AVR32_BUILTIN_MUSFR:
- + case AVR32_BUILTIN_SSRF:
- + case AVR32_BUILTIN_CSRF:
- + {
- + const char *fname;
- + switch (fcode)
- + {
- + default:
- + case AVR32_BUILTIN_SYNC:
- + icode = CODE_FOR_sync;
- + fname = "sync";
- + break;
- + case AVR32_BUILTIN_MUSFR:
- + icode = CODE_FOR_musfr;
- + fname = "musfr";
- + break;
- + case AVR32_BUILTIN_SSRF:
- + icode = CODE_FOR_ssrf;
- + fname = "ssrf";
- + break;
- + case AVR32_BUILTIN_CSRF:
- + icode = CODE_FOR_csrf;
- + fname = "csrf";
- + break;
- + }
- +
- + arg0 = CALL_EXPR_ARG (exp,0);
- + op0 = expand_normal (arg0);
- + mode0 = insn_data[icode].operand[0].mode;
- +
- + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
- + {
- + if (icode == CODE_FOR_musfr)
- + op0 = copy_to_mode_reg (mode0, op0);
- + else
- + {
- + error ("Parameter to __builtin_%s is illegal.", fname);
- + return gen_reg_rtx (mode0);
- + }
- + }
- + pat = GEN_FCN (icode) (op0);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return NULL_RTX;
- + }
- + case AVR32_BUILTIN_TLBR:
- + icode = CODE_FOR_tlbr;
- + pat = GEN_FCN (icode) (NULL_RTX);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return NULL_RTX;
- + case AVR32_BUILTIN_TLBS:
- + icode = CODE_FOR_tlbs;
- + pat = GEN_FCN (icode) (NULL_RTX);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return NULL_RTX;
- + case AVR32_BUILTIN_TLBW:
- + icode = CODE_FOR_tlbw;
- + pat = GEN_FCN (icode) (NULL_RTX);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return NULL_RTX;
- + case AVR32_BUILTIN_BREAKPOINT:
- + icode = CODE_FOR_breakpoint;
- + pat = GEN_FCN (icode) (NULL_RTX);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return NULL_RTX;
- + case AVR32_BUILTIN_XCHG:
- + icode = CODE_FOR_sync_lock_test_and_setsi;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- + tmode = insn_data[icode].operand[0].mode;
- + mode0 = insn_data[icode].operand[1].mode;
- + mode1 = insn_data[icode].operand[2].mode;
- +
- + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
- + {
- + op1 = copy_to_mode_reg (mode1, op1);
- + }
- +
- + op0 = force_reg (GET_MODE (op0), op0);
- + op0 = gen_rtx_MEM (GET_MODE (op0), op0);
- + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
- + {
- + error
- + ("Parameter 1 to __builtin_xchg must be a pointer to an integer.");
- + }
- +
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- + pat = GEN_FCN (icode) (target, op0, op1);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return target;
- + case AVR32_BUILTIN_LDXI:
- + icode = CODE_FOR_ldxi;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + arg2 = CALL_EXPR_ARG (exp,2);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- + op2 = expand_normal (arg2);
- + tmode = insn_data[icode].operand[0].mode;
- + mode0 = insn_data[icode].operand[1].mode;
- + mode1 = insn_data[icode].operand[2].mode;
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
- + {
- + op0 = copy_to_mode_reg (mode0, op0);
- + }
- +
- + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
- + {
- + op1 = copy_to_mode_reg (mode1, op1);
- + }
- +
- + if (!(*insn_data[icode].operand[3].predicate) (op2, SImode))
- + {
- + error
- + ("Parameter 3 to __builtin_ldxi must be a valid extract shift operand: (0|8|16|24)");
- + return gen_reg_rtx (mode0);
- + }
- +
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- + pat = GEN_FCN (icode) (target, op0, op1, op2);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return target;
- + case AVR32_BUILTIN_BSWAP16:
- + {
- + icode = CODE_FOR_bswap_16;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
- + mode0 = insn_data[icode].operand[1].mode;
- + if (arg0_mode != mode0)
- + arg0 = build1 (NOP_EXPR,
- + (*lang_hooks.types.type_for_mode) (mode0, 0), arg0);
- +
- + op0 = expand_expr (arg0, NULL_RTX, HImode, 0);
- + tmode = insn_data[icode].operand[0].mode;
- +
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
- + {
- + if ( CONST_INT_P (op0) )
- + {
- + HOST_WIDE_INT val = ( ((INTVAL (op0)&0x00ff) << 8) |
- + ((INTVAL (op0)&0xff00) >> 8) );
- + /* Sign extend 16-bit value to host wide int */
- + val <<= (HOST_BITS_PER_WIDE_INT - 16);
- + val >>= (HOST_BITS_PER_WIDE_INT - 16);
- + op0 = GEN_INT(val);
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- + emit_move_insn(target, op0);
- + return target;
- + }
- + else
- + op0 = copy_to_mode_reg (mode0, op0);
- + }
- +
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + {
- + target = gen_reg_rtx (tmode);
- + }
- +
- +
- + pat = GEN_FCN (icode) (target, op0);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- +
- + return target;
- + }
- + case AVR32_BUILTIN_BSWAP32:
- + {
- + icode = CODE_FOR_bswap_32;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + op0 = expand_normal (arg0);
- + tmode = insn_data[icode].operand[0].mode;
- + mode0 = insn_data[icode].operand[1].mode;
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
- + {
- + if ( CONST_INT_P (op0) )
- + {
- + HOST_WIDE_INT val = ( ((INTVAL (op0)&0x000000ff) << 24) |
- + ((INTVAL (op0)&0x0000ff00) << 8) |
- + ((INTVAL (op0)&0x00ff0000) >> 8) |
- + ((INTVAL (op0)&0xff000000) >> 24) );
- + /* Sign extend 32-bit value to host wide int */
- + val <<= (HOST_BITS_PER_WIDE_INT - 32);
- + val >>= (HOST_BITS_PER_WIDE_INT - 32);
- + op0 = GEN_INT(val);
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- + emit_move_insn(target, op0);
- + return target;
- + }
- + else
- + op0 = copy_to_mode_reg (mode0, op0);
- + }
- +
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- +
- +
- + pat = GEN_FCN (icode) (target, op0);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- +
- + return target;
- + }
- + case AVR32_BUILTIN_MVCR_W:
- + case AVR32_BUILTIN_MVCR_D:
- + {
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- +
- + if (fcode == AVR32_BUILTIN_MVCR_W)
- + icode = CODE_FOR_mvcrsi;
- + else
- + icode = CODE_FOR_mvcrdi;
- +
- + tmode = insn_data[icode].operand[0].mode;
- +
- + if (target == 0
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op0, SImode))
- + {
- + error
- + ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
- + error ("Number should be between 0 and 7.");
- + return NULL_RTX;
- + }
- +
- + if (!(*insn_data[icode].operand[2].predicate) (op1, SImode))
- + {
- + error
- + ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
- + error ("Number should be between 0 and 15.");
- + return NULL_RTX;
- + }
- +
- + pat = GEN_FCN (icode) (target, op0, op1);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- +
- + return target;
- + }
- + case AVR32_BUILTIN_MACSATHH_W:
- + case AVR32_BUILTIN_MACWH_D:
- + case AVR32_BUILTIN_MACHH_D:
- + {
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + arg2 = CALL_EXPR_ARG (exp,2);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- + op2 = expand_normal (arg2);
- +
- + icode = ((fcode == AVR32_BUILTIN_MACSATHH_W) ? CODE_FOR_macsathh_w :
- + (fcode == AVR32_BUILTIN_MACWH_D) ? CODE_FOR_macwh_d :
- + CODE_FOR_machh_d);
- +
- + tmode = insn_data[icode].operand[0].mode;
- + mode0 = insn_data[icode].operand[1].mode;
- + mode1 = insn_data[icode].operand[2].mode;
- +
- +
- + if (!target
- + || GET_MODE (target) != tmode
- + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
- + target = gen_reg_rtx (tmode);
- +
- + if (!(*insn_data[icode].operand[0].predicate) (op0, tmode))
- + {
- + /* If op0 is already a reg we must cast it to the correct mode. */
- + if (REG_P (op0))
- + op0 = convert_to_mode (tmode, op0, 1);
- + else
- + op0 = copy_to_mode_reg (tmode, op0);
- + }
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op1, mode0))
- + {
- + /* If op1 is already a reg we must cast it to the correct mode. */
- + if (REG_P (op1))
- + op1 = convert_to_mode (mode0, op1, 1);
- + else
- + op1 = copy_to_mode_reg (mode0, op1);
- + }
- +
- + if (!(*insn_data[icode].operand[2].predicate) (op2, mode1))
- + {
- + /* If op2 is already a reg we must cast it to the correct mode. */
- + if (REG_P (op2))
- + op2 = convert_to_mode (mode1, op2, 1);
- + else
- + op2 = copy_to_mode_reg (mode1, op2);
- + }
- +
- + emit_move_insn (target, op0);
- +
- + pat = GEN_FCN (icode) (target, op1, op2);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return target;
- + }
- + case AVR32_BUILTIN_MVRC_W:
- + case AVR32_BUILTIN_MVRC_D:
- + {
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + arg2 = CALL_EXPR_ARG (exp,2);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- + op2 = expand_normal (arg2);
- +
- + if (fcode == AVR32_BUILTIN_MVRC_W)
- + icode = CODE_FOR_mvrcsi;
- + else
- + icode = CODE_FOR_mvrcdi;
- +
- + if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
- + {
- + error ("Parameter 1 is not a valid coprocessor number.");
- + error ("Number should be between 0 and 7.");
- + return NULL_RTX;
- + }
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
- + {
- + error ("Parameter 2 is not a valid coprocessor register number.");
- + error ("Number should be between 0 and 15.");
- + return NULL_RTX;
- + }
- +
- + if (GET_CODE (op2) == CONST_INT
- + || GET_CODE (op2) == CONST
- + || GET_CODE (op2) == SYMBOL_REF || GET_CODE (op2) == LABEL_REF)
- + {
- + op2 = force_const_mem (insn_data[icode].operand[2].mode, op2);
- + }
- +
- + if (!(*insn_data[icode].operand[2].predicate) (op2, GET_MODE (op2)))
- + op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
- +
- +
- + pat = GEN_FCN (icode) (op0, op1, op2);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- +
- + return NULL_RTX;
- + }
- + case AVR32_BUILTIN_COP:
- + {
- + rtx op3, op4;
- + tree arg3, arg4;
- + icode = CODE_FOR_cop;
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + arg2 = CALL_EXPR_ARG (exp,2);
- + arg3 = CALL_EXPR_ARG (exp,3);
- + arg4 = CALL_EXPR_ARG (exp,4);
- + op0 = expand_normal (arg0);
- + op1 = expand_normal (arg1);
- + op2 = expand_normal (arg2);
- + op3 = expand_normal (arg3);
- + op4 = expand_normal (arg4);
- +
- + if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
- + {
- + error
- + ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
- + error ("Number should be between 0 and 7.");
- + return NULL_RTX;
- + }
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
- + {
- + error
- + ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
- + error ("Number should be between 0 and 15.");
- + return NULL_RTX;
- + }
- +
- + if (!(*insn_data[icode].operand[2].predicate) (op2, SImode))
- + {
- + error
- + ("Parameter 3 to __builtin_cop is not a valid coprocessor register number.");
- + error ("Number should be between 0 and 15.");
- + return NULL_RTX;
- + }
- +
- + if (!(*insn_data[icode].operand[3].predicate) (op3, SImode))
- + {
- + error
- + ("Parameter 4 to __builtin_cop is not a valid coprocessor register number.");
- + error ("Number should be between 0 and 15.");
- + return NULL_RTX;
- + }
- +
- + if (!(*insn_data[icode].operand[4].predicate) (op4, SImode))
- + {
- + error
- + ("Parameter 5 to __builtin_cop is not a valid coprocessor operation.");
- + error ("Number should be between 0 and 127.");
- + return NULL_RTX;
- + }
- +
- + pat = GEN_FCN (icode) (op0, op1, op2, op3, op4);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- +
- + return target;
- + }
- +
- + case AVR32_BUILTIN_MEMS:
- + case AVR32_BUILTIN_MEMC:
- + case AVR32_BUILTIN_MEMT:
- + {
- + if (!TARGET_RMW)
- + error ("Trying to use __builtin_mem(s/c/t) when target does not support RMW insns.");
- +
- + switch (fcode) {
- + case AVR32_BUILTIN_MEMS:
- + icode = CODE_FOR_iorsi3;
- + break;
- + case AVR32_BUILTIN_MEMC:
- + icode = CODE_FOR_andsi3;
- + break;
- + case AVR32_BUILTIN_MEMT:
- + icode = CODE_FOR_xorsi3;
- + break;
- + }
- + arg0 = CALL_EXPR_ARG (exp,0);
- + arg1 = CALL_EXPR_ARG (exp,1);
- + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- + if ( GET_CODE (op0) == SYMBOL_REF )
- + /* This symbol must be RMW addressable. */
- + SYMBOL_REF_FLAGS (op0) |= (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT);
- + op0 = gen_rtx_MEM(SImode, op0);
- + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
- + mode0 = insn_data[icode].operand[1].mode;
- +
- +
- + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
- + {
- + error ("Parameter 1 to __builtin_mem(s/c/t) must be a Ks15<<2 address or a rmw addressable symbol.");
- + }
- +
- + if ( !CONST_INT_P (op1)
- + || INTVAL (op1) > 31
- + || INTVAL (op1) < 0 )
- + error ("Parameter 2 to __builtin_mem(s/c/t) must be a constant between 0 and 31.");
- +
- + if ( fcode == AVR32_BUILTIN_MEMC )
- + op1 = GEN_INT((~(1 << INTVAL(op1)))&0xffffffff);
- + else
- + op1 = GEN_INT((1 << INTVAL(op1))&0xffffffff);
- + pat = GEN_FCN (icode) (op0, op0, op1);
- + if (!pat)
- + return 0;
- + emit_insn (pat);
- + return op0;
- + }
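- +
- + /* Illustrative call expanded by the case above, setting bit 3 in a
- + statically allocated word; the operands are placeholders:
- +
- + static int flags;
- + __builtin_mems (&flags, 3);
- + */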
- +
- + case AVR32_BUILTIN_SLEEP:
- + {
- + arg0 = CALL_EXPR_ARG (exp, 0);
- + op0 = expand_normal (arg0);
- + int intval = INTVAL(op0);
- +
- + /* Check that the argument is an integer constant and that its
- + value is non-negative. */
- +
- + if (!CONSTANT_P (op0))
- + error ("Parameter 1 to __builtin_sleep() is not a valid integer.");
- + if (intval < 0 )
- + error ("Parameter 1 to __builtin_sleep() should be an integer greater than 0.");
- +
- + int strncmpval = strncmp (avr32_part_name, "uc3l", 4);
- +
- + /* Check that op0 is less than 7 for uc3l* parts and less than 6 for
- + other devices; without this extra check any operand below 256
- + would be accepted. For more devices, add more such checks. */
- +
- + if ( strncmpval == 0 && intval >= 7)
- + error ("Parameter 1 to __builtin_sleep() should be less than or equal to 7.");
- + else if ( strncmp != 0 && intval >= 6)
- + error ("Parameter 1 to __builtin_sleep() should be less than or equal to 6.");
- +
- + emit_insn (gen_sleep(op0));
- + return target;
- +
- + }
- + case AVR32_BUILTIN_DELAY_CYCLES:
- + {
- + arg0 = CALL_EXPR_ARG (exp, 0);
- + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
- +
- + if (TARGET_ARCH_AP)
- + error (" __builtin_avr32_delay_cycles() not supported for \'%s\' architecture.", avr32_arch_name);
- + if (!CONSTANT_P (op0))
- + error ("Parameter 1 to __builtin_avr32_delay_cycles() should be an integer.");
- + emit_insn (gen_delay_cycles (op0));
- + return 0;
- +
- + }
- +
- + }
- +
- + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
- + if (d->code == fcode)
- + return avr32_expand_binop_builtin (d->icode, exp, target);
- +
- +
- + /* @@@ Should really do something sensible here. */
- + return NULL_RTX;
- +}
- +
- +
- +/* Handle an "interrupt" or "isr" attribute;
- + arguments as in struct attribute_spec.handler. */
- +static tree
- +avr32_handle_isr_attribute (tree * node, tree name, tree args,
- + int flags, bool * no_add_attrs)
- +{
- + if (DECL_P (*node))
- + {
- + if (TREE_CODE (*node) != FUNCTION_DECL)
- + {
- + warning (OPT_Wattributes,"`%s' attribute only applies to functions",
- + IDENTIFIER_POINTER (name));
- + *no_add_attrs = true;
- + }
- + /* FIXME: the argument if any is checked for type attributes; should it
- + be checked for decl ones? */
- + }
- + else
- + {
- + if (TREE_CODE (*node) == FUNCTION_TYPE
- + || TREE_CODE (*node) == METHOD_TYPE)
- + {
- + if (avr32_isr_value (args) == AVR32_FT_UNKNOWN)
- + {
- + warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
- + *no_add_attrs = true;
- + }
- + }
- + else if (TREE_CODE (*node) == POINTER_TYPE
- + && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
- + || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
- + && avr32_isr_value (args) != AVR32_FT_UNKNOWN)
- + {
- + *node = build_variant_type_copy (*node);
- + TREE_TYPE (*node) = build_type_attribute_variant
- + (TREE_TYPE (*node),
- + tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
- + *no_add_attrs = true;
- + }
- + else
- + {
- + /* Possibly pass this attribute on from the type to a decl. */
- + if (flags & ((int) ATTR_FLAG_DECL_NEXT
- + | (int) ATTR_FLAG_FUNCTION_NEXT
- + | (int) ATTR_FLAG_ARRAY_NEXT))
- + {
- + *no_add_attrs = true;
- + return tree_cons (name, args, NULL_TREE);
- + }
- + else
- + {
- + warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
- + }
- + }
- + }
- +
- + return NULL_TREE;
- +}
- +
- +
- +/* Handle an attribute requiring a FUNCTION_DECL;
- + arguments as in struct attribute_spec.handler. */
- +static tree
- +avr32_handle_fndecl_attribute (tree * node, tree name,
- + tree args,
- + int flags ATTRIBUTE_UNUSED,
- + bool * no_add_attrs)
- +{
- + if (TREE_CODE (*node) != FUNCTION_DECL)
- + {
- + warning (OPT_Wattributes,"%qs attribute only applies to functions",
- + IDENTIFIER_POINTER (name));
- + *no_add_attrs = true;
- + return NULL_TREE;
- + }
- +
- + fndecl_attribute_args = args;
- + if (args == NULL_TREE)
- + return NULL_TREE;
- +
- + tree value = TREE_VALUE (args);
- + if (TREE_CODE (value) != INTEGER_CST)
- + {
- + warning (OPT_Wattributes,
- + "argument of %qs attribute is not an integer constant",
- + IDENTIFIER_POINTER (name));
- + *no_add_attrs = true;
- + }
- +
- + return NULL_TREE;
- +}
- +
- +
- +/* Handle an acall attribute;
- + arguments as in struct attribute_spec.handler. */
- +
- +static tree
- +avr32_handle_acall_attribute (tree * node, tree name,
- + tree args ATTRIBUTE_UNUSED,
- + int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
- +{
- + if (TREE_CODE (*node) == FUNCTION_TYPE || TREE_CODE (*node) == METHOD_TYPE)
- + {
- + warning (OPT_Wattributes,"`%s' attribute not yet supported...",
- + IDENTIFIER_POINTER (name));
- + *no_add_attrs = true;
- + return NULL_TREE;
- + }
- +
- + warning (OPT_Wattributes,"`%s' attribute only applies to functions",
- + IDENTIFIER_POINTER (name));
- + *no_add_attrs = true;
- + return NULL_TREE;
- +}
- +
- +
- +bool
- +avr32_flashvault_call(tree decl)
- +{
- + tree attributes;
- + tree fv_attribute;
- + tree vector_tree;
- + unsigned int vector;
- +
- + if (decl && TREE_CODE (decl) == FUNCTION_DECL)
- + {
- + attributes = DECL_ATTRIBUTES(decl);
- + fv_attribute = lookup_attribute ("flashvault", attributes);
- + if (fv_attribute != NULL_TREE)
- + {
- + /* Get attribute parameter, for the function vector number. */
- + /*
- + There is probably an easier, standard way to retrieve the
- + attribute parameter which needs to be done here.
- + */
- + vector_tree = TREE_VALUE(fv_attribute);
- + if (vector_tree != NULL_TREE)
- + {
- + vector = (unsigned int)TREE_INT_CST_LOW(TREE_VALUE(vector_tree));
- + fprintf (asm_out_file,
- + "\tmov\tr8, lo(%i)\t# Load vector number for sscall.\n",
- + vector);
- + }
- +
- + fprintf (asm_out_file,
- + "\tsscall\t# Secure system call.\n");
- +
- + return true;
- + }
- + }
- +
- + return false;
- +}
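- +
- +/* Sketch of how a flashvault entry function might be declared
- +   (hypothetical example; the attribute takes the secure vector number
- +   as its argument, as read out via TREE_VALUE above):
- +
- +     void secure_entry (void) __attribute__ ((flashvault (1)));
- +
- +   A call to such a function then emits the mov/sscall sequence above
- +   instead of an ordinary call. */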
- +
- +
- +static bool has_attribute_p (tree decl, const char *name)
- +{
- + if (decl && TREE_CODE (decl) == FUNCTION_DECL)
- + {
- + return (lookup_attribute (name, DECL_ATTRIBUTES(decl)) != NULL_TREE);
- + }
- + return false;
- +}
- +
- +
- +/* Return 0 if the attributes for two types are incompatible, 1 if they
- + are compatible, and 2 if they are nearly compatible (which causes a
- + warning to be generated). */
- +static int
- +avr32_comp_type_attributes (tree type1, tree type2)
- +{
- + bool acall1, acall2, isr1, isr2, naked1, naked2, fv1, fv2, fvimpl1, fvimpl2;
- +
- + /* Check for mismatch of non-default calling convention. */
- + if (TREE_CODE (type1) != FUNCTION_TYPE)
- + return 1;
- +
- + /* Check for mismatched call attributes. */
- + acall1 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type1)) != NULL;
- + acall2 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type2)) != NULL;
- + naked1 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type1)) != NULL;
- + naked2 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type2)) != NULL;
- + fv1 = lookup_attribute ("flashvault", TYPE_ATTRIBUTES (type1)) != NULL;
- + fv2 = lookup_attribute ("flashvault", TYPE_ATTRIBUTES (type2)) != NULL;
- + fvimpl1 = lookup_attribute ("flashvault_impl", TYPE_ATTRIBUTES (type1)) != NULL;
- + fvimpl2 = lookup_attribute ("flashvault_impl", TYPE_ATTRIBUTES (type2)) != NULL;
- + isr1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
- + if (!isr1)
- + isr1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
- +
- + isr2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
- + if (!isr2)
- + isr2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
- +
- + if ((acall1 && isr2)
- + || (acall2 && isr1)
- + || (naked1 && isr2)
- + || (naked2 && isr1)
- + || (fv1 && isr2)
- + || (fv2 && isr1)
- + || (fvimpl1 && isr2)
- + || (fvimpl2 && isr1)
- + || (fv1 && fvimpl2)
- + || (fv2 && fvimpl1)
- + )
- + return 0;
- +
- + return 1;
- +}
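- +
- +/* For example, the check above makes two function types incompatible
- +   when one carries "acall" and the other "isr" (illustrative
- +   declarations only, not taken from the patch):
- +
- +     void handler (void) __attribute__ ((isr));
- +     void (*fp) (void) __attribute__ ((acall)) = handler;  // rejected
- +*/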
- +
- +
- +/* Computes the type of the current function. */
- +static unsigned long
- +avr32_compute_func_type (void)
- +{
- + unsigned long type = AVR32_FT_UNKNOWN;
- + tree a;
- + tree attr;
- +
- + if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
- + abort ();
- +
- + /* Decide if the current function is volatile. Such functions never
- + return, and many memory cycles can be saved by not storing register
- + values that will never be needed again. This optimization was added to
- + speed up context switching in a kernel application. */
- + if (optimize > 0
- + && TREE_NOTHROW (current_function_decl)
- + && TREE_THIS_VOLATILE (current_function_decl))
- + type |= AVR32_FT_VOLATILE;
- +
- + if (cfun->static_chain_decl != NULL)
- + type |= AVR32_FT_NESTED;
- +
- + attr = DECL_ATTRIBUTES (current_function_decl);
- +
- + a = lookup_attribute ("isr", attr);
- + if (a == NULL_TREE)
- + a = lookup_attribute ("interrupt", attr);
- +
- + if (a == NULL_TREE)
- + type |= AVR32_FT_NORMAL;
- + else
- + type |= avr32_isr_value (TREE_VALUE (a));
- +
- +
- + a = lookup_attribute ("acall", attr);
- + if (a != NULL_TREE)
- + type |= AVR32_FT_ACALL;
- +
- + a = lookup_attribute ("naked", attr);
- + if (a != NULL_TREE)
- + type |= AVR32_FT_NAKED;
- +
- + a = lookup_attribute ("flashvault", attr);
- + if (a != NULL_TREE)
- + type |= AVR32_FT_FLASHVAULT;
- +
- + a = lookup_attribute ("flashvault_impl", attr);
- + if (a != NULL_TREE)
- + type |= AVR32_FT_FLASHVAULT_IMPL;
- +
- + return type;
- +}
- +
- +
- +/* Returns the type of the current function. */
- +static unsigned long
- +avr32_current_func_type (void)
- +{
- + if (AVR32_FUNC_TYPE (cfun->machine->func_type) == AVR32_FT_UNKNOWN)
- + cfun->machine->func_type = avr32_compute_func_type ();
- +
- + return cfun->machine->func_type;
- +}
- +
- +
- +/*
- +This target hook should return true if we should not pass type solely
- +in registers. The file expr.h defines a definition that is usually appropriate,
- +refer to expr.h for additional documentation.
- +*/
- +bool
- +avr32_must_pass_in_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type)
- +{
- + if (type && AGGREGATE_TYPE_P (type)
- + /* If the alignment is less than the size then pass in the struct on
- + the stack. */
- + && ((unsigned int) TYPE_ALIGN_UNIT (type) <
- + (unsigned int) int_size_in_bytes (type))
- + /* If we support unaligned word accesses then structs of size 4 and 8
- + can have any alignment and still be passed in registers. */
- + && !(TARGET_UNALIGNED_WORD
- + && (int_size_in_bytes (type) == 4
- + || int_size_in_bytes (type) == 8))
- + /* Double word structs need only a word alignment. */
- + && !(int_size_in_bytes (type) == 8 && TYPE_ALIGN_UNIT (type) >= 4))
- + return true;
- +
- + if (type && AGGREGATE_TYPE_P (type)
- + /* Structs of size 3, 5, 6 and 7 are always passed on the stack. */
- + && (int_size_in_bytes (type) == 3
- + || int_size_in_bytes (type) == 5
- + || int_size_in_bytes (type) == 6 || int_size_in_bytes (type) == 7))
- + return true;
- +
- +
- + return (type && TREE_ADDRESSABLE (type));
- +}
- +
- +
- +bool
- +avr32_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
- +{
- + return true;
- +}
- +
- +
- +/*
- + This target hook should return true if an argument at the position indicated
- + by cum should be passed by reference. This predicate is queried after target
- + independent reasons for being passed by reference, such as TREE_ADDRESSABLE (type).
- +
- + If the hook returns true, a copy of that argument is made in memory and a
- + pointer to the argument is passed instead of the argument itself. The pointer
- + is passed in whatever way is appropriate for passing a pointer to that type.
- +*/
- +bool
- +avr32_pass_by_reference (CUMULATIVE_ARGS * cum ATTRIBUTE_UNUSED,
- + enum machine_mode mode ATTRIBUTE_UNUSED,
- + tree type, bool named ATTRIBUTE_UNUSED)
- +{
- + return (type && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST));
- +}
- +
- +
- +static int
- +avr32_arg_partial_bytes (CUMULATIVE_ARGS * pcum ATTRIBUTE_UNUSED,
- + enum machine_mode mode ATTRIBUTE_UNUSED,
- + tree type ATTRIBUTE_UNUSED,
- + bool named ATTRIBUTE_UNUSED)
- +{
- + return 0;
- +}
- +
- +
- +struct gcc_target targetm = TARGET_INITIALIZER;
- +
- +/*
- + Table used to convert from register number in the assembler instructions and
- + the register numbers used in gcc.
- +*/
- +const int avr32_function_arg_reglist[] = {
- + INTERNAL_REGNUM (12),
- + INTERNAL_REGNUM (11),
- + INTERNAL_REGNUM (10),
- + INTERNAL_REGNUM (9),
- + INTERNAL_REGNUM (8)
- +};
- +
- +
- +rtx avr32_compare_op0 = NULL_RTX;
- +rtx avr32_compare_op1 = NULL_RTX;
- +rtx avr32_compare_operator = NULL_RTX;
- +rtx avr32_acc_cache = NULL_RTX;
- +/* Type of branch to use. */
- +enum avr32_cmp_type avr32_branch_type;
- +
- +
- +/*
- + Returns nonzero if it is allowed to store a value of mode mode in hard
- + register number regno.
- +*/
- +int
- +avr32_hard_regno_mode_ok (int regnr, enum machine_mode mode)
- +{
- + switch (mode)
- + {
- + case DImode: /* long long */
- + case DFmode: /* double */
- + case SCmode: /* __complex__ float */
- + case CSImode: /* __complex__ int */
- + if (regnr < 4)
- + { /* long long int not supported in r12, sp, lr or pc. */
- + return 0;
- + }
- + else
- + {
- + /* long long int has to be placed in an even register pair. */
- + if (regnr % 2)
- + return 0;
- + else
- + return 1;
- + }
- + case CDImode: /* __complex__ long long */
- + case DCmode: /* __complex__ double */
- + case TImode: /* 16 bytes */
- + if (regnr < 7)
- + return 0;
- + else if (regnr % 2)
- + return 0;
- + else
- + return 1;
- + default:
- + return 1;
- + }
- +}
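- +
- +/* For example, a DImode (long long) value may start at an even internal
- +   regno such as 4, but not at an odd regno such as 5, and never in
- +   regnos 0-3, which map to r12, sp, lr and pc. */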
- +
- +
- +int
- +avr32_rnd_operands (rtx add, rtx shift)
- +{
- + if (GET_CODE (shift) == CONST_INT &&
- + GET_CODE (add) == CONST_INT && INTVAL (shift) > 0)
- + {
- + if ((1 << (INTVAL (shift) - 1)) == INTVAL (add))
- + return TRUE;
- + }
- +
- + return FALSE;
- +}
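- +
- +/* For example, a rounding right shift by 4 uses the addend
- +   1 << (4 - 1) = 8, so (add = 8, shift = 4) returns TRUE while any
- +   other addend for that shift amount returns FALSE. */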
- +
- +
- +int
- +avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c, const char *str)
- +{
- + switch (c)
- + {
- + case 'K':
- + case 'I':
- + {
- + HOST_WIDE_INT min_value = 0, max_value = 0;
- + char size_str[3];
- + int const_size;
- +
- + size_str[0] = str[2];
- + size_str[1] = str[3];
- + size_str[2] = '\0';
- + const_size = atoi (size_str);
- +
- + if (TOUPPER (str[1]) == 'U')
- + {
- + min_value = 0;
- + max_value = (1 << const_size) - 1;
- + }
- + else if (TOUPPER (str[1]) == 'S')
- + {
- + min_value = -(1 << (const_size - 1));
- + max_value = (1 << (const_size - 1)) - 1;
- + }
- +
- + if (c == 'I')
- + {
- + value = -value;
- + }
- +
- + if (value >= min_value && value <= max_value)
- + {
- + return 1;
- + }
- + break;
- + }
- + case 'M':
- + return avr32_mask_upper_bits_operand (GEN_INT (value), VOIDmode);
- + case 'J':
- + return avr32_hi16_immediate_operand (GEN_INT (value), VOIDmode);
- + case 'O':
- + return one_bit_set_operand (GEN_INT (value), VOIDmode);
- + case 'N':
- + return one_bit_cleared_operand (GEN_INT (value), VOIDmode);
- + case 'L':
- + /* The lower 16-bits are set. */
- + return ((value & 0xffff) == 0xffff) ;
- + }
- +
- + return 0;
- +}
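- +
- +/* Worked example: "Ks16" parses to a signed 16-bit range, so values
- +   -32768..32767 are accepted; "Ku05" accepts 0..31. For the 'I'
- +   constraints the value is negated first, so "Is21" accepts a value v
- +   exactly when -v fits in a signed 21-bit immediate. */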
- +
- +
- +/* Compute mask of registers which needs saving upon function entry. */
- +static unsigned long
- +avr32_compute_save_reg_mask (int push)
- +{
- + unsigned long func_type;
- + unsigned int save_reg_mask = 0;
- + unsigned int reg;
- +
- + func_type = avr32_current_func_type ();
- +
- + if (IS_INTERRUPT (func_type))
- + {
- + unsigned int max_reg = 12;
- +
- + /* Get the banking scheme for the interrupt */
- + switch (func_type)
- + {
- + case AVR32_FT_ISR_FULL:
- + max_reg = 0;
- + break;
- + case AVR32_FT_ISR_HALF:
- + max_reg = 7;
- + break;
- + case AVR32_FT_ISR_NONE:
- + max_reg = 12;
- + break;
- + }
- +
- + /* Interrupt functions must not corrupt any registers, even call
- + clobbered ones. If this is a leaf function we can just examine the
- + registers used by the RTL, but otherwise we have to assume that
- + whatever function is called might clobber anything, and so we have
- + to save all the call-clobbered registers as well. */
- +
- + /* Need not push the registers r8-r12 for AVR32A architectures, as this
- + is automatically done in hardware. We also do not have any shadow
- + registers. */
- + if (TARGET_UARCH_AVR32A)
- + {
- + max_reg = 7;
- + func_type = AVR32_FT_ISR_NONE;
- + }
- +
- + /* All registers which are used and are not shadowed must be saved. */
- + for (reg = 0; reg <= max_reg; reg++)
- + if (df_regs_ever_live_p (INTERNAL_REGNUM (reg))
- + || (!current_function_is_leaf
- + && call_used_regs[INTERNAL_REGNUM (reg)]))
- + save_reg_mask |= (1 << reg);
- +
- + /* Check LR */
- + if ((df_regs_ever_live_p (LR_REGNUM)
- + || !current_function_is_leaf || frame_pointer_needed)
- + /* Only non-shadowed register models */
- + && (func_type == AVR32_FT_ISR_NONE))
- + save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
- +
- + /* Make sure that the GOT register is pushed. */
- + if (max_reg >= ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM)
- + && crtl->uses_pic_offset_table)
- + save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
- +
- + }
- + else
- + {
- + int use_pushm = optimize_size;
- +
- + /* In the normal case we only need to save those registers which are
- + call saved and which are used by this function. */
- + for (reg = 0; reg <= 7; reg++)
- + if (df_regs_ever_live_p (INTERNAL_REGNUM (reg))
- + && !call_used_regs[INTERNAL_REGNUM (reg)])
- + save_reg_mask |= (1 << reg);
- +
- + /* Make sure that the GOT register is pushed. */
- + if (crtl->uses_pic_offset_table)
- + save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
- +
- +
- + /* If we optimize for size and do not have anonymous arguments,
- + always use pushm/popm. */
- + if (use_pushm)
- + {
- + if ((save_reg_mask & (1 << 0))
- + || (save_reg_mask & (1 << 1))
- + || (save_reg_mask & (1 << 2)) || (save_reg_mask & (1 << 3)))
- + save_reg_mask |= 0xf;
- +
- + if ((save_reg_mask & (1 << 4))
- + || (save_reg_mask & (1 << 5))
- + || (save_reg_mask & (1 << 6)) || (save_reg_mask & (1 << 7)))
- + save_reg_mask |= 0xf0;
- +
- + if ((save_reg_mask & (1 << 8)) || (save_reg_mask & (1 << 9)))
- + save_reg_mask |= 0x300;
- + }
- +
- +
- + /* Check LR */
- + if ((df_regs_ever_live_p (LR_REGNUM)
- + || !current_function_is_leaf
- + || (optimize_size
- + && save_reg_mask
- + && !crtl->calls_eh_return)
- + || frame_pointer_needed)
- + && !IS_FLASHVAULT (func_type))
- + {
- + if (push
- + /* Never pop LR into PC for functions which
- + calls __builtin_eh_return, since we need to
- + fix the SP after the restoring of the registers
- + and before returning. */
- + || crtl->calls_eh_return)
- + {
- + /* Push/Pop LR */
- + save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
- + }
- + else
- + {
- + /* Pop PC */
- + save_reg_mask |= (1 << ASM_REGNUM (PC_REGNUM));
- + }
- + }
- + }
- +
- +
- + /* Save registers so the exception handler can modify them. */
- + if (crtl->calls_eh_return)
- + {
- + unsigned int i;
- +
- + for (i = 0;; i++)
- + {
- + reg = EH_RETURN_DATA_REGNO (i);
- + if (reg == INVALID_REGNUM)
- + break;
- + save_reg_mask |= 1 << ASM_REGNUM (reg);
- + }
- + }
- +
- + return save_reg_mask;
- +}
- +
- +
- +/* Compute total size in bytes of all saved registers. */
- +static int
- +avr32_get_reg_mask_size (int reg_mask)
- +{
- + int reg, size;
- + size = 0;
- +
- + for (reg = 0; reg <= 15; reg++)
- + if (reg_mask & (1 << reg))
- + size += 4;
- +
- + return size;
- +}
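- +
- +/* For example, the mask 0x4003 (r0, r1 and lr) contains three
- +   registers and therefore yields 12 bytes. */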
- +
- +
- +/* Get a register from one of the registers which are saved onto the stack
- + upon function entry. */
- +static int
- +avr32_get_saved_reg (int save_reg_mask)
- +{
- + unsigned int reg;
- +
- + /* Find the first register which is saved in the saved_reg_mask */
- + for (reg = 0; reg <= 15; reg++)
- + if (save_reg_mask & (1 << reg))
- + return reg;
- +
- + return -1;
- +}
- +
- +
- +/* Return 1 if it is possible to return using a single instruction. */
- +int
- +avr32_use_return_insn (int iscond)
- +{
- + unsigned int func_type = avr32_current_func_type ();
- + unsigned long saved_int_regs;
- +
- + /* Never use a return instruction before reload has run. */
- + if (!reload_completed)
- + return 0;
- +
- + /* Must adjust the stack for vararg functions. */
- + if (crtl->args.info.uses_anonymous_args)
- + return 0;
- +
- + /* If there is a stack adjustment. */
- + if (get_frame_size ())
- + return 0;
- +
- + saved_int_regs = avr32_compute_save_reg_mask (TRUE);
- +
- + /* Conditional returns cannot be performed in one instruction if we need
- + to restore registers from the stack. */
- + if (iscond && saved_int_regs)
- + return 0;
- +
- + /* Conditional returns cannot be used for interrupt handlers. */
- + if (iscond && IS_INTERRUPT (func_type))
- + return 0;
- +
- + /* For interrupt handlers which need to pop registers. */
- + if (saved_int_regs && IS_INTERRUPT (func_type))
- + return 0;
- +
- +
- + /* If there are saved registers but the LR isn't saved, then we need two
- + instructions for the return. */
- + if (saved_int_regs && !(saved_int_regs & (1 << ASM_REGNUM (LR_REGNUM))))
- + return 0;
- +
- +
- + return 1;
- +}
- +
- +
- +/* Generate some function prologue info in the assembly file. */
- +void
- +avr32_target_asm_function_prologue (FILE * f, HOST_WIDE_INT frame_size)
- +{
- + unsigned long func_type = avr32_current_func_type ();
- +
- + if (IS_NAKED (func_type))
- + fprintf (f,
- + "\t# Function is naked: Prologue and epilogue provided by programmer\n");
- +
- + if (IS_FLASHVAULT (func_type))
- + {
- + fprintf(f,
- + "\t.ident \"flashvault\"\n\t# Function is defined with flashvault attribute.\n");
- + }
- +
- + if (IS_FLASHVAULT_IMPL (func_type))
- + {
- + fprintf(f,
- + "\t.ident \"flashvault\"\n\t# Function is defined with flashvault_impl attribute.\n");
- +
- + /* Save information on flashvault function declaration. */
- + tree fv_attribute = lookup_attribute ("flashvault_impl", DECL_ATTRIBUTES(current_function_decl));
- + if (fv_attribute != NULL_TREE)
- + {
- + tree vector_tree = TREE_VALUE(fv_attribute);
- + if (vector_tree != NULL_TREE)
- + {
- + unsigned int vector_num;
- + const char * name;
- +
- + vector_num = (unsigned int) TREE_INT_CST_LOW (TREE_VALUE (vector_tree));
- +
- + name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
- +
- + flashvault_decl_list_add (vector_num, name);
- + }
- + }
- + }
- +
- + if (IS_INTERRUPT (func_type))
- + {
- + switch (func_type)
- + {
- + case AVR32_FT_ISR_FULL:
- + fprintf (f,
- + "\t# Interrupt Function: Fully shadowed register file\n");
- + break;
- + case AVR32_FT_ISR_HALF:
- + fprintf (f,
- + "\t# Interrupt Function: Half shadowed register file\n");
- + break;
- + default:
- + case AVR32_FT_ISR_NONE:
- + fprintf (f, "\t# Interrupt Function: No shadowed register file\n");
- + break;
- + }
- + }
- +
- +
- + fprintf (f, "\t# args = %i, frame = %li, pretend = %i\n",
- + crtl->args.size, frame_size,
- + crtl->args.pretend_args_size);
- +
- + fprintf (f, "\t# frame_needed = %i, leaf_function = %i\n",
- + frame_pointer_needed, current_function_is_leaf);
- +
- + fprintf (f, "\t# uses_anonymous_args = %i\n",
- + crtl->args.info.uses_anonymous_args);
- +
- + if (crtl->calls_eh_return)
- + fprintf (f, "\t# Calls __builtin_eh_return.\n");
- +
- +}
- +
- +
- +/* Generate and emit an insn that we will recognize as a pushm or stm.
- + Unfortunately, since this insn does not reflect very well the actual
- + semantics of the operation, we need to annotate the insn for the benefit
- + of DWARF2 frame unwind information. */
- +
- +int avr32_convert_to_reglist16 (int reglist8_vect);
- +
- +static rtx
- +emit_multi_reg_push (int reglist, int usePUSHM)
- +{
- + rtx insn;
- + rtx dwarf;
- + rtx tmp;
- + rtx reg;
- + int i;
- + int nr_regs;
- + int index = 0;
- +
- + if (usePUSHM)
- + {
- + insn = emit_insn (gen_pushm (gen_rtx_CONST_INT (SImode, reglist)));
- + reglist = avr32_convert_to_reglist16 (reglist);
- + }
- + else
- + {
- + insn = emit_insn (gen_stm (stack_pointer_rtx,
- + gen_rtx_CONST_INT (SImode, reglist),
- + gen_rtx_CONST_INT (SImode, 1)));
- + }
- +
- + nr_regs = avr32_get_reg_mask_size (reglist) / 4;
- + dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
- +
- + for (i = 15; i >= 0; i--)
- + {
- + if (reglist & (1 << i))
- + {
- + reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (i));
- + tmp = gen_rtx_SET (VOIDmode,
- + gen_rtx_MEM (SImode,
- + plus_constant (stack_pointer_rtx,
- + 4 * index)), reg);
- + RTX_FRAME_RELATED_P (tmp) = 1;
- + XVECEXP (dwarf, 0, 1 + index++) = tmp;
- + }
- + }
- +
- + tmp = gen_rtx_SET (SImode,
- + stack_pointer_rtx,
- + gen_rtx_PLUS (SImode,
- + stack_pointer_rtx,
- + GEN_INT (-4 * nr_regs)));
- + RTX_FRAME_RELATED_P (tmp) = 1;
- + XVECEXP (dwarf, 0, 0) = tmp;
- + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
- + REG_NOTES (insn));
- + return insn;
- +}
- +
- +rtx
- +avr32_gen_load_multiple (rtx * regs, int count, rtx from,
- + int write_back, int in_struct_p, int scalar_p)
- +{
- +
- + rtx result;
- + int i = 0, j;
- +
- + result =
- + gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + (write_back ? 1 : 0)));
- +
- + if (write_back)
- + {
- + XVECEXP (result, 0, 0)
- + = gen_rtx_SET (GET_MODE (from), from,
- + plus_constant (from, count * 4));
- + i = 1;
- + count++;
- + }
- +
- +
- + for (j = 0; i < count; i++, j++)
- + {
- + rtx unspec;
- + rtx mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4));
- + MEM_IN_STRUCT_P (mem) = in_struct_p;
- + MEM_SCALAR_P (mem) = scalar_p;
- + unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, mem), UNSPEC_LDM);
- + XVECEXP (result, 0, i) = gen_rtx_SET (VOIDmode, regs[j], unspec);
- + }
- +
- + return result;
- +}
- +
- +
- +rtx
- +avr32_gen_store_multiple (rtx * regs, int count, rtx to,
- + int in_struct_p, int scalar_p)
- +{
- + rtx result;
- + int i = 0, j;
- +
- + result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
- +
- + for (j = 0; i < count; i++, j++)
- + {
- + rtx mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4));
- + MEM_IN_STRUCT_P (mem) = in_struct_p;
- + MEM_SCALAR_P (mem) = scalar_p;
- + XVECEXP (result, 0, i)
- + = gen_rtx_SET (VOIDmode, mem,
- + gen_rtx_UNSPEC (VOIDmode,
- + gen_rtvec (1, regs[j]),
- + UNSPEC_STORE_MULTIPLE));
- + }
- +
- + return result;
- +}
- +
- +
- +/* Move a block of memory if it is word aligned or we support unaligned
- + word memory accesses. The size must be at most 64 bytes. */
- +int
- +avr32_gen_movmemsi (rtx * operands)
- +{
- + HOST_WIDE_INT bytes_to_go;
- + rtx src, dst;
- + rtx st_src, st_dst;
- + int src_offset = 0, dst_offset = 0;
- + int block_size;
- + int dst_in_struct_p, src_in_struct_p;
- + int dst_scalar_p, src_scalar_p;
- + int unaligned;
- +
- + if (GET_CODE (operands[2]) != CONST_INT
- + || GET_CODE (operands[3]) != CONST_INT
- + || INTVAL (operands[2]) > 64
- + || ((INTVAL (operands[3]) & 3) && !TARGET_UNALIGNED_WORD))
- + return 0;
- +
- + unaligned = (INTVAL (operands[3]) & 3) != 0;
- +
- + block_size = 4;
- +
- + st_dst = XEXP (operands[0], 0);
- + st_src = XEXP (operands[1], 0);
- +
- + dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
- + dst_scalar_p = MEM_SCALAR_P (operands[0]);
- + src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
- + src_scalar_p = MEM_SCALAR_P (operands[1]);
- +
- + dst = copy_to_mode_reg (SImode, st_dst);
- + src = copy_to_mode_reg (SImode, st_src);
- +
- + bytes_to_go = INTVAL (operands[2]);
- +
- + while (bytes_to_go)
- + {
- + enum machine_mode move_mode;
- + /* (Seems to be a problem with reloads for the movti pattern so this is
- + disabled until that problem is resolved)
- + UPDATE: Problem seems to be solved now.... */
- + if (bytes_to_go >= GET_MODE_SIZE (TImode) && !unaligned
- + /* Do not emit ldm/stm for UC3 as ld.d/st.d is more optimal. */
- + && !TARGET_ARCH_UC)
- + move_mode = TImode;
- + else if ((bytes_to_go >= GET_MODE_SIZE (DImode)) && !unaligned)
- + move_mode = DImode;
- + else if (bytes_to_go >= GET_MODE_SIZE (SImode))
- + move_mode = SImode;
- + else
- + move_mode = QImode;
- +
- + {
- + rtx src_mem;
- + rtx dst_mem = gen_rtx_MEM (move_mode,
- + gen_rtx_PLUS (SImode, dst,
- + GEN_INT (dst_offset)));
- + dst_offset += GET_MODE_SIZE (move_mode);
- + if ( 0 /* This causes an error in GCC. Think there is
- + something wrong in the gcse pass which causes REG_EQUIV notes
- + to be wrong so disabling it for now. */
- + && move_mode == TImode
- + && INTVAL (operands[2]) > GET_MODE_SIZE (TImode) )
- + {
- + src_mem = gen_rtx_MEM (move_mode,
- + gen_rtx_POST_INC (SImode, src));
- + }
- + else
- + {
- + src_mem = gen_rtx_MEM (move_mode,
- + gen_rtx_PLUS (SImode, src,
- + GEN_INT (src_offset)));
- + src_offset += GET_MODE_SIZE (move_mode);
- + }
- +
- + bytes_to_go -= GET_MODE_SIZE (move_mode);
- +
- + MEM_IN_STRUCT_P (dst_mem) = dst_in_struct_p;
- + MEM_SCALAR_P (dst_mem) = dst_scalar_p;
- +
- + MEM_IN_STRUCT_P (src_mem) = src_in_struct_p;
- + MEM_SCALAR_P (src_mem) = src_scalar_p;
- + emit_move_insn (dst_mem, src_mem);
- +
- + }
- + }
- +
- + return 1;
- +}
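- +
- +/* For example, a word-aligned 14-byte copy is emitted as one DImode
- +   move, one SImode move and two QImode moves (8 + 4 + 1 + 1 bytes). */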
- +
- +
- +/* Expand the prologue instruction. */
- +void
- +avr32_expand_prologue (void)
- +{
- + rtx insn, dwarf;
- + unsigned long saved_reg_mask;
- + int reglist8 = 0;
- +
- + /* Naked functions do not have a prologue. */
- + if (IS_NAKED (avr32_current_func_type ()))
- + return;
- +
- + saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
- +
- + if (saved_reg_mask)
- + {
- + /* Must push used registers. */
- +
- + /* Should we use PUSHM or STM? */
- + int usePUSHM = TRUE;
- + reglist8 = 0;
- + if (((saved_reg_mask & (1 << 0)) ||
- + (saved_reg_mask & (1 << 1)) ||
- + (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
- + {
- + /* One of R0-R3 should at least be pushed. */
- + if (((saved_reg_mask & (1 << 0)) &&
- + (saved_reg_mask & (1 << 1)) &&
- + (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
- + {
- + /* All should be pushed. */
- + reglist8 |= 0x01;
- + }
- + else
- + {
- + usePUSHM = FALSE;
- + }
- + }
- +
- + if (((saved_reg_mask & (1 << 4)) ||
- + (saved_reg_mask & (1 << 5)) ||
- + (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
- + {
- + /* One of R4-R7 should at least be pushed */
- + if (((saved_reg_mask & (1 << 4)) &&
- + (saved_reg_mask & (1 << 5)) &&
- + (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
- + {
- + if (usePUSHM)
- + /* All should be pushed */
- + reglist8 |= 0x02;
- + }
- + else
- + {
- + usePUSHM = FALSE;
- + }
- + }
- +
- + if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
- + {
- + /* One of R8-R9 should at least be pushed. */
- + if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
- + {
- + if (usePUSHM)
- + /* All should be pushed. */
- + reglist8 |= 0x04;
- + }
- + else
- + {
- + usePUSHM = FALSE;
- + }
- + }
- +
- + if (saved_reg_mask & (1 << 10))
- + reglist8 |= 0x08;
- +
- + if (saved_reg_mask & (1 << 11))
- + reglist8 |= 0x10;
- +
- + if (saved_reg_mask & (1 << 12))
- + reglist8 |= 0x20;
- +
- + if ((saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
- + && !IS_FLASHVAULT (avr32_current_func_type ()))
- + {
- + /* Push LR */
- + reglist8 |= 0x40;
- + }
- +
- + if (usePUSHM)
- + {
- + insn = emit_multi_reg_push (reglist8, TRUE);
- + }
- + else
- + {
- + insn = emit_multi_reg_push (saved_reg_mask, FALSE);
- + }
- + RTX_FRAME_RELATED_P (insn) = 1;
- +
- + /* Prevent this instruction from being scheduled after any other
- + instructions. */
- + emit_insn (gen_blockage ());
- + }
- +
- + /* Set frame pointer */
- + if (frame_pointer_needed)
- + {
- + insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
- + RTX_FRAME_RELATED_P (insn) = 1;
- + }
- +
- + if (get_frame_size () > 0)
- + {
- + if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks21"))
- + {
- + insn = emit_insn (gen_rtx_SET (SImode,
- + stack_pointer_rtx,
- + gen_rtx_PLUS (SImode,
- + stack_pointer_rtx,
- + gen_rtx_CONST_INT
- + (SImode,
- + -get_frame_size
- + ()))));
- + RTX_FRAME_RELATED_P (insn) = 1;
- + }
- + else
- + {
- + /* The immediate is larger than Ks21. We must either use one of
- + the pushed registers as temporary storage or make a temporary
- + register by pushing a register to the stack. */
- + rtx temp_reg, const_pool_entry, insn;
- + if (saved_reg_mask)
- + {
- + temp_reg =
- + gen_rtx_REG (SImode,
- + INTERNAL_REGNUM (avr32_get_saved_reg
- + (saved_reg_mask)));
- + }
- + else
- + {
- + temp_reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (7));
- + emit_move_insn (gen_rtx_MEM
- + (SImode,
- + gen_rtx_PRE_DEC (SImode, stack_pointer_rtx)),
- + temp_reg);
- + }
- +
- + const_pool_entry =
- + force_const_mem (SImode,
- + gen_rtx_CONST_INT (SImode, get_frame_size ()));
- + emit_move_insn (temp_reg, const_pool_entry);
- +
- + insn = emit_insn (gen_rtx_SET (SImode,
- + stack_pointer_rtx,
- + gen_rtx_MINUS (SImode,
- + stack_pointer_rtx,
- + temp_reg)));
- +
- + dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- + gen_rtx_PLUS (SImode, stack_pointer_rtx,
- + GEN_INT (-get_frame_size ())));
- + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
- + dwarf, REG_NOTES (insn));
- + RTX_FRAME_RELATED_P (insn) = 1;
- +
- + if (!saved_reg_mask)
- + {
- + insn =
- + emit_move_insn (temp_reg,
- + gen_rtx_MEM (SImode,
- + gen_rtx_POST_INC (SImode,
- + gen_rtx_REG
- + (SImode,
- + 13))));
- + }
- +
- + /* Mark the temp register as dead */
- + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, temp_reg,
- + REG_NOTES (insn));
- +
- +
- + }
- +
- + /* Prevent the stack adjustment from being scheduled after any
- + instruction that uses the frame pointer. */
- + emit_insn (gen_blockage ());
- + }
- +
- + /* Load GOT */
- + if (flag_pic)
- + {
- + avr32_load_pic_register ();
- +
- + /* gcc does not know that load or call instructions might use the pic
- + register so it might schedule these instructions before the loading
- + of the pic register. To avoid this emit a barrier for now. TODO!
- + Find out a better way to let gcc know which instructions might use
- + the pic register. */
- + emit_insn (gen_blockage ());
- + }
- + return;
- +}
- +
- +
- +void
- +avr32_set_return_address (rtx source, rtx scratch)
- +{
- + rtx addr;
- + unsigned long saved_regs;
- +
- + saved_regs = avr32_compute_save_reg_mask (TRUE);
- +
- + if (!(saved_regs & (1 << ASM_REGNUM (LR_REGNUM))))
- + emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
- + else
- + {
- + if (frame_pointer_needed)
- + addr = gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM);
- + else
- + if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks16"))
- + {
- + addr = plus_constant (stack_pointer_rtx, get_frame_size ());
- + }
- + else
- + {
- + emit_insn (gen_movsi (scratch, GEN_INT (get_frame_size ())));
- + addr = scratch;
- + }
- + emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
- + }
- +}
- +
- +
- +/* Return the length of INSN. LENGTH is the initial length computed by
- + attributes in the machine-description file. */
- +int
- +avr32_adjust_insn_length (rtx insn ATTRIBUTE_UNUSED,
- + int length ATTRIBUTE_UNUSED)
- +{
- + return length;
- +}
- +
- +
- +void
- +avr32_output_return_instruction (int single_ret_inst ATTRIBUTE_UNUSED,
- + int iscond ATTRIBUTE_UNUSED,
- + rtx cond ATTRIBUTE_UNUSED, rtx r12_imm)
- +{
- +
- + unsigned long saved_reg_mask;
- + int insert_ret = TRUE;
- + int reglist8 = 0;
- + int stack_adjustment = get_frame_size ();
- + unsigned int func_type = avr32_current_func_type ();
- + FILE *f = asm_out_file;
- +
- + /* Naked functions do not have an epilogue. */
- + if (IS_NAKED (func_type))
- + return;
- +
- + saved_reg_mask = avr32_compute_save_reg_mask (FALSE);
- +
- + /* Reset frame pointer */
- + if (stack_adjustment > 0)
- + {
- + if (avr32_const_ok_for_constraint_p (stack_adjustment, 'I', "Is21"))
- + {
- + fprintf (f, "\tsub\tsp, %i # Reset Frame Pointer\n",
- + -stack_adjustment);
- + }
- + else
- + {
- + /* TODO! Is it safe to use r8 as scratch?? */
- + fprintf (f, "\tmov\tr8, lo(%i) # Reset Frame Pointer\n",
- + -stack_adjustment);
- + fprintf (f, "\torh\tr8, hi(%i) # Reset Frame Pointer\n",
- + -stack_adjustment);
- + fprintf (f, "\tadd\tsp, r8 # Reset Frame Pointer\n");
- + }
- + }
- +
- + if (saved_reg_mask)
- + {
- + /* Must pop used registers */
- +
- + /* Should we use POPM or LDM? */
- + int usePOPM = TRUE;
- + if (((saved_reg_mask & (1 << 0)) ||
- + (saved_reg_mask & (1 << 1)) ||
- + (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
- + {
- + /* One of R0-R3 should at least be popped */
- + if (((saved_reg_mask & (1 << 0)) &&
- + (saved_reg_mask & (1 << 1)) &&
- + (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
- + {
- + /* All should be popped */
- + reglist8 |= 0x01;
- + }
- + else
- + {
- + usePOPM = FALSE;
- + }
- + }
- +
- + if (((saved_reg_mask & (1 << 4)) ||
- + (saved_reg_mask & (1 << 5)) ||
- + (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
- + {
- + /* One of R4-R7 should at least be popped. */
- + if (((saved_reg_mask & (1 << 4)) &&
- + (saved_reg_mask & (1 << 5)) &&
- + (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
- + {
- + if (usePOPM)
- + /* All should be popped */
- + reglist8 |= 0x02;
- + }
- + else
- + {
- + usePOPM = FALSE;
- + }
- + }
- +
- + if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
- + {
- + /* One of R8-R9 should at least be popped. */
- + if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
- + {
- + if (usePOPM)
- + /* All should be popped. */
- + reglist8 |= 0x04;
- + }
- + else
- + {
- + usePOPM = FALSE;
- + }
- + }
- +
- + if (saved_reg_mask & (1 << 10))
- + reglist8 |= 0x08;
- +
- + if (saved_reg_mask & (1 << 11))
- + reglist8 |= 0x10;
- +
- + if (saved_reg_mask & (1 << 12))
- + reglist8 |= 0x20;
- +
- + if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
- + /* Pop LR */
- + reglist8 |= 0x40;
- +
- + if ((saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
- + && !IS_FLASHVAULT_IMPL (func_type))
- + /* Pop LR into PC. */
- + reglist8 |= 0x80;
- +
- + if (usePOPM)
- + {
- + char reglist[64]; /* 64 bytes should be enough... */
- + avr32_make_reglist8 (reglist8, (char *) reglist);
- +
- + if (reglist8 & 0x80)
- + /* This instruction is also a return */
- + insert_ret = FALSE;
- +
- + if (r12_imm && !insert_ret)
- + fprintf (f, "\tpopm\t%s, r12=%li\n", reglist, INTVAL (r12_imm));
- + else
- + fprintf (f, "\tpopm\t%s\n", reglist);
- +
- + }
- + else
- + {
- + char reglist[64]; /* 64 bytes should be enough... */
- + avr32_make_reglist16 (saved_reg_mask, (char *) reglist);
- + if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
- + /* This instruction is also a return */
- + insert_ret = FALSE;
- +
- + if (r12_imm && !insert_ret)
- + fprintf (f, "\tldm\tsp++, %s, r12=%li\n", reglist,
- + INTVAL (r12_imm));
- + else
- + fprintf (f, "\tldm\tsp++, %s\n", reglist);
- +
- + }
- +
- + }
- +
- + /* Stack adjustment for exception handler. */
- + if (crtl->calls_eh_return)
- + fprintf (f, "\tadd\tsp, r%d\n", ASM_REGNUM (EH_RETURN_STACKADJ_REGNO));
- +
- +
- + if (IS_INTERRUPT (func_type))
- + {
- + fprintf (f, "\trete\n");
- + }
- + else if (IS_FLASHVAULT (func_type))
- + {
- + /* Normal return from Secure System call, increment SS_RAR before
- + returning. Use R8 as scratch. */
- + fprintf (f,
- + "\t# Normal return from sscall.\n"
- + "\t# Increment SS_RAR before returning.\n"
- + "\t# Use R8 as scratch.\n"
- + "\tmfsr\tr8, 440\n"
- + "\tsub\tr8, -2\n"
- + "\tmtsr\t440, r8\n"
- + "\tretss\n");
- + }
- + else if (insert_ret)
- + {
- + if (r12_imm)
- + fprintf (f, "\tretal\t%li\n", INTVAL (r12_imm));
- + else
- + fprintf (f, "\tretal\tr12\n");
- + }
- +}
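- +
- +/* For example, a function that pushed r0-r3 and lr and has no frame
- +   would produce (a sketch of the output, not verbatim):
- +
- +       popm    r0-r3, pc
- +
- +   where popping the saved lr into pc performs the return itself. */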
- +
- +void
- +avr32_make_reglist16 (int reglist16_vect, char *reglist16_string)
- +{
- + int i;
- + bool first_reg = true;
- + /* Make sure reglist16_string is empty. */
- + reglist16_string[0] = '\0';
- +
- + for (i = 0; i < 16; ++i)
- + {
- + if (reglist16_vect & (1 << i))
- + {
- + /* Separate register names with ", " after the first one. */
- + if (first_reg)
- + first_reg = false;
- + else
- + strcat (reglist16_string, ", ");
- + strcat (reglist16_string, reg_names[INTERNAL_REGNUM (i)]);
- + }
- + }
- +}
- +
- +int
- +avr32_convert_to_reglist16 (int reglist8_vect)
- +{
- + int reglist16_vect = 0;
- + if (reglist8_vect & 0x1)
- + reglist16_vect |= 0xF;
- + if (reglist8_vect & 0x2)
- + reglist16_vect |= 0xF0;
- + if (reglist8_vect & 0x4)
- + reglist16_vect |= 0x300;
- + if (reglist8_vect & 0x8)
- + reglist16_vect |= 0x400;
- + if (reglist8_vect & 0x10)
- + reglist16_vect |= 0x800;
- + if (reglist8_vect & 0x20)
- + reglist16_vect |= 0x1000;
- + if (reglist8_vect & 0x40)
- + reglist16_vect |= 0x4000;
- + if (reglist8_vect & 0x80)
- + reglist16_vect |= 0x8000;
- +
- + return reglist16_vect;
- +}
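- +
- +/* For example, the reglist8 value 0x43 ("r0-r3, r4-r7, lr") expands to
- +   the reglist16 mask 0x40FF: bits 0-7 plus bit 14 (LR). */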
- +
- +void
- +avr32_make_reglist8 (int reglist8_vect, char *reglist8_string)
- +{
- + /* Make sure reglist8_string is empty. */
- + reglist8_string[0] = '\0';
- +
- + if (reglist8_vect & 0x1)
- + strcpy (reglist8_string, "r0-r3");
- + if (reglist8_vect & 0x2)
- + strlen (reglist8_string) ? strcat (reglist8_string, ", r4-r7") :
- + strcpy (reglist8_string, "r4-r7");
- + if (reglist8_vect & 0x4)
- + strlen (reglist8_string) ? strcat (reglist8_string, ", r8-r9") :
- + strcpy (reglist8_string, "r8-r9");
- + if (reglist8_vect & 0x8)
- + strlen (reglist8_string) ? strcat (reglist8_string, ", r10") :
- + strcpy (reglist8_string, "r10");
- + if (reglist8_vect & 0x10)
- + strlen (reglist8_string) ? strcat (reglist8_string, ", r11") :
- + strcpy (reglist8_string, "r11");
- + if (reglist8_vect & 0x20)
- + strlen (reglist8_string) ? strcat (reglist8_string, ", r12") :
- + strcpy (reglist8_string, "r12");
- + if (reglist8_vect & 0x40)
- + strlen (reglist8_string) ? strcat (reglist8_string, ", lr") :
- + strcpy (reglist8_string, "lr");
- + if (reglist8_vect & 0x80)
- + strlen (reglist8_string) ? strcat (reglist8_string, ", pc") :
- + strcpy (reglist8_string, "pc");
- +}
- +
- +
- +int
- +avr32_eh_return_data_regno (int n)
- +{
- + if (n >= 0 && n <= 3)
- + return 8 + n;
- + else
- + return INVALID_REGNUM;
- +}
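- +
- +/* The exception handler data registers are thus r8-r11 (n = 0..3);
- +   any other n yields INVALID_REGNUM, which terminates the loop in
- +   avr32_compute_save_reg_mask(). */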
- +
- +
- +/* Compute the distance from register FROM to register TO.
- + These can be the arg pointer, the frame pointer or
- + the stack pointer.
- + Typical stack layout looks like this:
- +
- + old stack pointer -> | |
- + ----
- + | | \
- + | | saved arguments for
- + | | vararg functions
- + arg_pointer -> | | /
- + --
- + | | \
- + | | call saved
- + | | registers
- + | | /
- + frame ptr -> --
- + | | \
- + | | local
- + | | variables
- + stack ptr --> | | /
- + --
- + | | \
- + | | outgoing
- + | | arguments
- + | | /
- + --
- +
- + For a given function some or all of these stack components
- + may not be needed, giving rise to the possibility of
- + eliminating some of the registers.
- +
- + The values returned by this function must reflect the behaviour
- + of avr32_expand_prologue() and avr32_compute_save_reg_mask().
- +
- + The sign of the number returned reflects the direction of stack
- + growth, so the values are positive for all eliminations except
- + from the soft frame pointer to the hard frame pointer. */
- +int
- +avr32_initial_elimination_offset (int from, int to)
- +{
- + int i;
- + int call_saved_regs = 0;
- + unsigned long saved_reg_mask;
- + unsigned int local_vars = get_frame_size ();
- +
- + saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
- +
- + for (i = 0; i < 16; ++i)
- + {
- + if (saved_reg_mask & (1 << i))
- + call_saved_regs += 4;
- + }
- +
- + switch (from)
- + {
- + case ARG_POINTER_REGNUM:
- + switch (to)
- + {
- + case STACK_POINTER_REGNUM:
- + return call_saved_regs + local_vars;
- + case FRAME_POINTER_REGNUM:
- + return call_saved_regs;
- + default:
- + abort ();
- + }
- + case FRAME_POINTER_REGNUM:
- + switch (to)
- + {
- + case STACK_POINTER_REGNUM:
- + return local_vars;
- + default:
- + abort ();
- + }
- + default:
- + abort ();
- + }
- +}
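- +
- +/* Worked example: with two call-saved registers (8 bytes) and 12 bytes
- +   of local variables, ARG_POINTER -> STACK_POINTER eliminates to
- +   8 + 12 = 20, ARG_POINTER -> FRAME_POINTER to 8 and
- +   FRAME_POINTER -> STACK_POINTER to 12. */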
- +
- +
- +/*
- + Returns a rtx used when passing the next argument to a function.
- + avr32_init_cumulative_args() and avr32_function_arg_advance() set which
- + register to use.
- +*/
- +rtx
- +avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode,
- + tree type, int named)
- +{
- + int index = -1;
- + //unsigned long func_type = avr32_current_func_type ();
- + //int last_reg_index = (IS_FLASHVAULT(func_type) || IS_FLASHVAULT_IMPL(func_type) || cum->flashvault_func ? LAST_CUM_REG_INDEX - 1 : LAST_CUM_REG_INDEX);
- + int last_reg_index = (cum->flashvault_func ? LAST_CUM_REG_INDEX - 1 : LAST_CUM_REG_INDEX);
- +
- + HOST_WIDE_INT arg_size, arg_rsize;
- + if (type)
- + {
- + arg_size = int_size_in_bytes (type);
- + }
- + else
- + {
- + arg_size = GET_MODE_SIZE (mode);
- + }
- + arg_rsize = PUSH_ROUNDING (arg_size);
- +
- + /*
- + The last time this macro is called, it is called with mode == VOIDmode,
- + and its result is passed to the call or call_value pattern as operands 2
- + and 3 respectively. */
- + if (mode == VOIDmode)
- + {
- + return gen_rtx_CONST_INT (SImode, 22); /* ToDo: fixme. */
- + }
- +
- + if ((*targetm.calls.must_pass_in_stack) (mode, type) || !named)
- + {
- + return NULL_RTX;
- + }
- +
- + if (arg_rsize == 8)
- + {
- + /* use r11:r10 or r9:r8. */
- + if (!(GET_USED_INDEX (cum, 1) || GET_USED_INDEX (cum, 2)))
- + index = 1;
- + else if ((last_reg_index == 4) &&
- + !(GET_USED_INDEX (cum, 3) || GET_USED_INDEX (cum, 4)))
- + index = 3;
- + else
- + index = -1;
- + }
- + else if (arg_rsize == 4)
- + { /* Use first available register */
- + index = 0;
- + while (index <= last_reg_index && GET_USED_INDEX (cum, index))
- + index++;
- + if (index > last_reg_index)
- + index = -1;
- + }
- +
- + SET_REG_INDEX (cum, index);
- +
- + if (GET_REG_INDEX (cum) >= 0)
- + return gen_rtx_REG (mode, avr32_function_arg_reglist[GET_REG_INDEX (cum)]);
- +
- + return NULL_RTX;
- +}
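- +
- +/* For example, for a call f (int a, long long b, int c) the arguments
- +   would be assigned as: a in r12 (index 0), b in the r11:r10 pair
- +   (indexes 1 and 2) and c in r9 (index 3). A sketch derived from the
- +   index logic above, not verified output. */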
- +
- +
- +/* Set the register used for passing the first argument to a function. */
- +void
- +avr32_init_cumulative_args (CUMULATIVE_ARGS * cum,
- + tree fntype ATTRIBUTE_UNUSED,
- + rtx libname ATTRIBUTE_UNUSED,
- + tree fndecl)
- +{
- + /* Set all registers as unused. */
- + SET_INDEXES_UNUSED (cum);
- +
- + /* Reset uses_anonymous_args */
- + cum->uses_anonymous_args = 0;
- +
- + /* Reset size of stack pushed arguments */
- + cum->stack_pushed_args_size = 0;
- +
- + cum->flashvault_func = (fndecl && (has_attribute_p (fndecl,"flashvault") || has_attribute_p (fndecl,"flashvault_impl")));
- +}
- +
- +
- +/*
- + Set register used for passing the next argument to a function. Only the
- + Scratch Registers are used.
- +
- + number name
- + 15 r15 PC
- + 14 r14 LR
- + 13 r13 _SP_________
- + FIRST_CUM_REG 12 r12 _||_
- + 10 r11 ||
- + 11 r10 _||_ Scratch Registers
- + 8 r9 ||
- + LAST_SCRATCH_REG 9 r8 _\/_________
- + 6 r7 /\
- + 7 r6 ||
- + 4 r5 ||
- + 5 r4 ||
- + 2 r3 ||
- + 3 r2 ||
- + 0 r1 ||
- + 1 r0 _||_________
- +
- +*/
- +void
- +avr32_function_arg_advance (CUMULATIVE_ARGS * cum, enum machine_mode mode,
- + tree type, int named ATTRIBUTE_UNUSED)
- +{
- + HOST_WIDE_INT arg_size, arg_rsize;
- +
- + if (type)
- + {
- + arg_size = int_size_in_bytes (type);
- + }
- + else
- + {
- + arg_size = GET_MODE_SIZE (mode);
- + }
- + arg_rsize = PUSH_ROUNDING (arg_size);
- +
- + /* If the argument has to be passed on the stack, no register is used. */
- + if ((*targetm.calls.must_pass_in_stack) (mode, type))
- + {
- + cum->stack_pushed_args_size += PUSH_ROUNDING (int_size_in_bytes (type));
- + return;
- + }
- +
- + /* Mark the used registers as "used". */
- + if (GET_REG_INDEX (cum) >= 0)
- + {
- + SET_USED_INDEX (cum, GET_REG_INDEX (cum));
- + if (arg_rsize == 8)
- + {
- + SET_USED_INDEX (cum, (GET_REG_INDEX (cum) + 1));
- + }
- + }
- + else
- + {
- + /* Had to use stack */
- + cum->stack_pushed_args_size += arg_rsize;
- + }
- +}
- +
- +
- +/*
- + Defines which direction to go to find the next register to use if the
- + argument is larger than one register, or for arguments shorter than an
- + int which are not promoted, such as the last part of structures with
- + size not a multiple of 4. */
- +enum direction
- +avr32_function_arg_padding (enum machine_mode mode ATTRIBUTE_UNUSED,
- + tree type)
- +{
- + /* Pad upward for all aggregates except byte and halfword sized aggregates
- + which can be passed in registers. */
- + if (type
- + && AGGREGATE_TYPE_P (type)
- + && (int_size_in_bytes (type) != 1)
- + && !((int_size_in_bytes (type) == 2)
- + && TYPE_ALIGN_UNIT (type) >= 2)
- + && (int_size_in_bytes (type) & 0x3))
- + {
- + return upward;
- + }
- +
- + return downward;
- +}
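- +
- +/* For example, a 3-byte struct (size not a multiple of 4 and neither a
- +   byte nor an aligned halfword) is padded upward; an int, or a 2-byte
- +   struct aligned to 2, is padded downward. */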
- +
- +
- +/* Return a rtx used for the return value from a function call. */
- +rtx
- +avr32_function_value (tree type, tree func, bool outgoing ATTRIBUTE_UNUSED)
- +{
- + if (avr32_return_in_memory (type, func))
- + return NULL_RTX;
- +
- + if (int_size_in_bytes (type) <= 4)
- + {
- + enum machine_mode mode = TYPE_MODE (type);
- + int unsignedp = 0;
- + PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
- + return gen_rtx_REG (mode, RET_REGISTER);
- + }
- + else if (int_size_in_bytes (type) <= 8)
- + return gen_rtx_REG (TYPE_MODE (type), INTERNAL_REGNUM (11));
- +
- + return NULL_RTX;
- +}
- +
- +
- +/* Return a rtx used for the return value from a library function call. */
- +rtx
- +avr32_libcall_value (enum machine_mode mode)
- +{
- +
- + if (GET_MODE_SIZE (mode) <= 4)
- + return gen_rtx_REG (mode, RET_REGISTER);
- + else if (GET_MODE_SIZE (mode) <= 8)
- + return gen_rtx_REG (mode, INTERNAL_REGNUM (11));
- + else
- + return NULL_RTX;
- +}
- +
- +
- +/* Return TRUE if X references a SYMBOL_REF. */
- +int
- +symbol_mentioned_p (rtx x)
- +{
- + const char *fmt;
- + int i;
- +
- + if (GET_CODE (x) == SYMBOL_REF)
- + return 1;
- +
- + fmt = GET_RTX_FORMAT (GET_CODE (x));
- +
- + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
- + {
- + if (fmt[i] == 'E')
- + {
- + int j;
- +
- + for (j = XVECLEN (x, i) - 1; j >= 0; j--)
- + if (symbol_mentioned_p (XVECEXP (x, i, j)))
- + return 1;
- + }
- + else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
- + return 1;
- + }
- +
- + return 0;
- +}
- +
- +
- +/* Return TRUE if X references a LABEL_REF. */
- +int
- +label_mentioned_p (rtx x)
- +{
- + const char *fmt;
- + int i;
- +
- + if (GET_CODE (x) == LABEL_REF)
- + return 1;
- +
- + fmt = GET_RTX_FORMAT (GET_CODE (x));
- + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
- + {
- + if (fmt[i] == 'E')
- + {
- + int j;
- +
- + for (j = XVECLEN (x, i) - 1; j >= 0; j--)
- + if (label_mentioned_p (XVECEXP (x, i, j)))
- + return 1;
- + }
- + else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
- + return 1;
- + }
- +
- + return 0;
- +}
- +
- +
- +/* Return TRUE if X contains a MEM expression. */
- +int
- +mem_mentioned_p (rtx x)
- +{
- + const char *fmt;
- + int i;
- +
- + if (MEM_P (x))
- + return 1;
- +
- + fmt = GET_RTX_FORMAT (GET_CODE (x));
- + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
- + {
- + if (fmt[i] == 'E')
- + {
- + int j;
- +
- + for (j = XVECLEN (x, i) - 1; j >= 0; j--)
- + if (mem_mentioned_p (XVECEXP (x, i, j)))
- + return 1;
- + }
- + else if (fmt[i] == 'e' && mem_mentioned_p (XEXP (x, i)))
- + return 1;
- + }
- +
- + return 0;
- +}
- +
- +
- +int
- +avr32_legitimate_pic_operand_p (rtx x)
- +{
- +
- + /* We can't have const, this must be broken down to a symbol. */
- + if (GET_CODE (x) == CONST)
- + return FALSE;
- +
- + /* Can't access symbols or labels via the constant pool either */
- + if ((GET_CODE (x) == SYMBOL_REF
- + && CONSTANT_POOL_ADDRESS_P (x)
- + && (symbol_mentioned_p (get_pool_constant (x))
- + || label_mentioned_p (get_pool_constant (x)))))
- + return FALSE;
- +
- + return TRUE;
- +}
- +
- +
- +rtx
- +legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
- + rtx reg)
- +{
- +
- + if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
- + {
- + int subregs = 0;
- +
- + if (reg == 0)
- + {
- + if (!can_create_pseudo_p ())
- + abort ();
- + else
- + reg = gen_reg_rtx (Pmode);
- +
- + subregs = 1;
- + }
- +
- + emit_move_insn (reg, orig);
- +
- + /* Only set current function as using pic offset table if flag_pic is
- + set. This is because this function is also used if
- + TARGET_HAS_ASM_ADDR_PSEUDOS is set. */
- + if (flag_pic)
- + crtl->uses_pic_offset_table = 1;
- +
- + /* Put a REG_EQUAL note on this insn, so that it can be optimized by
- + loop. */
- + return reg;
- + }
- + else if (GET_CODE (orig) == CONST)
- + {
- + rtx base, offset;
- +
- + if (flag_pic
- + && GET_CODE (XEXP (orig, 0)) == PLUS
- + && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
- + return orig;
- +
- + if (reg == 0)
- + {
- + if (!can_create_pseudo_p ())
- + abort ();
- + else
- + reg = gen_reg_rtx (Pmode);
- + }
- +
- + if (GET_CODE (XEXP (orig, 0)) == PLUS)
- + {
- + base =
- + legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
- + offset =
- + legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
- + base == reg ? 0 : reg);
- + }
- + else
- + abort ();
- +
- + if (GET_CODE (offset) == CONST_INT)
- + {
- + /* The base register doesn't really matter, we only want to test
- + the index for the appropriate mode. */
- + if (!avr32_const_ok_for_constraint_p (INTVAL (offset), 'I', "Is21"))
- + {
- + if (can_create_pseudo_p ())
- + offset = force_reg (Pmode, offset);
- + else
- + abort ();
- + }
- +
- + if (GET_CODE (offset) == CONST_INT)
- + return plus_constant (base, INTVAL (offset));
- + }
- +
- + return gen_rtx_PLUS (Pmode, base, offset);
- + }
- +
- + return orig;
- +}
- +
- +
- +/* Generate code to load the PIC register. */
- +void
- +avr32_load_pic_register (void)
- +{
- + rtx l1, pic_tmp;
- + rtx global_offset_table;
- +
- + if ((crtl->uses_pic_offset_table == 0) || TARGET_NO_INIT_GOT)
- + return;
- +
- + if (!flag_pic)
- + abort ();
- +
- + l1 = gen_label_rtx ();
- +
- + global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
- + pic_tmp =
- + gen_rtx_CONST (Pmode,
- + gen_rtx_MINUS (SImode, gen_rtx_LABEL_REF (Pmode, l1),
- + global_offset_table));
- + emit_insn (gen_pic_load_addr
- + (pic_offset_table_rtx, force_const_mem (SImode, pic_tmp)));
- + emit_insn (gen_pic_compute_got_from_pc (pic_offset_table_rtx, l1));
- +
- + /* Need to emit this whether or not we obey regdecls, since setjmp/longjmp
- + can cause life info to screw up. */
- + emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
- +}
- +
- +
- +/* This hook should return true if values of type type are returned at the most
- + significant end of a register (in other words, if they are padded at the
- + least significant end). You can assume that type is returned in a register;
- + the caller is required to check this. Note that the register provided by
- + FUNCTION_VALUE must be able to hold the complete return value. For example,
- + if a 1-, 2- or 3-byte structure is returned at the most significant end of a
- + 4-byte register, FUNCTION_VALUE should provide an SImode rtx. */
- +bool
- +avr32_return_in_msb (tree type ATTRIBUTE_UNUSED)
- +{
- + /* if ( AGGREGATE_TYPE_P (type) ) if ((int_size_in_bytes(type) == 1) ||
- + ((int_size_in_bytes(type) == 2) && TYPE_ALIGN_UNIT(type) >= 2)) return
- + false; else return true; */
- +
- + return false;
- +}
- +
- +
- +/*
- + Returns one if a certain function value is going to be returned in memory
- + and zero if it is going to be returned in a register.
- +
- + BLKmode and all other modes that are larger than 64 bits are returned in
- + memory.
- +*/
- +bool
- +avr32_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
- +{
- + if (TYPE_MODE (type) == VOIDmode)
- + return false;
- +
- + if (int_size_in_bytes (type) > (2 * UNITS_PER_WORD)
- + || int_size_in_bytes (type) == -1)
- + {
- + return true;
- + }
- +
- + /* If we have an aggregate then use the same mechanism as when checking if
- + it should be passed on the stack. */
- + if (type
- + && AGGREGATE_TYPE_P (type)
- + && (*targetm.calls.must_pass_in_stack) (TYPE_MODE (type), type))
- + return true;
- +
- + return false;
- +}
- +
- +
- +/* Output the constant part of the trampoline.
- + lddpc r0, pc[0x8:e] ; load static chain register
- + lddpc pc, pc[0x8:e] ; jump to subroutine
- + .long 0 ; Address to static chain,
- + ; filled in by avr32_initialize_trampoline()
- + .long 0 ; Address to subroutine,
- + ; filled in by avr32_initialize_trampoline()
- +*/
- +void
- +avr32_trampoline_template (FILE * file)
- +{
- + fprintf (file, "\tlddpc r0, pc[8]\n");
- + fprintf (file, "\tlddpc pc, pc[8]\n");
- + /* Make room for the address of the static chain. */
- + fprintf (file, "\t.long\t0\n");
- + /* Make room for the address of the subroutine. */
- + fprintf (file, "\t.long\t0\n");
- +}
- +
- +
- +/* Initialize the variable parts of a trampoline. */
- +void
- +avr32_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
- +{
- + /* Store the address to the static chain. */
- + emit_move_insn (gen_rtx_MEM
- + (SImode, plus_constant (addr, TRAMPOLINE_SIZE - 4)),
- + static_chain);
- +
- + /* Store the address to the function. */
- + emit_move_insn (gen_rtx_MEM (SImode, plus_constant (addr, TRAMPOLINE_SIZE)),
- + fnaddr);
- +
- + emit_insn (gen_cache (gen_rtx_REG (SImode, 13),
- + gen_rtx_CONST_INT (SImode,
- + AVR32_CACHE_INVALIDATE_ICACHE)));
- +}
- +
- +
- +/* Return nonzero if X is valid as an addressing register. */
- +int
- +avr32_address_register_rtx_p (rtx x, int strict_p)
- +{
- + int regno;
- +
- + if (!register_operand(x, GET_MODE(x)))
- + return 0;
- +
- + /* If strict we require the register to be a hard register. */
- + if (strict_p
- + && !REG_P(x))
- + return 0;
- +
- + regno = REGNO (x);
- +
- + if (strict_p)
- + return REGNO_OK_FOR_BASE_P (regno);
- +
- + return (regno <= LAST_REGNUM || regno >= FIRST_PSEUDO_REGISTER);
- +}
- +
- +
- +/* Return nonzero if INDEX is valid for an address index operand. */
- +int
- +avr32_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
- +{
- + enum rtx_code code = GET_CODE (index);
- +
- + if (GET_MODE_SIZE (mode) > 8)
- + return 0;
- +
- + /* Standard coprocessor addressing modes. */
- + if (code == CONST_INT)
- + {
- + return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ks16");
- + }
- +
- + if (avr32_address_register_rtx_p (index, strict_p))
- + return 1;
- +
- + if (code == MULT)
- + {
- + rtx xiop0 = XEXP (index, 0);
- + rtx xiop1 = XEXP (index, 1);
- + return ((avr32_address_register_rtx_p (xiop0, strict_p)
- + && power_of_two_operand (xiop1, SImode)
- + && (INTVAL (xiop1) <= 8))
- + || (avr32_address_register_rtx_p (xiop1, strict_p)
- + && power_of_two_operand (xiop0, SImode)
- + && (INTVAL (xiop0) <= 8)));
- + }
- + else if (code == ASHIFT)
- + {
- + rtx op = XEXP (index, 1);
- +
- + return (avr32_address_register_rtx_p (XEXP (index, 0), strict_p)
- + && GET_CODE (op) == CONST_INT
- + && INTVAL (op) > 0 && INTVAL (op) <= 3);
- + }
- +
- + return 0;
- +}
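- +
- +/* For example, (mult (reg) (const_int 4)) and
- +   (ashift (reg) (const_int 2)) are both valid index operands, while a
- +   shift amount above 3 or a multiplier above 8 is rejected. */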
- +
- +
- +/*
- + Used in the GO_IF_LEGITIMATE_ADDRESS macro. Returns a nonzero value if
- + the RTX x is a legitimate memory address.
- +
- + Returns NO_REGS if the address is not legitimate, GENERAL_REGS or ALL_REGS
- + if it is.
- +*/
- +
- +
- +/* Forward declaration */
- +int is_minipool_label (rtx label);
- +
- +int
- +avr32_legitimate_address (enum machine_mode mode, rtx x, int strict)
- +{
- +
- + switch (GET_CODE (x))
- + {
- + case REG:
- + return avr32_address_register_rtx_p (x, strict);
- + case CONST_INT:
- + return ((mode==SImode) && TARGET_RMW_ADDRESSABLE_DATA
- + && CONST_OK_FOR_CONSTRAINT_P(INTVAL(x), 'K', "Ks17"));
- + case CONST:
- + {
- + rtx label = avr32_find_symbol (x);
- + if (label
- + &&
- + (/*
- + If we enable (const (plus (symbol_ref ...))) type constant
- + pool entries we must add support for it in the predicates and
- + in the minipool generation in avr32_reorg().
- + (CONSTANT_POOL_ADDRESS_P (label)
- + && !(flag_pic
- + && (symbol_mentioned_p (get_pool_constant (label))
- + || label_mentioned_p (get_pool_constant (label)))))
- + ||*/
- + ((GET_CODE (label) == LABEL_REF)
- + && GET_CODE (XEXP (label, 0)) == CODE_LABEL
- + && is_minipool_label (XEXP (label, 0)))
- + /*|| ((GET_CODE (label) == SYMBOL_REF)
- + && mode == SImode
- + && SYMBOL_REF_RMW_ADDR(label))*/))
- + {
- + return TRUE;
- + }
- + }
- + break;
- + case LABEL_REF:
- + if (GET_CODE (XEXP (x, 0)) == CODE_LABEL
- + && is_minipool_label (XEXP (x, 0)))
- + {
- + return TRUE;
- + }
- + break;
- + case SYMBOL_REF:
- + {
- + if (CONSTANT_POOL_ADDRESS_P (x)
- + && !(flag_pic
- + && (symbol_mentioned_p (get_pool_constant (x))
- + || label_mentioned_p (get_pool_constant (x)))))
- + return TRUE;
- + else if (SYMBOL_REF_RCALL_FUNCTION_P (x)
- + || (mode == SImode
- + && SYMBOL_REF_RMW_ADDR (x)))
- + return TRUE;
- + break;
- + }
- + case PRE_DEC: /* (pre_dec (...)) */
- + case POST_INC: /* (post_inc (...)) */
- + return avr32_address_register_rtx_p (XEXP (x, 0), strict);
- + case PLUS: /* (plus (...) (...)) */
- + {
- + rtx xop0 = XEXP (x, 0);
- + rtx xop1 = XEXP (x, 1);
- +
- + return ((avr32_address_register_rtx_p (xop0, strict)
- + && avr32_legitimate_index_p (mode, xop1, strict))
- + || (avr32_address_register_rtx_p (xop1, strict)
- + && avr32_legitimate_index_p (mode, xop0, strict)));
- + }
- + default:
- + break;
- + }
- +
- + return FALSE;
- +}
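- +
- +/* Informally, and assuming the constraint mnemonics above keep their
- + usual meaning on this port (Ks<N> = signed N-bit immediate), the
- + accepted addresses correspond to assembler operands such as
- +
- +     rp[0]           ; plain register
- +     rp[-4]          ; register + Ks16 displacement
- +     rp[ri << 2]     ; register + register scaled by 0..3
- +     rp++            ; post-increment
- +     --rp            ; pre-decrement
- +
- + plus pc-relative references to minipool labels and, when RMW
- + addressable data is enabled, direct Ks17 addresses for SImode.  */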
- +
- +
- +int
- +avr32_const_ok_for_move (HOST_WIDE_INT c)
- +{
- + if ( TARGET_V2_INSNS )
- + return ( avr32_const_ok_for_constraint_p (c, 'K', "Ks21")
- + /* movh instruction */
- + || avr32_hi16_immediate_operand (GEN_INT(c), VOIDmode) );
- + else
- + return avr32_const_ok_for_constraint_p (c, 'K', "Ks21");
- +}
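- +
- +/* For illustration, assuming Ks21 denotes a signed 21-bit immediate,
- + the check above accepts -(1 << 20) <= c <= (1 << 20) - 1, i.e.
- + -1048576 .. 1048575. On V2 parts a constant matching
- + avr32_hi16_immediate_operand (presumably one whose low 16 bits are
- + zero) can instead be loaded with a single movh.  */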
- +
- +
- +int
- +avr32_const_double_immediate (rtx value)
- +{
- + HOST_WIDE_INT hi, lo;
- +
- + if (GET_CODE (value) != CONST_DOUBLE)
- + return FALSE;
- +
- + if (SCALAR_FLOAT_MODE_P (GET_MODE (value)))
- + {
- + HOST_WIDE_INT target_float[2];
- + hi = lo = 0;
- + real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (value),
- + GET_MODE (value));
- + lo = target_float[0];
- + hi = target_float[1];
- + }
- + else
- + {
- + hi = CONST_DOUBLE_HIGH (value);
- + lo = CONST_DOUBLE_LOW (value);
- + }
- +
- + if (avr32_const_ok_for_constraint_p (lo, 'K', "Ks21")
- + && (GET_MODE (value) == SFmode
- + || avr32_const_ok_for_constraint_p (hi, 'K', "Ks21")))
- + {
- + return TRUE;
- + }
- +
- + return FALSE;
- +}
- +
- +
- +int
- +avr32_legitimate_constant_p (rtx x)
- +{
- + switch (GET_CODE (x))
- + {
- + case CONST_INT:
- + /* Check if we should put large immediates into the constant pool
- + or load them directly with mov/orh.  */
- + if (!avr32_imm_in_const_pool)
- + return 1;
- +
- + return avr32_const_ok_for_move (INTVAL (x));
- + case CONST_DOUBLE:
- + /* Check if we should put large immediates into the constant pool
- + or load them directly with mov/orh.  */
- + if (!avr32_imm_in_const_pool)
- + return 1;
- +
- + if (GET_MODE (x) == SFmode
- + || GET_MODE (x) == DFmode || GET_MODE (x) == DImode)
- + return avr32_const_double_immediate (x);
- + else
- + return 0;
- + case LABEL_REF:
- + case SYMBOL_REF:
- + return avr32_find_symbol (x) && (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS);
- + case CONST:
- + case HIGH:
- + case CONST_VECTOR:
- + return 0;
- + default:
- + printf ("%s():\n", __FUNCTION__);
- + debug_rtx (x);
- + return 1;
- + }
- +}
- +
- +
- +/* Strip any special encoding from labels */
- +const char *
- +avr32_strip_name_encoding (const char *name)
- +{
- + const char *stripped = name;
- +
- + while (1)
- + {
- + switch (stripped[0])
- + {
- + case '#':
- + stripped = strchr (name + 1, '#') + 1;
- + break;
- + case '*':
- + stripped = &stripped[1];
- + break;
- + default:
- + return stripped;
- + }
- + }
- +}
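- +
- +/* For example (hypothetical encodings, just tracing the loop above):
- + "#x#foo" is stripped to "foo", and the verbatim marker in "*bar" is
- + skipped, giving "bar".  */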
- +
- +
- +
- +/* Do anything needed before RTL is emitted for each function. */
- +static struct machine_function *
- +avr32_init_machine_status (void)
- +{
- + struct machine_function *machine;
- + machine =
- + (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
- +
- +#if AVR32_FT_UNKNOWN != 0
- + machine->func_type = AVR32_FT_UNKNOWN;
- +#endif
- +
- + machine->minipool_label_head = 0;
- + machine->minipool_label_tail = 0;
- + machine->ifcvt_after_reload = 0;
- + return machine;
- +}
- +
- +
- +void
- +avr32_init_expanders (void)
- +{
- + /* Arrange to initialize and mark the machine per-function status. */
- + init_machine_status = avr32_init_machine_status;
- +}
- +
- +
- +/* Return an RTX indicating where the return address to the
- + calling function can be found. */
- +rtx
- +avr32_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
- +{
- + if (count != 0)
- + return NULL_RTX;
- +
- + return get_hard_reg_initial_val (Pmode, LR_REGNUM);
- +}
- +
- +
- +void
- +avr32_encode_section_info (tree decl, rtx rtl, int first)
- +{
- + default_encode_section_info(decl, rtl, first);
- +
- + if ( TREE_CODE (decl) == VAR_DECL
- + && (GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
- + && (lookup_attribute ("rmw_addressable", DECL_ATTRIBUTES (decl))
- + || TARGET_RMW_ADDRESSABLE_DATA) ){
- + if ( !TARGET_RMW || flag_pic )
- + return;
- + /* TODO: consider warning here; RMW addressable data is ignored when
- + the arch lacks RMW instructions, and RMW instructions cannot be
- + used when compiling with -fpic.  */
- + SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT);
- + }
- +}
- +
- +
- +void
- +avr32_asm_output_label (FILE * stream, const char *name)
- +{
- + name = avr32_strip_name_encoding (name);
- +
- + /* Print the label. */
- + assemble_name (stream, name);
- + fprintf (stream, ":\n");
- +}
- +
- +
- +void
- +avr32_asm_weaken_label (FILE * stream, const char *name)
- +{
- + fprintf (stream, "\t.weak ");
- + assemble_name (stream, name);
- + fprintf (stream, "\n");
- +}
- +
- +
- +/*
- + Output a label reference, stripping any special encoding. A '*'
- + prefix marks a name to be output verbatim, without the user label
- + prefix.
- +*/
- +void
- +avr32_asm_output_labelref (FILE * stream, const char *name)
- +{
- + int verbatim = FALSE;
- + const char *stripped = name;
- + int strip_finished = FALSE;
- +
- + while (!strip_finished)
- + {
- + switch (stripped[0])
- + {
- + case '#':
- + stripped = strchr (name + 1, '#') + 1;
- + break;
- + case '*':
- + stripped = &stripped[1];
- + verbatim = TRUE;
- + break;
- + default:
- + strip_finished = TRUE;
- + break;
- + }
- + }
- +
- + if (verbatim)
- + fputs (stripped, stream);
- + else
- + asm_fprintf (stream, "%U%s", stripped);
- +}
- +
- +
- +/*
- + Check if the comparison in compare_exp is redundant
- + for the condition given in next_cond given that the
- + needed flags are already set by an earlier instruction.
- + Uses cc_prev_status to check this.
- +
- + Returns NULL_RTX if the compare is not redundant
- + or the new condition to use in the conditional
- + instruction if the compare is redundant.
- +*/
- +static rtx
- +is_compare_redundant (rtx compare_exp, rtx next_cond)
- +{
- + int z_flag_valid = FALSE;
- + int n_flag_valid = FALSE;
- + rtx new_cond;
- +
- + if (GET_CODE (compare_exp) != COMPARE
- + && GET_CODE (compare_exp) != AND)
- + return NULL_RTX;
- +
- +
- + if (rtx_equal_p (cc_prev_status.mdep.value, compare_exp))
- + {
- + /* cc0 already contains the correct comparison -> delete cmp insn */
- + return next_cond;
- + }
- +
- + if (GET_MODE (compare_exp) != SImode)
- + return NULL_RTX;
- +
- + switch (cc_prev_status.mdep.flags)
- + {
- + case CC_SET_VNCZ:
- + case CC_SET_NCZ:
- + n_flag_valid = TRUE;
- + /* Fall through.  */
- + case CC_SET_CZ:
- + case CC_SET_Z:
- + z_flag_valid = TRUE;
- + }
- +
- + if (cc_prev_status.mdep.value
- + && GET_CODE (compare_exp) == COMPARE
- + && REG_P (XEXP (compare_exp, 0))
- + && REGNO (XEXP (compare_exp, 0)) == REGNO (cc_prev_status.mdep.value)
- + && GET_CODE (XEXP (compare_exp, 1)) == CONST_INT
- + && next_cond != NULL_RTX)
- + {
- + if (INTVAL (XEXP (compare_exp, 1)) == 0
- + && z_flag_valid
- + && (GET_CODE (next_cond) == EQ || GET_CODE (next_cond) == NE))
- + /* We can skip the comparison since the Z flag already reflects ops[0].  */
- + return next_cond;
- + else if (n_flag_valid
- + && ((INTVAL (XEXP (compare_exp, 1)) == 0
- + && (GET_CODE (next_cond) == GE
- + || GET_CODE (next_cond) == LT))
- + || (INTVAL (XEXP (compare_exp, 1)) == -1
- + && (GET_CODE (next_cond) == GT
- + || GET_CODE (next_cond) == LE))))
- + {
- + /* We can skip the comparison since the N flag already reflects
- + ops[0], which means that we can use the mi/pl conditions to check
- + if ops[0] is GE or LT 0.  */
- + if ((GET_CODE (next_cond) == GE) || (GET_CODE (next_cond) == GT))
- + new_cond =
- + gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx),
- + UNSPEC_COND_PL);
- + else
- + new_cond =
- + gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx),
- + UNSPEC_COND_MI);
- + return new_cond;
- + }
- + }
- + return NULL_RTX;
- +}
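- +
- +/* As an example of the redundancy check (illustrative only; the exact
- + flags set depend on the previous instruction): in a sequence like
- +
- +     sub     r8, 1           ; flags already reflect the result
- +     cp.w    r8, 0
- +     breq    .L1
- +
- + the cp.w can be elided because the Z flag already tells whether r8
- + is zero; for GE/LT against zero the N flag is reused instead, via
- + the pl/mi conditions returned above.  */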
- +
- +
- +/* Updates cc_status. */
- +void
- +avr32_notice_update_cc (rtx exp, rtx insn)
- +{
- + enum attr_cc attr_cc = get_attr_cc (insn);
- +
- + if ( attr_cc == CC_SET_Z_IF_NOT_V2 )
- + {
- + if (TARGET_V2_INSNS)
- + attr_cc = CC_NONE;
- + else
- + attr_cc = CC_SET_Z;
- + }
- +
- + switch (attr_cc)
- + {
- + case CC_CALL_SET:
- + CC_STATUS_INIT;
- + /* Check if the function call returns a value in r12 */
- + if (REG_P (recog_data.operand[0])
- + && REGNO (recog_data.operand[0]) == RETVAL_REGNUM)
- + {
- + cc_status.flags = 0;
- + cc_status.mdep.value =
- + gen_rtx_COMPARE (SImode, recog_data.operand[0], const0_rtx);
- + cc_status.mdep.flags = CC_SET_VNCZ;
- + cc_status.mdep.cond_exec_cmp_clobbered = 0;
- +
- + }
- + break;
- + case CC_COMPARE:
- + {
- + /* Check that the compare will not be optimized away; if it will,
- + nothing should be done.  */
- + rtx compare_exp = SET_SRC (exp);
- + /* Check if we have a tst expression. If so convert it to a
- + compare with 0. */
- + if ( REG_P (SET_SRC (exp)) )
- + compare_exp = gen_rtx_COMPARE (GET_MODE (SET_SRC (exp)),
- + SET_SRC (exp),
- + const0_rtx);
- +
- + if (!next_insn_emits_cmp (insn)
- + && (is_compare_redundant (compare_exp, get_next_insn_cond (insn)) == NULL_RTX))
- + {
- +
- + /* Reset the nonstandard flag */
- + CC_STATUS_INIT;
- + cc_status.flags = 0;
- + cc_status.mdep.value = compare_exp;
- + cc_status.mdep.flags = CC_SET_VNCZ;
- + cc_status.mdep.cond_exec_cmp_clobbered = 0;
- + }
- + }
- + break;
- + case CC_CMP_COND_INSN:
- + {
- + /* Conditional insn that emits the compare itself. */
- + rtx cmp;
- + rtx cmp_op0, cmp_op1;
- + rtx cond;
- + rtx dest;
- + rtx next_insn = next_nonnote_insn (insn);
- +
- + if ( GET_CODE (exp) == COND_EXEC )
- + {
- + cmp_op0 = XEXP (COND_EXEC_TEST (exp), 0);
- + cmp_op1 = XEXP (COND_EXEC_TEST (exp), 1);
- + cond = COND_EXEC_TEST (exp);
- + dest = SET_DEST (COND_EXEC_CODE (exp));
- + }
- + else
- + {
- + /* If-then-else conditional; the compare operands are in
- + operands 4 and 5.  */
- + cmp_op0 = recog_data.operand[4];
- + cmp_op1 = recog_data.operand[5];
- + cond = recog_data.operand[1];
- + dest = SET_DEST (exp);
- + }
- +
- + if ( GET_CODE (cmp_op0) == AND )
- + cmp = cmp_op0;
- + else
- + cmp = gen_rtx_COMPARE (GET_MODE (cmp_op0),
- + cmp_op0,
- + cmp_op1);
- +
- + /* Check if the conditional insn updates a register present
- + in the comparison; if so, we must reset cc_status.  */
- + if (REG_P (dest)
- + && (reg_mentioned_p (dest, cmp_op0)
- + || reg_mentioned_p (dest, cmp_op1))
- + && GET_CODE (exp) != COND_EXEC )
- + {
- + CC_STATUS_INIT;
- + }
- + else if (is_compare_redundant (cmp, cond) == NULL_RTX)
- + {
- + /* Reset the nonstandard flag */
- + CC_STATUS_INIT;
- + if ( GET_CODE (cmp_op0) == AND )
- + {
- + cc_status.flags = CC_INVERTED;
- + cc_status.mdep.flags = CC_SET_Z;
- + }
- + else
- + {
- + cc_status.flags = 0;
- + cc_status.mdep.flags = CC_SET_VNCZ;
- + }
- + cc_status.mdep.value = cmp;
- + cc_status.mdep.cond_exec_cmp_clobbered = 0;
- + }
- +
- +
- + /* Check if we have a COND_EXEC insn which updates one
- + of the registers in the compare status. */
- + if (REG_P (dest)
- + && (reg_mentioned_p (dest, cmp_op0)
- + || reg_mentioned_p (dest, cmp_op1))
- + && GET_CODE (exp) == COND_EXEC )
- + cc_status.mdep.cond_exec_cmp_clobbered = 1;
- +
- + if ( cc_status.mdep.cond_exec_cmp_clobbered
- + && GET_CODE (exp) == COND_EXEC
- + && next_insn != NULL
- + && INSN_P (next_insn)
- + && !(GET_CODE (PATTERN (next_insn)) == COND_EXEC
- + && rtx_equal_p (XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 0), cmp_op0)
- + && rtx_equal_p (XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 1), cmp_op1)
- + && (GET_CODE (COND_EXEC_TEST (PATTERN (next_insn))) == GET_CODE (cond)
- + || GET_CODE (COND_EXEC_TEST (PATTERN (next_insn))) == reverse_condition (GET_CODE (cond)))) )
- + {
- + /* We have a sequence of conditional insns in which the compare status
- + has been clobbered, so the compare no longer reflects the values being
- + compared.  */
- + CC_STATUS_INIT;
- + cc_status.mdep.cond_exec_cmp_clobbered = 1;
- + }
- +
- + }
- + break;
- + case CC_BLD:
- + /* Bit load is kind of like an inverted testsi, because the Z flag is
- + inverted */
- + CC_STATUS_INIT;
- + cc_status.flags = CC_INVERTED;
- + cc_status.mdep.value = SET_SRC (exp);
- + cc_status.mdep.flags = CC_SET_Z;
- + cc_status.mdep.cond_exec_cmp_clobbered = 0;
- + break;
- + case CC_NONE:
- + /* Insn does not affect CC at all. Check if the instruction updates
- + some of the registers currently reflected in cc0.  */
- +
- + if ((GET_CODE (exp) == SET)
- + && (cc_status.value1 || cc_status.value2 || cc_status.mdep.value)
- + && (reg_mentioned_p (SET_DEST (exp), cc_status.value1)
- + || reg_mentioned_p (SET_DEST (exp), cc_status.value2)
- + || reg_mentioned_p (SET_DEST (exp), cc_status.mdep.value)))
- + {
- + CC_STATUS_INIT;
- + }
- +
- + /* If this is a parallel we must step through each of the parallel
- + expressions */
- + if (GET_CODE (exp) == PARALLEL)
- + {
- + int i;
- + for (i = 0; i < XVECLEN (exp, 0); ++i)
- + {
- + rtx vec_exp = XVECEXP (exp, 0, i);
- + if ((GET_CODE (vec_exp) == SET)
- + && (cc_status.value1 || cc_status.value2
- + || cc_status.mdep.value)
- + && (reg_mentioned_p (SET_DEST (vec_exp), cc_status.value1)
- + || reg_mentioned_p (SET_DEST (vec_exp),
- + cc_status.value2)
- + || reg_mentioned_p (SET_DEST (vec_exp),
- + cc_status.mdep.value)))
- + {
- + CC_STATUS_INIT;
- + }
- + }
- + }
- +
- + /* Check if we have memory operations with post_inc or pre_dec on the
- + register currently reflected in cc0.  */
- + if (GET_CODE (exp) == SET
- + && GET_CODE (SET_SRC (exp)) == MEM
- + && (GET_CODE (XEXP (SET_SRC (exp), 0)) == POST_INC
- + || GET_CODE (XEXP (SET_SRC (exp), 0)) == PRE_DEC)
- + &&
- + (reg_mentioned_p
- + (XEXP (XEXP (SET_SRC (exp), 0), 0), cc_status.value1)
- + || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
- + cc_status.value2)
- + || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
- + cc_status.mdep.value)))
- + CC_STATUS_INIT;
- +
- + if (GET_CODE (exp) == SET
- + && GET_CODE (SET_DEST (exp)) == MEM
- + && (GET_CODE (XEXP (SET_DEST (exp), 0)) == POST_INC
- + || GET_CODE (XEXP (SET_DEST (exp), 0)) == PRE_DEC)
- + &&
- + (reg_mentioned_p
- + (XEXP (XEXP (SET_DEST (exp), 0), 0), cc_status.value1)
- + || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
- + cc_status.value2)
- + || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
- + cc_status.mdep.value)))
- + CC_STATUS_INIT;
- + break;
- +
- + case CC_SET_VNCZ:
- + CC_STATUS_INIT;
- + cc_status.mdep.value = recog_data.operand[0];
- + cc_status.mdep.flags = CC_SET_VNCZ;
- + cc_status.mdep.cond_exec_cmp_clobbered = 0;
- + break;
- +
- + case CC_SET_NCZ:
- + CC_STATUS_INIT;
- + cc_status.mdep.value = recog_data.operand[0];
- + cc_status.mdep.flags = CC_SET_NCZ;
- + cc_status.mdep.cond_exec_cmp_clobbered = 0;
- + break;
- +
- + case CC_SET_CZ:
- + CC_STATUS_INIT;
- + cc_status.mdep.value = recog_data.operand[0];
- + cc_status.mdep.flags = CC_SET_CZ;
- + cc_status.mdep.cond_exec_cmp_clobbered = 0;
- + break;
- +
- + case CC_SET_Z:
- + CC_STATUS_INIT;
- + cc_status.mdep.value = recog_data.operand[0];
- + cc_status.mdep.flags = CC_SET_Z;
- + cc_status.mdep.cond_exec_cmp_clobbered = 0;
- + break;
- +
- + case CC_CLOBBER:
- + CC_STATUS_INIT;
- + break;
- +
- + default:
- + CC_STATUS_INIT;
- + }
- +}
- +
- +
- +/*
- + Outputs to stdio stream STREAM the assembler syntax for the
- + instruction operand X, an RTL expression.
- +*/
- +void
- +avr32_print_operand (FILE * stream, rtx x, int code)
- +{
- + int error = 0;
- +
- + if ( code == '?' )
- + {
- + /* Predicable instruction, print condition code */
- +
- + /* If the insn should not be conditional then do nothing. */
- + if ( current_insn_predicate == NULL_RTX )
- + return;
- +
- + /* Set x to the predicate to force printing
- + the condition later on. */
- + x = current_insn_predicate;
- +
- + /* Reverse the condition when using the bld insn. */
- + if ( GET_CODE (XEXP(current_insn_predicate,0)) == AND )
- + x = reversed_condition (current_insn_predicate);
- + }
- + else if ( code == '!' )
- + {
- + /* Output compare for conditional insn if needed. */
- + rtx new_cond;
- + gcc_assert ( current_insn_predicate != NULL_RTX );
- + new_cond = avr32_output_cmp(current_insn_predicate,
- + GET_MODE(XEXP(current_insn_predicate,0)),
- + XEXP(current_insn_predicate,0),
- + XEXP(current_insn_predicate,1));
- +
- + /* Check if the new condition is a special avr32 condition
- + specified using UNSPECs. If so we must handle it differently. */
- + if ( GET_CODE (new_cond) == UNSPEC )
- + {
- + current_insn_predicate =
- + gen_rtx_UNSPEC (CCmode,
- + gen_rtvec (2,
- + XEXP(current_insn_predicate,0),
- + XEXP(current_insn_predicate,1)),
- + XINT (new_cond, 1));
- + }
- + else
- + {
- + PUT_CODE(current_insn_predicate, GET_CODE(new_cond));
- + }
- + return;
- + }
- +
- + switch (GET_CODE (x))
- + {
- + case UNSPEC:
- + switch (XINT (x, 1))
- + {
- + case UNSPEC_COND_PL:
- + if (code == 'i')
- + fputs ("mi", stream);
- + else
- + fputs ("pl", stream);
- + break;
- + case UNSPEC_COND_MI:
- + if (code == 'i')
- + fputs ("pl", stream);
- + else
- + fputs ("mi", stream);
- + break;
- + default:
- + error = 1;
- + }
- + break;
- + case EQ:
- + if (code == 'i')
- + fputs ("ne", stream);
- + else
- + fputs ("eq", stream);
- + break;
- + case NE:
- + if (code == 'i')
- + fputs ("eq", stream);
- + else
- + fputs ("ne", stream);
- + break;
- + case GT:
- + if (code == 'i')
- + fputs ("le", stream);
- + else
- + fputs ("gt", stream);
- + break;
- + case GTU:
- + if (code == 'i')
- + fputs ("ls", stream);
- + else
- + fputs ("hi", stream);
- + break;
- + case LT:
- + if (code == 'i')
- + fputs ("ge", stream);
- + else
- + fputs ("lt", stream);
- + break;
- + case LTU:
- + if (code == 'i')
- + fputs ("hs", stream);
- + else
- + fputs ("lo", stream);
- + break;
- + case GE:
- + if (code == 'i')
- + fputs ("lt", stream);
- + else
- + fputs ("ge", stream);
- + break;
- + case GEU:
- + if (code == 'i')
- + fputs ("lo", stream);
- + else
- + fputs ("hs", stream);
- + break;
- + case LE:
- + if (code == 'i')
- + fputs ("gt", stream);
- + else
- + fputs ("le", stream);
- + break;
- + case LEU:
- + if (code == 'i')
- + fputs ("hi", stream);
- + else
- + fputs ("ls", stream);
- + break;
- + case CONST_INT:
- + {
- + HOST_WIDE_INT value = INTVAL (x);
- +
- + switch (code)
- + {
- + case 'm':
- + if ( HOST_BITS_PER_WIDE_INT > BITS_PER_WORD )
- + {
- + /* A const_int can be used to represent DImode constants. */
- + value >>= BITS_PER_WORD;
- + }
- + /* We might get a const_int immediate when setting a DI register;
- + we must then return the correctly sign-extended DI. The most
- + significant word is just the sign extension.  */
- + else if (value < 0)
- + value = -1;
- + else
- + value = 0;
- + break;
- + case 'i':
- + value++;
- + break;
- + case 'p':
- + {
- + /* Set to bit position of first bit set in immediate */
- + int i, bitpos = 32;
- + for (i = 0; i < 32; i++)
- + if (value & (1 << i))
- + {
- + bitpos = i;
- + break;
- + }
- + value = bitpos;
- + }
- + break;
- + case 'z':
- + {
- + /* Set to bit position of first bit cleared in immediate */
- + int i, bitpos = 32;
- + for (i = 0; i < 32; i++)
- + if (!(value & (1 << i)))
- + {
- + bitpos = i;
- + break;
- + }
- + value = bitpos;
- + }
- + break;
- + case 'r':
- + {
- + /* Reglist 8 */
- + char op[50];
- + op[0] = '\0';
- +
- + if (value & 0x01)
- + strcpy (op, "r0-r3");
- + if (value & 0x02)
- + strlen (op) ? strcat (op, ", r4-r7") : strcpy (op,"r4-r7");
- + if (value & 0x04)
- + strlen (op) ? strcat (op, ", r8-r9") : strcpy (op,"r8-r9");
- + if (value & 0x08)
- + strlen (op) ? strcat (op, ", r10") : strcpy (op,"r10");
- + if (value & 0x10)
- + strlen (op) ? strcat (op, ", r11") : strcpy (op,"r11");
- + if (value & 0x20)
- + strlen (op) ? strcat (op, ", r12") : strcpy (op,"r12");
- + if (value & 0x40)
- + strlen (op) ? strcat (op, ", lr") : strcpy (op, "lr");
- + if (value & 0x80)
- + strlen (op) ? strcat (op, ", pc") : strcpy (op, "pc");
- +
- + fputs (op, stream);
- + return;
- + }
- + case 's':
- + {
- + /* Reglist 16 */
- + char reglist16_string[100];
- + int i;
- + bool first_reg = true;
- + reglist16_string[0] = '\0';
- +
- + for (i = 0; i < 16; ++i)
- + {
- + if (value & (1 << i))
- + {
- + if (first_reg)
- + first_reg = false;
- + else
- + strcat (reglist16_string, ", ");
- + strcat (reglist16_string, reg_names[INTERNAL_REGNUM (i)]);
- + }
- + }
- + fputs (reglist16_string, stream);
- + return;
- + }
- + case 'h':
- + /* Print halfword part of word */
- + fputs (value ? "b" : "t", stream);
- + return;
- + }
- +
- + /* Print the value.  */
- + fprintf (stream, HOST_WIDE_INT_PRINT_DEC, value);
- + break;
- + }
- + case CONST_DOUBLE:
- + {
- + HOST_WIDE_INT hi, lo;
- + if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
- + {
- + HOST_WIDE_INT target_float[2];
- + hi = lo = 0;
- + real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (x),
- + GET_MODE (x));
- + /* For doubles the most significant part starts at index 0. */
- + if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
- + {
- + hi = target_float[0];
- + lo = target_float[1];
- + }
- + else
- + {
- + lo = target_float[0];
- + }
- + }
- + else
- + {
- + hi = CONST_DOUBLE_HIGH (x);
- + lo = CONST_DOUBLE_LOW (x);
- + }
- +
- + if (code == 'm')
- + fprintf (stream, "%ld", hi);
- + else
- + fprintf (stream, "%ld", lo);
- +
- + break;
- + }
- + case CONST:
- + output_addr_const (stream, XEXP (XEXP (x, 0), 0));
- + fprintf (stream, "+%ld", INTVAL (XEXP (XEXP (x, 0), 1)));
- + break;
- + case REG:
- + /* Swap register name if the register is DImode or DFmode. */
- + if (GET_MODE (x) == DImode || GET_MODE (x) == DFmode)
- + {
- + /* A double register must start at an even register number. */
- + gcc_assert (!(REGNO (x) % 2));
- + if (code == 'm')
- + fputs (reg_names[true_regnum (x)], stream);
- + else
- + fputs (reg_names[true_regnum (x) + 1], stream);
- + }
- + else if (GET_MODE (x) == TImode)
- + {
- + switch (code)
- + {
- + case 'T':
- + fputs (reg_names[true_regnum (x)], stream);
- + break;
- + case 'U':
- + fputs (reg_names[true_regnum (x) + 1], stream);
- + break;
- + case 'L':
- + fputs (reg_names[true_regnum (x) + 2], stream);
- + break;
- + case 'B':
- + fputs (reg_names[true_regnum (x) + 3], stream);
- + break;
- + default:
- + fprintf (stream, "%s, %s, %s, %s",
- + reg_names[true_regnum (x) + 3],
- + reg_names[true_regnum (x) + 2],
- + reg_names[true_regnum (x) + 1],
- + reg_names[true_regnum (x)]);
- + break;
- + }
- + }
- + else
- + {
- + fputs (reg_names[true_regnum (x)], stream);
- + }
- + break;
- + case CODE_LABEL:
- + case LABEL_REF:
- + case SYMBOL_REF:
- + output_addr_const (stream, x);
- + break;
- + case MEM:
- + switch (GET_CODE (XEXP (x, 0)))
- + {
- + case LABEL_REF:
- + case SYMBOL_REF:
- + output_addr_const (stream, XEXP (x, 0));
- + break;
- + case MEM:
- + switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
- + {
- + case SYMBOL_REF:
- + output_addr_const (stream, XEXP (XEXP (x, 0), 0));
- + break;
- + default:
- + error = 1;
- + break;
- + }
- + break;
- + case REG:
- + avr32_print_operand (stream, XEXP (x, 0), 0);
- + if (code != 'p')
- + fputs ("[0]", stream);
- + break;
- + case PRE_DEC:
- + fputs ("--", stream);
- + avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
- + break;
- + case POST_INC:
- + avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
- + fputs ("++", stream);
- + break;
- + case PLUS:
- + {
- + rtx op0 = XEXP (XEXP (x, 0), 0);
- + rtx op1 = XEXP (XEXP (x, 0), 1);
- + rtx base = NULL_RTX, offset = NULL_RTX;
- +
- + if (avr32_address_register_rtx_p (op0, 1))
- + {
- + base = op0;
- + offset = op1;
- + }
- + else if (avr32_address_register_rtx_p (op1, 1))
- + {
- + /* Operands are switched. */
- + base = op1;
- + offset = op0;
- + }
- +
- + gcc_assert (base && offset
- + && avr32_address_register_rtx_p (base, 1)
- + && avr32_legitimate_index_p (GET_MODE (x), offset,
- + 1));
- +
- + avr32_print_operand (stream, base, 0);
- + fputs ("[", stream);
- + avr32_print_operand (stream, offset, 0);
- + fputs ("]", stream);
- + break;
- + }
- + case CONST:
- + output_addr_const (stream, XEXP (XEXP (XEXP (x, 0), 0), 0));
- + fprintf (stream, " + %ld",
- + INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)));
- + break;
- + case CONST_INT:
- + avr32_print_operand (stream, XEXP (x, 0), 0);
- + break;
- + default:
- + error = 1;
- + }
- + break;
- + case MULT:
- + {
- + int value = INTVAL (XEXP (x, 1));
- +
- + /* Convert immediate in multiplication into a shift immediate */
- + switch (value)
- + {
- + case 2:
- + value = 1;
- + break;
- + case 4:
- + value = 2;
- + break;
- + case 8:
- + value = 3;
- + break;
- + default:
- + value = 0;
- + }
- + fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
- + value);
- + break;
- + }
- + case ASHIFT:
- + if (GET_CODE (XEXP (x, 1)) == CONST_INT)
- + fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
- + (int) INTVAL (XEXP (x, 1)));
- + else if (REG_P (XEXP (x, 1)))
- + fprintf (stream, "%s << %s", reg_names[true_regnum (XEXP (x, 0))],
- + reg_names[true_regnum (XEXP (x, 1))]);
- + else
- + {
- + error = 1;
- + }
- + break;
- + case LSHIFTRT:
- + if (GET_CODE (XEXP (x, 1)) == CONST_INT)
- + fprintf (stream, "%s >> %i", reg_names[true_regnum (XEXP (x, 0))],
- + (int) INTVAL (XEXP (x, 1)));
- + else if (REG_P (XEXP (x, 1)))
- + fprintf (stream, "%s >> %s", reg_names[true_regnum (XEXP (x, 0))],
- + reg_names[true_regnum (XEXP (x, 1))]);
- + else
- + {
- + error = 1;
- + }
- + fprintf (stream, ">>");
- + break;
- + case PARALLEL:
- + {
- + /* Load store multiple */
- + int i;
- + int count = XVECLEN (x, 0);
- + int reglist16 = 0;
- + char reglist16_string[100];
- +
- + for (i = 0; i < count; ++i)
- + {
- + rtx vec_elm = XVECEXP (x, 0, i);
- + if (GET_CODE (vec_elm) != SET)
- + {
- + debug_rtx (vec_elm);
- + internal_error ("Unknown element in parallel expression!");
- + }
- + if (GET_CODE (XEXP (vec_elm, 0)) == REG)
- + {
- + /* Load multiple */
- + reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 0)));
- + }
- + else
- + {
- + /* Store multiple */
- + reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 1)));
- + }
- + }
- +
- + avr32_make_reglist16 (reglist16, reglist16_string);
- + fputs (reglist16_string, stream);
- +
- + break;
- + }
- +
- + case PLUS:
- + {
- + rtx op0 = XEXP (x, 0);
- + rtx op1 = XEXP (x, 1);
- + rtx base = NULL_RTX, offset = NULL_RTX;
- +
- + if (avr32_address_register_rtx_p (op0, 1))
- + {
- + base = op0;
- + offset = op1;
- + }
- + else if (avr32_address_register_rtx_p (op1, 1))
- + {
- + /* Operands are switched. */
- + base = op1;
- + offset = op0;
- + }
- +
- + gcc_assert (base && offset
- + && avr32_address_register_rtx_p (base, 1)
- + && avr32_legitimate_index_p (GET_MODE (x), offset, 1));
- +
- + avr32_print_operand (stream, base, 0);
- + fputs ("[", stream);
- + avr32_print_operand (stream, offset, 0);
- + fputs ("]", stream);
- + break;
- + }
- +
- + default:
- + error = 1;
- + }
- +
- + if (error)
- + {
- + debug_rtx (x);
- + internal_error ("Illegal expression for avr32_print_operand");
- + }
- +}
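- +
- +/* For example, with a predicate (eq cc0 0) the '?' code above prints
- + "eq" while the inverse code 'i' prints "ne", so an instruction
- + template like "mov%?\t%0, %1" would come out as "moveq". (A sketch;
- + the actual operand templates live in the machine description.)  */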
- +
- +rtx
- +avr32_get_note_reg_equiv (rtx insn)
- +{
- + rtx note;
- +
- + note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
- +
- + if (note != NULL_RTX)
- + return XEXP (note, 0);
- + else
- + return NULL_RTX;
- +}
- +
- +
- +/*
- + Outputs to stdio stream stream the assembler syntax for an instruction
- + operand that is a memory reference whose address is x. x is an RTL
- + expression.
- +
- + ToDo: fixme.
- +*/
- +void
- +avr32_print_operand_address (FILE * stream, rtx x)
- +{
- + fprintf (stream, "(%d) /* address */", REGNO (x));
- +}
- +
- +
- +/* Return true if _GLOBAL_OFFSET_TABLE_ symbol is mentioned. */
- +bool
- +avr32_got_mentioned_p (rtx addr)
- +{
- + if (GET_CODE (addr) == MEM)
- + addr = XEXP (addr, 0);
- + while (GET_CODE (addr) == CONST)
- + addr = XEXP (addr, 0);
- + if (GET_CODE (addr) == SYMBOL_REF)
- + {
- + return streq (XSTR (addr, 0), "_GLOBAL_OFFSET_TABLE_");
- + }
- + if (GET_CODE (addr) == PLUS || GET_CODE (addr) == MINUS)
- + {
- + bool l1, l2;
- +
- + l1 = avr32_got_mentioned_p (XEXP (addr, 0));
- + l2 = avr32_got_mentioned_p (XEXP (addr, 1));
- + return l1 || l2;
- + }
- + return false;
- +}
- +
- +
- +/* Find the symbol in an address expression. */
- +rtx
- +avr32_find_symbol (rtx addr)
- +{
- + if (GET_CODE (addr) == MEM)
- + addr = XEXP (addr, 0);
- +
- + while (GET_CODE (addr) == CONST)
- + addr = XEXP (addr, 0);
- +
- + if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
- + return addr;
- + if (GET_CODE (addr) == PLUS)
- + {
- + rtx l1, l2;
- +
- + l1 = avr32_find_symbol (XEXP (addr, 0));
- + l2 = avr32_find_symbol (XEXP (addr, 1));
- + if (l1 != NULL_RTX && l2 == NULL_RTX)
- + return l1;
- + else if (l1 == NULL_RTX && l2 != NULL_RTX)
- + return l2;
- + }
- +
- + return NULL_RTX;
- +}
- +
- +
- +/* Routines for manipulation of the constant pool. */
- +
- +/* AVR32 instructions cannot load a large constant directly into a
- + register; they have to come from a pc relative load. The constant
- + must therefore be placed in the addressable range of the pc
- + relative load. Depending on the precise pc relative load
- + instruction the range is somewhere between 256 bytes and 4k. This
- + means that we often have to dump a constant inside a function, and
- + generate code to branch around it.
- +
- + It is important to minimize this, since the branches will slow
- + things down and make the code larger.
- +
- + Normally we can hide the table after an existing unconditional
- + branch so that there is no interruption of the flow, but in the
- + worst case the code looks like this:
- +
- + lddpc rn, L1
- + ...
- + rjmp L2
- + align
- + L1: .long value
- + L2:
- + ...
- +
- + lddpc rn, L3
- + ...
- + rjmp L4
- + align
- + L3: .long value
- + L4:
- + ...
- +
- + We fix this by performing a scan after scheduling, which notices
- + which instructions need to have their operands fetched from the
- + constant table and builds the table.
- +
- + The algorithm starts by building a table of all the constants that
- + need fixing up and all the natural barriers in the function (places
- + where a constant table can be dropped without breaking the flow).
- + For each fixup we note how far the pc-relative replacement will be
- + able to reach and the offset of the instruction into the function.
- +
- + Having built the table we then group the fixes together to form
- + tables that are as large as possible (subject to addressing
- + constraints) and emit each table of constants after the last
- + barrier that is within range of all the instructions in the group.
- + If a group does not contain a barrier, then we forcibly create one
- + by inserting a jump instruction into the flow. Once the table has
- + been inserted, the insns are then modified to reference the
- + relevant entry in the pool.
- +
- + Possible enhancements to the algorithm (not implemented) are:
- +
- + 1) For some processors and object formats, there may be benefit in
- + aligning the pools to the start of cache lines; this alignment
- + would need to be taken into account when calculating addressability
- + of a pool. */
- +
- +/* These typedefs are located at the start of this file, so that
- + they can be used in the prototypes there. This comment is to
- + remind readers of that fact so that the following structures
- + can be understood more easily.
- +
- + typedef struct minipool_node Mnode;
- + typedef struct minipool_fixup Mfix; */
- +
- +struct minipool_node
- +{
- + /* Doubly linked chain of entries. */
- + Mnode *next;
- + Mnode *prev;
- + /* The maximum offset into the code at which this entry can be placed.
- + While pushing fixes for forward references, all entries are sorted in
- + order of increasing max_address.  */
- + HOST_WIDE_INT max_address;
- + /* Similarly for an entry inserted for a backwards ref. */
- + HOST_WIDE_INT min_address;
- + /* The number of fixes referencing this entry. This can become zero if we
- + "unpush" an entry. In this case we ignore the entry when we come to
- + emit the code. */
- + int refcount;
- + /* The offset from the start of the minipool. */
- + HOST_WIDE_INT offset;
- + /* The value in the table. */
- + rtx value;
- + /* The mode of value. */
- + enum machine_mode mode;
- + /* The size of the value. */
- + int fix_size;
- +};
- +
- +
- +struct minipool_fixup
- +{
- + Mfix *next;
- + rtx insn;
- + HOST_WIDE_INT address;
- + rtx *loc;
- + enum machine_mode mode;
- + int fix_size;
- + rtx value;
- + Mnode *minipool;
- + HOST_WIDE_INT forwards;
- + HOST_WIDE_INT backwards;
- +};
- +
- +
- +/* Fixes less than a word need padding out to a word boundary. */
- +#define MINIPOOL_FIX_SIZE(mode, value) \
- + (IS_FORCE_MINIPOOL(value) ? 0 : \
- + (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4))
- +
- +#define IS_FORCE_MINIPOOL(x) \
- + (GET_CODE(x) == UNSPEC && \
- + XINT(x, 1) == UNSPEC_FORCE_MINIPOOL)
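- +
- +/* For example, with the definitions above a QImode or HImode fix is
- + padded out to 4 bytes, a DImode or DFmode value takes 8, and a
- + force-minipool marker contributes 0 bytes to the pool.  */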
- +
- +static Mnode *minipool_vector_head;
- +static Mnode *minipool_vector_tail;
- +
- +/* The linked list of all minipool fixes required for this function. */
- +Mfix *minipool_fix_head;
- +Mfix *minipool_fix_tail;
- +/* The fix entry for the current minipool, once it has been placed. */
- +Mfix *minipool_barrier;
- +
- +
- +/* Determines if INSN is the start of a jump table. Returns the end
- + of the TABLE or NULL_RTX. */
- +static rtx
- +is_jump_table (rtx insn)
- +{
- + rtx table;
- +
- + if (GET_CODE (insn) == JUMP_INSN
- + && JUMP_LABEL (insn) != NULL
- + && ((table = next_real_insn (JUMP_LABEL (insn)))
- + == next_real_insn (insn))
- + && table != NULL
- + && GET_CODE (table) == JUMP_INSN
- + && (GET_CODE (PATTERN (table)) == ADDR_VEC
- + || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
- + return table;
- +
- + return NULL_RTX;
- +}
- +
- +
- +static HOST_WIDE_INT
- +get_jump_table_size (rtx insn)
- +{
- + /* ADDR_VECs only take room if read-only data goes into the text section. */
- + if (JUMP_TABLES_IN_TEXT_SECTION
- +#if !defined(READONLY_DATA_SECTION_ASM_OP)
- + || 1
- +#endif
- + )
- + {
- + rtx body = PATTERN (insn);
- + int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
- +
- + return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
- + }
- +
- + return 0;
- +}
- +
- +
- +/* Move a minipool fix MP from its current location to before MAX_MP.
- + If MAX_MP is NULL, then MP doesn't need moving, but the addressing
- + constraints may need updating. */
- +static Mnode *
- +move_minipool_fix_forward_ref (Mnode * mp, Mnode * max_mp,
- + HOST_WIDE_INT max_address)
- +{
- + /* This should never be true and the code below assumes these are
- + different. */
- + if (mp == max_mp)
- + abort ();
- +
- + if (max_mp == NULL)
- + {
- + if (max_address < mp->max_address)
- + mp->max_address = max_address;
- + }
- + else
- + {
- + if (max_address > max_mp->max_address - mp->fix_size)
- + mp->max_address = max_mp->max_address - mp->fix_size;
- + else
- + mp->max_address = max_address;
- +
- + /* Unlink MP from its current position. Since max_mp is non-null,
- + mp->prev must be non-null. */
- + mp->prev->next = mp->next;
- + if (mp->next != NULL)
- + mp->next->prev = mp->prev;
- + else
- + minipool_vector_tail = mp->prev;
- +
- + /* Re-insert it before MAX_MP. */
- + mp->next = max_mp;
- + mp->prev = max_mp->prev;
- + max_mp->prev = mp;
- +
- + if (mp->prev != NULL)
- + mp->prev->next = mp;
- + else
- + minipool_vector_head = mp;
- + }
- +
- + /* Save the new entry. */
- + max_mp = mp;
- +
- + /* Scan over the preceding entries and adjust their addresses as required.
- + */
- + while (mp->prev != NULL
- + && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
- + {
- + mp->prev->max_address = mp->max_address - mp->prev->fix_size;
- + mp = mp->prev;
- + }
- +
- + return max_mp;
- +}
- +
- +
- +/* Add a constant to the minipool for a forward reference. Returns the
- + node added or NULL if the constant will not fit in this pool. */
- +static Mnode *
- +add_minipool_forward_ref (Mfix * fix)
- +{
- + /* If set, max_mp is the first pool_entry that has a lower constraint than
- + the one we are trying to add. */
- + Mnode *max_mp = NULL;
- + HOST_WIDE_INT max_address = fix->address + fix->forwards;
- + Mnode *mp;
- +
- + /* If this fix's address is greater than the address of the first entry,
- + then we can't put the fix in this pool. We subtract the size of the
- + current fix to ensure that if the table is fully packed we still have
- + enough room to insert this value by shuffling the other fixes forwards.  */
- + if (minipool_vector_head &&
- + fix->address >= minipool_vector_head->max_address - fix->fix_size)
- + return NULL;
- +
- + /* Scan the pool to see if a constant with the same value has already been
- + added. While we are doing this, also note the location where we must
- + insert the constant if it doesn't already exist. */
- + for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
- + {
- + if (GET_CODE (fix->value) == GET_CODE (mp->value)
- + && fix->mode == mp->mode
- + && (GET_CODE (fix->value) != CODE_LABEL
- + || (CODE_LABEL_NUMBER (fix->value)
- + == CODE_LABEL_NUMBER (mp->value)))
- + && rtx_equal_p (fix->value, mp->value))
- + {
- + /* More than one fix references this entry. */
- + mp->refcount++;
- + return move_minipool_fix_forward_ref (mp, max_mp, max_address);
- + }
- +
- + /* Note the insertion point if necessary. */
- + if (max_mp == NULL && mp->max_address > max_address)
- + max_mp = mp;
- +
- + }
- +
- + /* The value is not currently in the minipool, so we need to create a new
- + entry for it. If MAX_MP is NULL, the entry will be put on the end of
- + the list since the placement is less constrained than any existing
- + entry. Otherwise, we insert the new fix before MAX_MP and, if
- + necessary, adjust the constraints on the other entries. */
- + mp = xmalloc (sizeof (*mp));
- + mp->fix_size = fix->fix_size;
- + mp->mode = fix->mode;
- + mp->value = fix->value;
- + mp->refcount = 1;
- + /* Not yet required for a backwards ref. */
- + mp->min_address = -65536;
- +
- + if (max_mp == NULL)
- + {
- + mp->max_address = max_address;
- + mp->next = NULL;
- + mp->prev = minipool_vector_tail;
- +
- + if (mp->prev == NULL)
- + {
- + minipool_vector_head = mp;
- + minipool_vector_label = gen_label_rtx ();
- + }
- + else
- + mp->prev->next = mp;
- +
- + minipool_vector_tail = mp;
- + }
- + else
- + {
- + if (max_address > max_mp->max_address - mp->fix_size)
- + mp->max_address = max_mp->max_address - mp->fix_size;
- + else
- + mp->max_address = max_address;
- +
- + mp->next = max_mp;
- + mp->prev = max_mp->prev;
- + max_mp->prev = mp;
- + if (mp->prev != NULL)
- + mp->prev->next = mp;
- + else
- + minipool_vector_head = mp;
- + }
- +
- + /* Save the new entry. */
- + max_mp = mp;
- +
- + /* Scan over the preceding entries and adjust their addresses as required.
- + */
- + while (mp->prev != NULL
- + && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
- + {
- + mp->prev->max_address = mp->max_address - mp->prev->fix_size;
- + mp = mp->prev;
- + }
- +
- + return max_mp;
- +}
- +
- +
- +static Mnode *
- +move_minipool_fix_backward_ref (Mnode * mp, Mnode * min_mp,
- + HOST_WIDE_INT min_address)
- +{
- + HOST_WIDE_INT offset;
- +
- + /* This should never be true, and the code below assumes these are
- + different. */
- + if (mp == min_mp)
- + abort ();
- +
- + if (min_mp == NULL)
- + {
- + if (min_address > mp->min_address)
- + mp->min_address = min_address;
- + }
- + else
- + {
- + /* We will adjust this below if it is too loose. */
- + mp->min_address = min_address;
- +
- + /* Unlink MP from its current position. Since min_mp is non-null,
- + mp->next must be non-null. */
- + mp->next->prev = mp->prev;
- + if (mp->prev != NULL)
- + mp->prev->next = mp->next;
- + else
- + minipool_vector_head = mp->next;
- +
- + /* Reinsert it after MIN_MP. */
- + mp->prev = min_mp;
- + mp->next = min_mp->next;
- + min_mp->next = mp;
- + if (mp->next != NULL)
- + mp->next->prev = mp;
- + else
- + minipool_vector_tail = mp;
- + }
- +
- + min_mp = mp;
- +
- + offset = 0;
- + for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
- + {
- + mp->offset = offset;
- + if (mp->refcount > 0)
- + offset += mp->fix_size;
- +
- + if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
- + mp->next->min_address = mp->min_address + mp->fix_size;
- + }
- +
- + return min_mp;
- +}
- +
- +
- +/* Add a constant to the minipool for a backward reference. Returns the
- + node added or NULL if the constant will not fit in this pool.
- +
- + Note that the code for insertion for a backwards reference can be
- + somewhat confusing because the calculated offsets for each fix do
- + not take into account the size of the pool (which is still under
- + construction).  */
- +static Mnode *
- +add_minipool_backward_ref (Mfix * fix)
- +{
- + /* If set, min_mp is the last pool_entry that has a lower constraint than
- + the one we are trying to add. */
- + Mnode *min_mp = NULL;
- + /* This can be negative, since it is only a constraint. */
- + HOST_WIDE_INT min_address = fix->address - fix->backwards;
- + Mnode *mp;
- +
- + /* If we can't reach the current pool from this insn, or if we can't insert
- + this entry at the end of the pool without pushing other fixes out of
- + range, then we don't try. This ensures that we can't fail later on. */
- + if (min_address >= minipool_barrier->address
- + || (minipool_vector_tail->min_address + fix->fix_size
- + >= minipool_barrier->address))
- + return NULL;
- +
- + /* Scan the pool to see if a constant with the same value has already been
- + added. While we are doing this, also note the location where we must
- + insert the constant if it doesn't already exist. */
- + for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
- + {
- + if (GET_CODE (fix->value) == GET_CODE (mp->value)
- + && fix->mode == mp->mode
- + && (GET_CODE (fix->value) != CODE_LABEL
- + || (CODE_LABEL_NUMBER (fix->value)
- + == CODE_LABEL_NUMBER (mp->value)))
- + && rtx_equal_p (fix->value, mp->value)
- + /* Check that there is enough slack to move this entry to the end
- + of the table (this is conservative). */
- + && (mp->max_address
- + > (minipool_barrier->address
- + + minipool_vector_tail->offset
- + + minipool_vector_tail->fix_size)))
- + {
- + mp->refcount++;
- + return move_minipool_fix_backward_ref (mp, min_mp, min_address);
- + }
- +
- + if (min_mp != NULL)
- + mp->min_address += fix->fix_size;
- + else
- + {
- + /* Note the insertion point if necessary. */
- + if (mp->min_address < min_address)
- + {
- + min_mp = mp;
- + }
- + else if (mp->max_address
- + < minipool_barrier->address + mp->offset + fix->fix_size)
- + {
- + /* Inserting before this entry would push the fix beyond its
- + maximum address (which can happen if we have re-located a
- + forwards fix); force the new fix to come after it. */
- + min_mp = mp;
- + min_address = mp->min_address + fix->fix_size;
- + }
- + }
- + }
- +
- + /* We need to create a new entry. */
- + mp = xmalloc (sizeof (*mp));
- + mp->fix_size = fix->fix_size;
- + mp->mode = fix->mode;
- + mp->value = fix->value;
- + mp->refcount = 1;
- + mp->max_address = minipool_barrier->address + 65536;
- +
- + mp->min_address = min_address;
- +
- + if (min_mp == NULL)
- + {
- + mp->prev = NULL;
- + mp->next = minipool_vector_head;
- +
- + if (mp->next == NULL)
- + {
- + minipool_vector_tail = mp;
- + minipool_vector_label = gen_label_rtx ();
- + }
- + else
- + mp->next->prev = mp;
- +
- + minipool_vector_head = mp;
- + }
- + else
- + {
- + mp->next = min_mp->next;
- + mp->prev = min_mp;
- + min_mp->next = mp;
- +
- + if (mp->next != NULL)
- + mp->next->prev = mp;
- + else
- + minipool_vector_tail = mp;
- + }
- +
- + /* Save the new entry. */
- + min_mp = mp;
- +
- + if (mp->prev)
- + mp = mp->prev;
- + else
- + mp->offset = 0;
- +
- + /* Scan over the following entries and adjust their offsets. */
- + while (mp->next != NULL)
- + {
- + if (mp->next->min_address < mp->min_address + mp->fix_size)
- + mp->next->min_address = mp->min_address + mp->fix_size;
- +
- + if (mp->refcount)
- + mp->next->offset = mp->offset + mp->fix_size;
- + else
- + mp->next->offset = mp->offset;
- +
- + mp = mp->next;
- + }
- +
- + return min_mp;
- +}
- +
- +
- +static void
- +assign_minipool_offsets (Mfix * barrier)
- +{
- + HOST_WIDE_INT offset = 0;
- + Mnode *mp;
- +
- + minipool_barrier = barrier;
- +
- + for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
- + {
- + mp->offset = offset;
- +
- + if (mp->refcount > 0)
- + offset += mp->fix_size;
- + }
- +}
- +
- +
- +/* Print a symbolic form of X to the debug file, F. */
- +static void
- +avr32_print_value (FILE * f, rtx x)
- +{
- + switch (GET_CODE (x))
- + {
- + case CONST_INT:
- + fprintf (f, "0x%x", (int) INTVAL (x));
- + return;
- +
- + case CONST_DOUBLE:
- + fprintf (f, "<0x%lx,0x%lx>", (long) XWINT (x, 2), (long) XWINT (x, 3));
- + return;
- +
- + case CONST_VECTOR:
- + {
- + int i;
- +
- + fprintf (f, "<");
- + for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
- + {
- + fprintf (f, "0x%x", (int) INTVAL (CONST_VECTOR_ELT (x, i)));
- + if (i < (CONST_VECTOR_NUNITS (x) - 1))
- + fputc (',', f);
- + }
- + fprintf (f, ">");
- + }
- + return;
- +
- + case CONST_STRING:
- + fprintf (f, "\"%s\"", XSTR (x, 0));
- + return;
- +
- + case SYMBOL_REF:
- + fprintf (f, "`%s'", XSTR (x, 0));
- + return;
- +
- + case LABEL_REF:
- + fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
- + return;
- +
- + case CONST:
- + avr32_print_value (f, XEXP (x, 0));
- + return;
- +
- + case PLUS:
- + avr32_print_value (f, XEXP (x, 0));
- + fprintf (f, "+");
- + avr32_print_value (f, XEXP (x, 1));
- + return;
- +
- + case PC:
- + fprintf (f, "pc");
- + return;
- +
- + default:
- + fprintf (f, "????");
- + return;
- + }
- +}
- +
- +
- +int
- +is_minipool_label (rtx label)
- +{
- + minipool_labels *cur_mp_label = cfun->machine->minipool_label_head;
- +
- + if (GET_CODE (label) != CODE_LABEL)
- + return FALSE;
- +
- + while (cur_mp_label)
- + {
- + if (CODE_LABEL_NUMBER (label)
- + == CODE_LABEL_NUMBER (cur_mp_label->label))
- + return TRUE;
- + cur_mp_label = cur_mp_label->next;
- + }
- + return FALSE;
- +}
- +
- +
- +static void
- +new_minipool_label (rtx label)
- +{
- + if (!cfun->machine->minipool_label_head)
- + {
- + cfun->machine->minipool_label_head =
- + ggc_alloc (sizeof (minipool_labels));
- + cfun->machine->minipool_label_tail = cfun->machine->minipool_label_head;
- + cfun->machine->minipool_label_head->label = label;
- + cfun->machine->minipool_label_head->next = 0;
- + cfun->machine->minipool_label_head->prev = 0;
- + }
- + else
- + {
- + cfun->machine->minipool_label_tail->next =
- + ggc_alloc (sizeof (minipool_labels));
- + cfun->machine->minipool_label_tail->next->label = label;
- + cfun->machine->minipool_label_tail->next->next = 0;
- + cfun->machine->minipool_label_tail->next->prev =
- + cfun->machine->minipool_label_tail;
- + cfun->machine->minipool_label_tail =
- + cfun->machine->minipool_label_tail->next;
- + }
- +}
- +
- +
- +/* Output the literal table */
- +static void
- +dump_minipool (rtx scan)
- +{
- + Mnode *mp;
- + Mnode *nmp;
- +
- + if (dump_file)
- + fprintf (dump_file,
- + ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
- + INSN_UID (scan), (unsigned long) minipool_barrier->address, 4);
- +
- + scan = emit_insn_after (gen_consttable_start (), scan);
- + scan = emit_insn_after (gen_align_4 (), scan);
- + scan = emit_label_after (minipool_vector_label, scan);
- + new_minipool_label (minipool_vector_label);
- +
- + for (mp = minipool_vector_head; mp != NULL; mp = nmp)
- + {
- + if (mp->refcount > 0)
- + {
- + if (dump_file)
- + {
- + fprintf (dump_file,
- + ";; Offset %u, min %ld, max %ld ",
- + (unsigned) mp->offset, (unsigned long) mp->min_address,
- + (unsigned long) mp->max_address);
- + avr32_print_value (dump_file, mp->value);
- + fputc ('\n', dump_file);
- + }
- +
- + switch (mp->fix_size)
- + {
- +#ifdef HAVE_consttable_4
- + case 4:
- + scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
- + break;
- +
- +#endif
- +#ifdef HAVE_consttable_8
- + case 8:
- + scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
- + break;
- +
- +#endif
- +#ifdef HAVE_consttable_16
- + case 16:
- + scan = emit_insn_after (gen_consttable_16 (mp->value), scan);
- + break;
- +
- +#endif
- + case 0:
- + /* This can happen for force-minipool entries which are just
- + there to force the minipool to be generated.  */
- + break;
- + default:
- + abort ();
- + break;
- + }
- + }
- +
- + nmp = mp->next;
- + free (mp);
- + }
- +
- + minipool_vector_head = minipool_vector_tail = NULL;
- + scan = emit_insn_after (gen_consttable_end (), scan);
- + scan = emit_barrier_after (scan);
- +}
- +
- +
- +/* Return the cost of forcibly inserting a barrier after INSN. */
- +static int
- +avr32_barrier_cost (rtx insn)
- +{
- + /* Basing the location of the pool on the loop depth would be preferable,
- + but at the moment the basic block information seems to be corrupted by
- + this stage of the compilation.  */
- + int base_cost = 50;
- + rtx next = next_nonnote_insn (insn);
- +
- + if (next != NULL && GET_CODE (next) == CODE_LABEL)
- + base_cost -= 20;
- +
- + switch (GET_CODE (insn))
- + {
- + case CODE_LABEL:
- + /* It will always be better to place the table before the label, rather
- + than after it. */
- + return 50;
- +
- + case INSN:
- + case CALL_INSN:
- + return base_cost;
- +
- + case JUMP_INSN:
- + return base_cost - 10;
- +
- + default:
- + return base_cost + 10;
- + }
- +}
- +
- +
- +/* Find the best place in the insn stream in the range
- + (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
- + Create the barrier by inserting a jump and add a new fix entry for
- + it. */
- +static Mfix *
- +create_fix_barrier (Mfix * fix, HOST_WIDE_INT max_address)
- +{
- + HOST_WIDE_INT count = 0;
- + rtx barrier;
- + rtx from = fix->insn;
- + rtx selected = from;
- + int selected_cost;
- + HOST_WIDE_INT selected_address;
- + Mfix *new_fix;
- + HOST_WIDE_INT max_count = max_address - fix->address;
- + rtx label = gen_label_rtx ();
- +
- + selected_cost = avr32_barrier_cost (from);
- + selected_address = fix->address;
- +
- + while (from && count < max_count)
- + {
- + rtx tmp;
- + int new_cost;
- +
- + /* This code shouldn't have been called if there was a natural barrier
- + within range. */
- + if (GET_CODE (from) == BARRIER)
- + abort ();
- +
- + /* Count the length of this insn. */
- + count += get_attr_length (from);
- +
- + /* If there is a jump table, add its length. */
- + tmp = is_jump_table (from);
- + if (tmp != NULL)
- + {
- + count += get_jump_table_size (tmp);
- +
- + /* Jump tables aren't in a basic block, so base the cost on the
- + dispatch insn. If we select this location, we will still put
- + the pool after the table. */
- + new_cost = avr32_barrier_cost (from);
- +
- + if (count < max_count && new_cost <= selected_cost)
- + {
- + selected = tmp;
- + selected_cost = new_cost;
- + selected_address = fix->address + count;
- + }
- +
- + /* Continue after the dispatch table. */
- + from = NEXT_INSN (tmp);
- + continue;
- + }
- +
- + new_cost = avr32_barrier_cost (from);
- +
- + if (count < max_count && new_cost <= selected_cost)
- + {
- + selected = from;
- + selected_cost = new_cost;
- + selected_address = fix->address + count;
- + }
- +
- + from = NEXT_INSN (from);
- + }
- +
- + /* Create a new JUMP_INSN that branches around a barrier. */
- + from = emit_jump_insn_after (gen_jump (label), selected);
- + JUMP_LABEL (from) = label;
- + barrier = emit_barrier_after (from);
- + emit_label_after (label, barrier);
- +
- + /* Create a minipool barrier entry for the new barrier. */
- + new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*new_fix));
- + new_fix->insn = barrier;
- + new_fix->address = selected_address;
- + new_fix->next = fix->next;
- + fix->next = new_fix;
- +
- + return new_fix;
- +}
- +
- +
- +/* Record that there is a natural barrier in the insn stream at
- + ADDRESS. */
- +static void
- +push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
- +{
- + Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
- +
- + fix->insn = insn;
- + fix->address = address;
- +
- + fix->next = NULL;
- + if (minipool_fix_head != NULL)
- + minipool_fix_tail->next = fix;
- + else
- + minipool_fix_head = fix;
- +
- + minipool_fix_tail = fix;
- +}
- +
- +
- +/* Record INSN, which will need fixing up to load a value from the
- + minipool. ADDRESS is the offset of the insn since the start of the
- + function; LOC is a pointer to the part of the insn which requires
- + fixing; VALUE is the constant that must be loaded, which is of type
- + MODE. */
- +static void
- +push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx * loc,
- + enum machine_mode mode, rtx value)
- +{
- + Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
- + rtx body = PATTERN (insn);
- +
- + fix->insn = insn;
- + fix->address = address;
- + fix->loc = loc;
- + fix->mode = mode;
- + fix->fix_size = MINIPOOL_FIX_SIZE (mode, value);
- + fix->value = value;
- +
- + if (GET_CODE (body) == PARALLEL)
- + {
- + /* Mcall : Ks16 << 2 */
- + fix->forwards = ((1 << 15) - 1) << 2;
- + fix->backwards = (1 << 15) << 2;
- + }
- + else if (GET_CODE (body) == SET
- + && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 4)
- + {
- + if (optimize_size)
- + {
- + /* Lddpc : Ku7 << 2 */
- + fix->forwards = ((1 << 7) - 1) << 2;
- + fix->backwards = 0;
- + }
- + else
- + {
- + /* Ld.w : Ks16 */
- + fix->forwards = ((1 << 15) - 4);
- + fix->backwards = (1 << 15);
- + }
- + }
- + else if (GET_CODE (body) == SET
- + && GET_MODE_SIZE (GET_MODE (SET_DEST (body))) == 8)
- + {
- + /* Ld.d : Ks16 */
- + fix->forwards = ((1 << 15) - 4);
- + fix->backwards = (1 << 15);
- + }
- + else if (GET_CODE (body) == UNSPEC_VOLATILE
- + && XINT (body, 1) == VUNSPEC_MVRC)
- + {
- + /* Coprocessor load */
- + /* Ldc : Ku8 << 2 */
- + fix->forwards = ((1 << 8) - 1) << 2;
- + fix->backwards = 0;
- + }
- + else
- + {
- + /* Assume the worst case, which is the lddpc insn.  */
- + fix->forwards = ((1 << 7) - 1) << 2;
- + fix->backwards = 0;
- + }
- +
- + fix->minipool = NULL;
- +
- + /* If an insn doesn't have a range defined for it, then it isn't expecting
- + to be reworked by this code. Better to abort now than to generate duff
- + assembly code. */
- + if (fix->forwards == 0 && fix->backwards == 0)
- + abort ();
- +
- + if (dump_file)
- + {
- + fprintf (dump_file,
- + ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
- + GET_MODE_NAME (mode),
- + INSN_UID (insn), (unsigned long) address,
- + -1 * (long) fix->backwards, (long) fix->forwards);
- + avr32_print_value (dump_file, fix->value);
- + fprintf (dump_file, "\n");
- + }
- +
- + /* Add it to the chain of fixes. */
- + fix->next = NULL;
- +
- + if (minipool_fix_head != NULL)
- + minipool_fix_tail->next = fix;
- + else
- + minipool_fix_head = fix;
- +
- + minipool_fix_tail = fix;
- +}
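- +
- +/* The ranges set above follow from the instruction encodings; for the
- + size-optimized lddpc case, Ku7 << 2 gives ((1 << 7) - 1) << 2 = 508
- + bytes of forward reach and none backwards, while the mcall case
- + (Ks16 << 2) reaches roughly 128KB in either direction.  */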
- +
- +
- +/* Scan INSN and note any of its operands that need fixing.
- + If DO_PUSHES is false we do not actually push any of the fixups
- + needed. The function returns TRUE if any fixups were needed/pushed.
- + This is used by avr32_memory_load_p() which needs to know about loads
- + of constants that will be converted into minipool loads. */
- +static bool
- +note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
- +{
- + bool result = false;
- + int opno;
- +
- + extract_insn (insn);
- +
- + if (!constrain_operands (1))
- + fatal_insn_not_found (insn);
- +
- + if (recog_data.n_alternatives == 0)
- + return false;
- +
- + /* Fill in recog_op_alt with information about the constraints of this
- + insn. */
- + preprocess_constraints ();
- +
- + for (opno = 0; opno < recog_data.n_operands; opno++)
- + {
- + rtx op;
- +
- + /* Things we need to fix can only occur in inputs. */
- + if (recog_data.operand_type[opno] != OP_IN)
- + continue;
- +
- + op = recog_data.operand[opno];
- +
- + if (avr32_const_pool_ref_operand (op, GET_MODE (op)))
- + {
- + if (do_pushes)
- + {
- + rtx cop = avoid_constant_pool_reference (op);
- +
- + /* Casting the address of something to a mode narrower than a
- + word can cause avoid_constant_pool_reference() to return the
- + pool reference itself. That's no good to us here. Let's
- + just hope that we can use the constant pool value directly.
- + */
- + if (op == cop)
- + cop = get_pool_constant (XEXP (op, 0));
- +
- + push_minipool_fix (insn, address,
- + recog_data.operand_loc[opno],
- + recog_data.operand_mode[opno], cop);
- + }
- +
- + result = true;
- + }
- + else if (TARGET_HAS_ASM_ADDR_PSEUDOS
- + && avr32_address_operand (op, GET_MODE (op)))
- + {
- + /* Handle pseudo instructions using a direct address. These pseudo
- + instructions might need entries in the constant pool and we must
- + therefore create a constant pool for them, in case the
- + assembler/linker needs to insert entries. */
- + if (do_pushes)
- + {
- + /* Push a dummy constant pool entry so that the .cpool
- + directive is inserted at the appropriate place in the
- + code even if there are no real constant pool entries. This
- + is used by the assembler and linker to know where to put
- + generated constant pool entries. */
- + push_minipool_fix (insn, address,
- + recog_data.operand_loc[opno],
- + recog_data.operand_mode[opno],
- + gen_rtx_UNSPEC (VOIDmode,
- + gen_rtvec (1, const0_rtx),
- + UNSPEC_FORCE_MINIPOOL));
- + result = true;
- + }
- + }
- + }
- + return result;
- +}
- +
- +
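- +/* Return TRUE if INSN is a register-to-register extension, i.e. an
- + insn whose pattern has the shape
- +
- + (set (reg) (zero_extend (reg)))
- +
- + or the sign_extend equivalent. */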
- +static int
- +avr32_insn_is_cast (rtx insn)
- +{
- +
- + if (NONJUMP_INSN_P (insn)
- + && GET_CODE (PATTERN (insn)) == SET
- + && (GET_CODE (SET_SRC (PATTERN (insn))) == ZERO_EXTEND
- + || GET_CODE (SET_SRC (PATTERN (insn))) == SIGN_EXTEND)
- + && REG_P (XEXP (SET_SRC (PATTERN (insn)), 0))
- + && REG_P (SET_DEST (PATTERN (insn))))
- + return true;
- + return false;
- +}
- +
- +
- +/* Replace all occurrences of reg FROM with reg TO in X. */
- +rtx
- +avr32_replace_reg (rtx x, rtx from, rtx to)
- +{
- + int i, j;
- + const char *fmt;
- +
- + gcc_assert ( REG_P (from) && REG_P (to) );
- +
- + /* Allow this function to make replacements in EXPR_LISTs. */
- + if (x == 0)
- + return 0;
- +
- + if (rtx_equal_p (x, from))
- + return to;
- +
- + if (GET_CODE (x) == SUBREG)
- + {
- + rtx new = avr32_replace_reg (SUBREG_REG (x), from, to);
- +
- + if (GET_CODE (new) == CONST_INT)
- + {
- + x = simplify_subreg (GET_MODE (x), new,
- + GET_MODE (SUBREG_REG (x)),
- + SUBREG_BYTE (x));
- + gcc_assert (x);
- + }
- + else
- + SUBREG_REG (x) = new;
- +
- + return x;
- + }
- + else if (GET_CODE (x) == ZERO_EXTEND)
- + {
- + rtx new = avr32_replace_reg (XEXP (x, 0), from, to);
- +
- + if (GET_CODE (new) == CONST_INT)
- + {
- + x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
- + new, GET_MODE (XEXP (x, 0)));
- + gcc_assert (x);
- + }
- + else
- + XEXP (x, 0) = new;
- +
- + return x;
- + }
- +
- + fmt = GET_RTX_FORMAT (GET_CODE (x));
- + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
- + {
- + if (fmt[i] == 'e')
- + XEXP (x, i) = avr32_replace_reg (XEXP (x, i), from, to);
- + else if (fmt[i] == 'E')
- + for (j = XVECLEN (x, i) - 1; j >= 0; j--)
- + XVECEXP (x, i, j) = avr32_replace_reg (XVECEXP (x, i, j), from, to);
- + }
- +
- + return x;
- +}
- +
- +
- +/* FIXME: The level of nesting in this function is way too deep. It needs to be
- + torn apart. */
- +static void
- +avr32_reorg_optimization (void)
- +{
- + rtx first = get_first_nonnote_insn ();
- + rtx insn;
- +
- + if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
- + {
- +
- + /* Scan through all insns looking for cast operations. */
- + if (dump_file)
- + {
- + fprintf (dump_file, ";; Deleting redundant cast operations:\n");
- + }
- + for (insn = first; insn; insn = NEXT_INSN (insn))
- + {
- + rtx reg, src_reg, scan;
- + enum machine_mode mode;
- + int unused_cast;
- + rtx label_ref;
- +
- + if (avr32_insn_is_cast (insn)
- + && (GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == QImode
- + || GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == HImode))
- + {
- + mode = GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 0));
- + reg = SET_DEST (PATTERN (insn));
- + src_reg = XEXP (SET_SRC (PATTERN (insn)), 0);
- + }
- + else
- + {
- + continue;
- + }
- +
- + unused_cast = false;
- + label_ref = NULL_RTX;
- + for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
- + {
- + /* Check if we have reached the destination of a simple
- + conditional jump which we have already scanned past. If so,
- + we can safely continue scanning. */
- + if (LABEL_P (scan) && label_ref != NULL_RTX)
- + {
- + if (CODE_LABEL_NUMBER (scan) ==
- + CODE_LABEL_NUMBER (XEXP (label_ref, 0)))
- + label_ref = NULL_RTX;
- + else
- + break;
- + }
- +
- + if (!INSN_P (scan))
- + continue;
- +
- + /* For conditional jumps we can keep on scanning if we meet the
- + destination label later on, before any new jump insns
- + occur. */
- + if (GET_CODE (scan) == JUMP_INSN)
- + {
- + if (any_condjump_p (scan) && label_ref == NULL_RTX)
- + label_ref = condjump_label (scan);
- + else
- + break;
- + }
- +
- + /* Check if we have a call and the register is used as an argument. */
- + if (CALL_P (scan)
- + && find_reg_fusage (scan, USE, reg) )
- + break;
- +
- + if (!reg_mentioned_p (reg, PATTERN (scan)))
- + continue;
- +
- + /* Check if the cast register is used in this insn. */
- + if ((regno_use_in (REGNO (reg), PATTERN (scan)) != NULL_RTX)
- + && (GET_MODE (regno_use_in (REGNO (reg), PATTERN (scan))) ==
- + GET_MODE (reg)))
- + {
- + /* If not used in the source of the set or in a memory
- + expression in the destination, then the register is used
- + only as a destination and is really dead. */
- + if (single_set (scan)
- + && GET_CODE (PATTERN (scan)) == SET
- + && REG_P (SET_DEST (PATTERN (scan)))
- + && !regno_use_in (REGNO (reg), SET_SRC (PATTERN (scan)))
- + && label_ref == NULL_RTX)
- + {
- + unused_cast = true;
- + }
- + break;
- + }
- +
- + /* Check if register is dead or set in this insn */
- + if (dead_or_set_p (scan, reg))
- + {
- + unused_cast = true;
- + break;
- + }
- + }
- +
- + /* Check if we have unresolved conditional jumps */
- + if (label_ref != NULL_RTX)
- + continue;
- +
- + if (unused_cast)
- + {
- + if (REGNO (reg) == REGNO (XEXP (SET_SRC (PATTERN (insn)), 0)))
- + {
- + /* One operand cast, safe to delete */
- + if (dump_file)
- + {
- + fprintf (dump_file,
- + ";; INSN %i removed, casted register %i value not used.\n",
- + INSN_UID (insn), REGNO (reg));
- + }
- + SET_INSN_DELETED (insn);
- + /* Force the instruction to be recognized again */
- + INSN_CODE (insn) = -1;
- + }
- + else
- + {
- + /* Two-operand cast, which could be replaced by a move: if
- + the source register is dead after the cast insn, the insn
- + which sets the source register can instead set the
- + destination register of the cast directly, as long as no
- + insns in between use the register. */
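- + /* As an illustration (register numbers invented), given
- +
- + (set (reg:QI 8) (...)) ; sets the source register
- + (set (reg:SI 9) (zero_extend:SI (reg:QI 8))) ; cast, r8 dead
- +
- + the first insn can be changed to set register 9 directly and
- + the cast insn deleted. */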
- + rtx link = NULL_RTX;
- + rtx set;
- + rtx src_reg = XEXP (SET_SRC (PATTERN (insn)), 0);
- + unused_cast = false;
- +
- + if (!find_reg_note (insn, REG_DEAD, src_reg))
- + continue;
- +
- + /* Search for the insn which sets the source register */
- + for (scan = PREV_INSN (insn);
- + scan && GET_CODE (scan) != CODE_LABEL;
- + scan = PREV_INSN (scan))
- + {
- + if (! INSN_P (scan))
- + continue;
- +
- + set = single_set (scan);
- + /* Fix for bug #11763: the following if condition has been
- + modified, and an else part is included, to set the link
- + to NULL_RTX. The old condition was:
- + if (set && rtx_equal_p (src_reg, SET_DEST (set))) */
- + if (set && (REGNO (src_reg) == REGNO (SET_DEST (set))))
- + {
- + if (rtx_equal_p (src_reg, SET_DEST (set)))
- + {
- + link = scan;
- + break;
- + }
- + else
- + {
- + link = NULL_RTX;
- + break;
- + }
- + }
- + }
- +
- +
- + /* Found no link, or link is a call insn where we cannot
- + change the destination register. */
- + if (link == NULL_RTX || CALL_P (link))
- + continue;
- +
- + /* Scan through all insn between link and insn */
- + for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
- + {
- + /* Don't try to trace forward past a CODE_LABEL if we
- + haven't seen INSN yet. Ordinarily, we will only
- + find the setting insn in LOG_LINKS if it is in the
- + same basic block. However, cross-jumping can insert
- + code labels in between the load and the call, and
- + can result in situations where a single call insn
- + may have two targets depending on where we came
- + from. */
- +
- + if (GET_CODE (scan) == CODE_LABEL)
- + break;
- +
- + if (!INSN_P (scan))
- + continue;
- +
- + /* Don't try to trace forward past a JUMP. To optimize
- + safely, we would have to check that all the
- + instructions at the jump destination did not use REG.
- + */
- +
- + if (GET_CODE (scan) == JUMP_INSN)
- + {
- + break;
- + }
- +
- + if (!reg_mentioned_p (src_reg, PATTERN (scan)))
- + continue;
- +
- + /* We have reached the cast insn */
- + if (scan == insn)
- + {
- + /* We can remove cast and replace the destination
- + register of the link insn with the destination
- + of the cast */
- + if (dump_file)
- + {
- + fprintf (dump_file,
- + ";; INSN %i removed, casted value unused. "
- + "Destination of removed cast operation: register %i, folded into INSN %i.\n",
- + INSN_UID (insn), REGNO (reg),
- + INSN_UID (link));
- + }
- + /* Update link insn */
- + SET_DEST (PATTERN (link)) =
- + gen_rtx_REG (mode, REGNO (reg));
- + /* Force the instruction to be recognized again */
- + INSN_CODE (link) = -1;
- +
- + /* Delete insn */
- + SET_INSN_DELETED (insn);
- + /* Force the instruction to be recognized again */
- + INSN_CODE (insn) = -1;
- + break;
- + }
- + }
- + }
- + }
- + }
- + }
- +
- + if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
- + {
- +
- + /* Scan through all insns looking for shifted add operations */
- + if (dump_file)
- + {
- + fprintf (dump_file,
- + ";; Deleting redundant shifted add operations:\n");
- + }
- + for (insn = first; insn; insn = NEXT_INSN (insn))
- + {
- + rtx reg, mem_expr, scan, op0, op1;
- + int add_only_used_as_pointer;
- +
- + if (INSN_P (insn)
- + && GET_CODE (PATTERN (insn)) == SET
- + && GET_CODE (SET_SRC (PATTERN (insn))) == PLUS
- + && (GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == MULT
- + || GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0)) == ASHIFT)
- + && GET_CODE (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1)) ==
- + CONST_INT && REG_P (SET_DEST (PATTERN (insn)))
- + && REG_P (XEXP (SET_SRC (PATTERN (insn)), 1))
- + && REG_P (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0)))
- + {
- + reg = SET_DEST (PATTERN (insn));
- + mem_expr = SET_SRC (PATTERN (insn));
- + op0 = XEXP (XEXP (mem_expr, 0), 0);
- + op1 = XEXP (mem_expr, 1);
- + }
- + else
- + {
- + continue;
- + }
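- +
- + /* As an illustration (register numbers invented), for
- +
- + (set (reg:SI 8) (plus:SI (ashift:SI (reg:SI 9) (const_int 2))
- + (reg:SI 10)))
- + (set (reg:SI 11) (mem:SI (reg:SI 8)))
- +
- + the add can be deleted and the load rewritten to use the indexed
- + address (plus (ashift (reg 9) (const_int 2)) (reg 10)) directly,
- + provided register 8 is only ever used as a pointer. */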
- +
- + /* Scan forward to check that the result of the shifted add
- + operation is only used as an address in memory operations and
- + that the operands of the shifted add are not clobbered. */
- + add_only_used_as_pointer = false;
- + for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
- + {
- + if (!INSN_P (scan))
- + continue;
- +
- + /* Don't try to trace forward past a JUMP or CALL. To optimize
- + safely, we would have to check that all the instructions at
- + the jump destination did not use REG. */
- +
- + if (GET_CODE (scan) == JUMP_INSN)
- + {
- + break;
- + }
- +
- + /* If used in a call insn then we cannot optimize it away */
- + if (CALL_P (scan) && find_regno_fusage (scan, USE, REGNO (reg)))
- + break;
- +
- + /* If any of the operands of the shifted add are clobbered we
- + cannot optimize the shifted add away. */
- + if ((reg_set_p (op0, scan) && (REGNO (op0) != REGNO (reg)))
- + || (reg_set_p (op1, scan) && (REGNO (op1) != REGNO (reg))))
- + break;
- +
- + if (!reg_mentioned_p (reg, PATTERN (scan)))
- + continue;
- +
- + /* If the register is used anywhere other than as a pointer or
- + as the destination register, we fail. */
- + if (!(single_set (scan)
- + && GET_CODE (PATTERN (scan)) == SET
- + && ((MEM_P (SET_DEST (PATTERN (scan)))
- + && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
- + && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) == REGNO (reg))
- + || (MEM_P (SET_SRC (PATTERN (scan)))
- + && REG_P (XEXP (SET_SRC (PATTERN (scan)), 0))
- + && REGNO (XEXP
- + (SET_SRC (PATTERN (scan)), 0)) == REGNO (reg))))
- + && !(GET_CODE (PATTERN (scan)) == SET
- + && REG_P (SET_DEST (PATTERN (scan)))
- + && !regno_use_in (REGNO (reg),
- + SET_SRC (PATTERN (scan)))))
- + break;
- +
- + /* We cannot replace the pointer in TImode insns, as these
- + have a different addressing mode from the other memory
- + insns. */
- + if ( GET_MODE (SET_DEST (PATTERN (scan))) == TImode )
- + break;
- +
- + /* Check if register is dead or set in this insn */
- + if (dead_or_set_p (scan, reg))
- + {
- + add_only_used_as_pointer = true;
- + break;
- + }
- + }
- +
- + if (add_only_used_as_pointer)
- + {
- + /* Let's delete the add insn and replace all memory references
- + which use the pointer with the full expression. */
- + if (dump_file)
- + {
- + fprintf (dump_file,
- + ";; Deleting INSN %i since address expression can be folded into all "
- + "memory references using this expression\n",
- + INSN_UID (insn));
- + }
- + SET_INSN_DELETED (insn);
- + /* Force the instruction to be recognized again */
- + INSN_CODE (insn) = -1;
- +
- + for (scan = NEXT_INSN (insn); scan; scan = NEXT_INSN (scan))
- + {
- + if (!INSN_P (scan))
- + continue;
- +
- + if (!reg_mentioned_p (reg, PATTERN (scan)))
- + continue;
- +
- + /* If the insn uses the register as a memory address, fold
- + the full address expression into it. */
- + if ((single_set (scan)
- + && GET_CODE (PATTERN (scan)) == SET
- + && ((MEM_P (SET_DEST (PATTERN (scan)))
- + && REG_P (XEXP (SET_DEST (PATTERN (scan)), 0))
- + && REGNO (XEXP (SET_DEST (PATTERN (scan)), 0)) == REGNO (reg))
- + || (MEM_P (SET_SRC (PATTERN (scan)))
- + && REG_P (XEXP (SET_SRC (PATTERN (scan)), 0))
- + && REGNO (XEXP (SET_SRC (PATTERN (scan)), 0)) == REGNO (reg)))))
- + {
- + if (dump_file)
- + {
- + fprintf (dump_file,
- + ";; Register %i replaced by indexed address in INSN %i\n",
- + REGNO (reg), INSN_UID (scan));
- + }
- + if (MEM_P (SET_DEST (PATTERN (scan))))
- + XEXP (SET_DEST (PATTERN (scan)), 0) = mem_expr;
- + else
- + XEXP (SET_SRC (PATTERN (scan)), 0) = mem_expr;
- + }
- +
- + /* Check if register is dead or set in this insn */
- + if (dead_or_set_p (scan, reg))
- + {
- + break;
- + }
- +
- + }
- + }
- + }
- + }
- +
- +
- + if (TARGET_MD_REORG_OPTIMIZATION && (optimize_size || (optimize > 0)))
- + {
- +
- + /* Scan through all insns looking for conditional register to
- + register move operations */
- + if (dump_file)
- + {
- + fprintf (dump_file,
- + ";; Folding redundant conditional move operations:\n");
- + }
- + for (insn = first; insn; insn = next_nonnote_insn (insn))
- + {
- + rtx src_reg, dst_reg, scan, test;
- +
- + if (INSN_P (insn)
- + && GET_CODE (PATTERN (insn)) == COND_EXEC
- + && GET_CODE (COND_EXEC_CODE (PATTERN (insn))) == SET
- + && REG_P (SET_SRC (COND_EXEC_CODE (PATTERN (insn))))
- + && REG_P (SET_DEST (COND_EXEC_CODE (PATTERN (insn))))
- + && find_reg_note (insn, REG_DEAD, SET_SRC (COND_EXEC_CODE (PATTERN (insn)))))
- + {
- + src_reg = SET_SRC (COND_EXEC_CODE (PATTERN (insn)));
- + dst_reg = SET_DEST (COND_EXEC_CODE (PATTERN (insn)));
- + test = COND_EXEC_TEST (PATTERN (insn));
- + }
- + else
- + {
- + continue;
- + }
- +
- + /* Scan backward through the rest of the insns in this if-then or if-else
- + block and check if we can fold the move into another of the conditional
- + insns in the same block. */
- + scan = prev_nonnote_insn (insn);
- + while (INSN_P (scan)
- + && GET_CODE (PATTERN (scan)) == COND_EXEC
- + && rtx_equal_p (COND_EXEC_TEST (PATTERN (scan)), test))
- + {
- + rtx pattern = COND_EXEC_CODE (PATTERN (scan));
- + if ( GET_CODE (pattern) == PARALLEL )
- + pattern = XVECEXP (pattern, 0, 0);
- +
- + if ( reg_set_p (src_reg, pattern) )
- + {
- + /* Fold in the destination register for the cond. move
- + into this insn. */
- + SET_DEST (pattern) = dst_reg;
- + if (dump_file)
- + {
- + fprintf (dump_file,
- + ";; Deleting INSN %i since this operation can be folded into INSN %i\n",
- + INSN_UID (insn), INSN_UID (scan));
- + }
- +
- + /* Scan and check if any of the insns in between use the src_reg.
- + If so, we must replace it with the dst_reg. */
- + while ((scan = next_nonnote_insn (scan)) != insn)
- + {
- + avr32_replace_reg (scan, src_reg, dst_reg);
- + }
- + /* Delete the insn. */
- + SET_INSN_DELETED (insn);
- +
- + /* Force the instruction to be recognized again */
- + INSN_CODE (insn) = -1;
- + break;
- + }
- +
- + /* If the destination register is used but not set in this insn
- + we cannot fold. */
- + if ( reg_mentioned_p (dst_reg, pattern) )
- + break;
- +
- + scan = prev_nonnote_insn (scan);
- + }
- + }
- + }
- +
- +}
- +
- +
- +/* Exported to toplev.c.
- +
- + Do a final pass over the function, just before delayed branch
- + scheduling. */
- +static void
- +avr32_reorg (void)
- +{
- + rtx insn;
- + HOST_WIDE_INT address = 0;
- + Mfix *fix;
- +
- + minipool_fix_head = minipool_fix_tail = NULL;
- +
- + /* The first insn must always be a note, or the code below won't scan it
- + properly. */
- + insn = get_insns ();
- + if (GET_CODE (insn) != NOTE)
- + abort ();
- +
- + /* Scan all the insns and record the operands that will need fixing. */
- + for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
- + {
- + if (GET_CODE (insn) == BARRIER)
- + push_minipool_barrier (insn, address);
- + else if (INSN_P (insn))
- + {
- + rtx table;
- +
- + note_invalid_constants (insn, address, true);
- + address += get_attr_length (insn);
- +
- + /* If the insn is a vector jump, add the size of the table and skip
- + the table. */
- + if ((table = is_jump_table (insn)) != NULL)
- + {
- + address += get_jump_table_size (table);
- + insn = table;
- + }
- + }
- + }
- +
- + fix = minipool_fix_head;
- +
- + /* Now scan the fixups and perform the required changes. */
- + while (fix)
- + {
- + Mfix *ftmp;
- + Mfix *fdel;
- + Mfix *last_added_fix;
- + Mfix *last_barrier = NULL;
- + Mfix *this_fix;
- +
- + /* Skip any further barriers before the next fix. */
- + while (fix && GET_CODE (fix->insn) == BARRIER)
- + fix = fix->next;
- +
- + /* No more fixes. */
- + if (fix == NULL)
- + break;
- +
- + last_added_fix = NULL;
- +
- + for (ftmp = fix; ftmp; ftmp = ftmp->next)
- + {
- + if (GET_CODE (ftmp->insn) == BARRIER)
- + {
- + if (ftmp->address >= minipool_vector_head->max_address)
- + break;
- +
- + last_barrier = ftmp;
- + }
- + else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
- + break;
- +
- + last_added_fix = ftmp; /* Keep track of the last fix added. */
- + }
- +
- + /* If we found a barrier, drop back to that; any fixes that we could
- + have reached but come after the barrier will now go in the next
- + mini-pool. */
- + if (last_barrier != NULL)
- + {
- + /* Reduce the refcount for those fixes that won't go into this pool
- + after all. */
- + for (fdel = last_barrier->next;
- + fdel && fdel != ftmp; fdel = fdel->next)
- + {
- + fdel->minipool->refcount--;
- + fdel->minipool = NULL;
- + }
- +
- + ftmp = last_barrier;
- + }
- + else
- + {
- + /* ftmp is the first fix that we can't fit into this pool and
- + there are no natural barriers that we could use. Insert a new
- + barrier in the code somewhere between the previous fix and
- + this one, and arrange to jump around it. */
- + HOST_WIDE_INT max_address;
- +
- + /* The last item on the list of fixes must be a barrier, so we can
- + never run off the end of the list of fixes without last_barrier
- + being set. */
- + if (ftmp == NULL)
- + abort ();
- +
- + max_address = minipool_vector_head->max_address;
- + /* Check that there isn't another fix that is in range that we
- + couldn't fit into this pool because the pool was already too
- + large: we need to put the pool before such an instruction. */
- + if (ftmp->address < max_address)
- + max_address = ftmp->address;
- +
- + last_barrier = create_fix_barrier (last_added_fix, max_address);
- + }
- +
- + assign_minipool_offsets (last_barrier);
- +
- + while (ftmp)
- + {
- + if (GET_CODE (ftmp->insn) != BARRIER
- + && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
- + == NULL))
- + break;
- +
- + ftmp = ftmp->next;
- + }
- +
- + /* Scan over the fixes we have identified for this pool, fixing them up
- + and adding the constants to the pool itself. */
- + for (this_fix = fix; this_fix && ftmp != this_fix;
- + this_fix = this_fix->next)
- + if (GET_CODE (this_fix->insn) != BARRIER
- + /* Do nothing for entries present just to force the insertion of
- + a minipool. */
- + && !IS_FORCE_MINIPOOL (this_fix->value))
- + {
- + rtx addr = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
- + minipool_vector_label),
- + this_fix->minipool->offset);
- + *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
- + }
- +
- + dump_minipool (last_barrier->insn);
- + fix = ftmp;
- + }
- +
- + /* Free the minipool memory. */
- + obstack_free (&minipool_obstack, minipool_startobj);
- +
- + avr32_reorg_optimization ();
- +}
- +
- +
- +/* Hook for doing some final scanning of instructions. Does nothing yet... */
- +void
- +avr32_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
- + rtx * opvec ATTRIBUTE_UNUSED,
- + int noperands ATTRIBUTE_UNUSED)
- +{
- + return;
- +}
- +
- +
- +/* Function for changing the condition of the next instruction.
- + Should be used when emitting compare instructions and the
- + condition of the next instruction needs to change.
- +*/
- +int
- +set_next_insn_cond (rtx cur_insn, rtx new_cond)
- +{
- + rtx next_insn = next_nonnote_insn (cur_insn);
- + if ((next_insn != NULL_RTX)
- + && (INSN_P (next_insn)))
- + {
- + if ((GET_CODE (PATTERN (next_insn)) == SET)
- + && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE))
- + {
- + /* Branch instructions */
- + XEXP (SET_SRC (PATTERN (next_insn)), 0) = new_cond;
- + /* Force the instruction to be recognized again */
- + INSN_CODE (next_insn) = -1;
- + return TRUE;
- + }
- + else if ((GET_CODE (PATTERN (next_insn)) == SET)
- + && avr32_comparison_operator (SET_SRC (PATTERN (next_insn)),
- + GET_MODE (SET_SRC (PATTERN (next_insn)))))
- + {
- + /* scc with no compare */
- + SET_SRC (PATTERN (next_insn)) = new_cond;
- + /* Force the instruction to be recognized again */
- + INSN_CODE (next_insn) = -1;
- + return TRUE;
- + }
- + else if (GET_CODE (PATTERN (next_insn)) == COND_EXEC)
- + {
- + if ( GET_CODE (new_cond) == UNSPEC )
- + {
- + COND_EXEC_TEST (PATTERN (next_insn)) =
- + gen_rtx_UNSPEC (CCmode,
- + gen_rtvec (2,
- + XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 0),
- + XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 1)),
- + XINT (new_cond, 1));
- + }
- + else
- + {
- + PUT_CODE(COND_EXEC_TEST (PATTERN (next_insn)), GET_CODE(new_cond));
- + }
- + }
- + }
- +
- + return FALSE;
- +}
- +
- +
- +/* Function for obtaining the condition for the next instruction after cur_insn.
- +*/
- +rtx
- +get_next_insn_cond (rtx cur_insn)
- +{
- + rtx next_insn = next_nonnote_insn (cur_insn);
- + rtx cond = NULL_RTX;
- + if (next_insn != NULL_RTX
- + && INSN_P (next_insn))
- + {
- + if ((GET_CODE (PATTERN (next_insn)) == SET)
- + && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE))
- + {
- + /* Branch and cond if then else instructions */
- + cond = XEXP (SET_SRC (PATTERN (next_insn)), 0);
- + }
- + else if ((GET_CODE (PATTERN (next_insn)) == SET)
- + && avr32_comparison_operator (SET_SRC (PATTERN (next_insn)),
- + GET_MODE (SET_SRC (PATTERN (next_insn)))))
- + {
- + /* scc with no compare */
- + cond = SET_SRC (PATTERN (next_insn));
- + }
- + else if (GET_CODE (PATTERN (next_insn)) == COND_EXEC)
- + {
- + cond = COND_EXEC_TEST (PATTERN (next_insn));
- + }
- + }
- + return cond;
- +}
- +
- +
- +/* Check if the next insn is a conditional insn that will emit a compare
- + for itself.
- +*/
- +rtx
- +next_insn_emits_cmp (rtx cur_insn)
- +{
- + rtx next_insn = next_nonnote_insn (cur_insn);
- + rtx cond = NULL_RTX;
- + if (next_insn != NULL_RTX
- + && INSN_P (next_insn))
- + {
- + if ( ((GET_CODE (PATTERN (next_insn)) == SET)
- + && (GET_CODE (SET_SRC (PATTERN (next_insn))) == IF_THEN_ELSE)
- + && (XEXP (XEXP (SET_SRC (PATTERN (next_insn)), 0),0) != cc0_rtx))
- + || GET_CODE (PATTERN (next_insn)) == COND_EXEC )
- + return TRUE;
- + }
- + return FALSE;
- +}
- +
- +
- +rtx
- +avr32_output_cmp (rtx cond, enum machine_mode mode, rtx op0, rtx op1)
- +{
- +
- + rtx new_cond = NULL_RTX;
- + rtx ops[2];
- + rtx compare_pattern;
- + ops[0] = op0;
- + ops[1] = op1;
- +
- + if ( GET_CODE (op0) == AND )
- + compare_pattern = op0;
- + else
- + compare_pattern = gen_rtx_COMPARE (mode, op0, op1);
- +
- + new_cond = is_compare_redundant (compare_pattern, cond);
- +
- + if (new_cond != NULL_RTX)
- + return new_cond;
- +
- + /* Check if we are inserting a bit-load instead of a compare. */
- + if ( GET_CODE (op0) == AND )
- + {
- + ops[0] = XEXP (op0, 0);
- + ops[1] = XEXP (op0, 1);
- + output_asm_insn ("bld\t%0, %p1", ops);
- + return cond;
- + }
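- +
- + /* E.g. a test such as (and (reg) (const_int 0x80)) is emitted as
- + "bld <reg>, 7", where the %p modifier is assumed to print the
- + bit position of the one-bit mask, so no separate compare insn
- + is needed. */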
- +
- + /* Insert compare */
- + switch (mode)
- + {
- + case QImode:
- + output_asm_insn ("cp.b\t%0, %1", ops);
- + break;
- + case HImode:
- + output_asm_insn ("cp.h\t%0, %1", ops);
- + break;
- + case SImode:
- + output_asm_insn ("cp.w\t%0, %1", ops);
- + break;
- + case DImode:
- + if (GET_CODE (op1) != REG)
- + output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0", ops);
- + else
- + output_asm_insn ("cp.w\t%0, %1\ncpc\t%m0, %m1", ops);
- + break;
- + default:
- + internal_error ("Unknown comparison mode");
- + break;
- + }
- +
- + return cond;
- +}
- +
- +
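- +/* Recognize a load-multiple PARALLEL. A sketch of the accepted form,
- + here for a two-register load with write-back (register numbers are
- + only illustrative):
- +
- + (parallel [(set (reg:SI 12) (plus:SI (reg:SI 12) (const_int 8)))
- + (set (reg:SI 0) (unspec:SI [...] ...))
- + (set (reg:SI 1) (unspec:SI [...] ...))]) */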
- +int
- +avr32_load_multiple_operation (rtx op,
- + enum machine_mode mode ATTRIBUTE_UNUSED)
- +{
- + int count = XVECLEN (op, 0);
- + unsigned int dest_regno;
- + rtx src_addr;
- + rtx elt;
- + int i = 1, base = 0;
- +
- + if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
- + return 0;
- +
- + /* Check to see if this might be a write-back. */
- + if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
- + {
- + i++;
- + base = 1;
- +
- + /* Now check it more carefully. */
- + if (GET_CODE (SET_DEST (elt)) != REG
- + || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
- + || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
- + || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
- + return 0;
- + }
- +
- + /* Perform a quick check so we don't blow up below. */
- + if (count <= 1
- + || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
- + || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
- + || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC)
- + return 0;
- +
- + dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
- + src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
- +
- + for (; i < count; i++)
- + {
- + elt = XVECEXP (op, 0, i);
- +
- + if (GET_CODE (elt) != SET
- + || GET_CODE (SET_DEST (elt)) != REG
- + || GET_MODE (SET_DEST (elt)) != SImode
- + || GET_CODE (SET_SRC (elt)) != UNSPEC)
- + return 0;
- + }
- +
- + return 1;
- +}
- +
- +
- +int
- +avr32_store_multiple_operation (rtx op,
- + enum machine_mode mode ATTRIBUTE_UNUSED)
- +{
- + int count = XVECLEN (op, 0);
- + int src_regno;
- + rtx dest_addr;
- + rtx elt;
- + int i = 1;
- +
- + if (count <= 1 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
- + return 0;
- +
- + /* Perform a quick check so we don't blow up below. */
- + if (count <= i
- + || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
- + || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
- + || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != UNSPEC)
- + return 0;
- +
- + src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
- + dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
- +
- + for (; i < count; i++)
- + {
- + elt = XVECEXP (op, 0, i);
- +
- + if (GET_CODE (elt) != SET
- + || GET_CODE (SET_DEST (elt)) != MEM
- + || GET_MODE (SET_DEST (elt)) != SImode
- + || GET_CODE (SET_SRC (elt)) != UNSPEC)
- + return 0;
- + }
- +
- + return 1;
- +}
- +
- +
- +int
- +avr32_valid_macmac_bypass (rtx insn_out, rtx insn_in)
- +{
- + /* Check if they use the same accumulator */
- + if (rtx_equal_p
- + (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
- + {
- + return TRUE;
- + }
- +
- + return FALSE;
- +}
- +
- +
- +int
- +avr32_valid_mulmac_bypass (rtx insn_out, rtx insn_in)
- +{
- + /* Check if the mul instruction produces the accumulator for the
- + mac instruction. */
- + if (rtx_equal_p
- + (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
- + {
- + return TRUE;
- + }
- + return FALSE;
- +}
- +
- +
- +int
- +avr32_store_bypass (rtx insn_out, rtx insn_in)
- +{
- + /* Only a valid bypass if the output result is used as a source in
- + the store instruction, NOT if used as a pointer or base. */
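- + /* E.g. a result feeding the stored value in "st.w r10[0], r8"
- + (r8 here, register numbers illustrative) qualifies, while a
- + result feeding the address register r10 does not. */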
- + if (rtx_equal_p
- + (SET_DEST (PATTERN (insn_out)), SET_SRC (PATTERN (insn_in))))
- + {
- + return TRUE;
- + }
- +
- + return FALSE;
- +}
- +
- +
- +int
- +avr32_mul_waw_bypass (rtx insn_out, rtx insn_in)
- +{
- + /* Check if the register holding the result from the mul instruction is
- + used as a result register in the input instruction. */
- + if (rtx_equal_p
- + (SET_DEST (PATTERN (insn_out)), SET_DEST (PATTERN (insn_in))))
- + {
- + return TRUE;
- + }
- +
- + return FALSE;
- +}
- +
- +
- +int
- +avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in)
- +{
- + /* Check that only the first loaded word in insn_out is used by insn_in. */
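- + /* E.g. for a DImode load into the register pair starting at rd,
- + the bypass applies only when insn_in does not touch register
- + rd + 1, the second loaded word. */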
- + rtx dst_reg;
- + rtx second_loaded_reg;
- +
- + /* If this is a double alu operation then the bypass is not valid */
- + if ((get_attr_type (insn_in) == TYPE_ALU
- + || get_attr_type (insn_in) == TYPE_ALU2)
- + && (GET_MODE_SIZE (GET_MODE (SET_DEST (PATTERN (insn_out)))) > 4))
- + return FALSE;
- +
- + /* Get the destination register in the load */
- + if (!REG_P (SET_DEST (PATTERN (insn_out))))
- + return FALSE;
- +
- + dst_reg = SET_DEST (PATTERN (insn_out));
- + second_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 1);
- +
- + if (!reg_mentioned_p (second_loaded_reg, PATTERN (insn_in)))
- + return TRUE;
- +
- + return FALSE;
- +}
- +
- +
- +int
- +avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in)
- +{
- + /* Check that only the first two loaded words in insn_out are used
- + by insn_in. */
- + rtx dst_reg;
- + rtx third_loaded_reg, fourth_loaded_reg;
- +
- + /* Get the destination register in the load */
- + if (!REG_P (SET_DEST (PATTERN (insn_out))))
- + return FALSE;
- +
- + dst_reg = SET_DEST (PATTERN (insn_out));
- + third_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 2);
- + fourth_loaded_reg = gen_rtx_REG (SImode, REGNO (dst_reg) + 3);
- +
- + if (!reg_mentioned_p (third_loaded_reg, PATTERN (insn_in))
- + && !reg_mentioned_p (fourth_loaded_reg, PATTERN (insn_in)))
- + {
- + return TRUE;
- + }
- +
- + return FALSE;
- +}
- +
- +
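- +/* Rewrite a cc0-based TEST into an explicit comparison on the operands
- + of the compare insn which sets cc0. As a sketch, a test such as
- + (ne cc0 (const_int 0)) preceded by (set cc0 (compare (reg 8) (reg 9)))
- + becomes (ne (reg 8) (reg 9)). */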
- +rtx
- +avr32_ifcvt_modify_test (ce_if_block_t *ce_info, rtx test )
- +{
- + rtx branch_insn;
- + rtx cmp_test;
- + rtx compare_op0;
- + rtx compare_op1;
- +
- +
- + if ( !ce_info
- + || test == NULL_RTX
- + || !reg_mentioned_p (cc0_rtx, test))
- + return test;
- +
- + branch_insn = BB_END (ce_info->test_bb);
- + cmp_test = PATTERN(prev_nonnote_insn (branch_insn));
- +
- + if (GET_CODE(cmp_test) != SET
- + || !CC0_P(XEXP(cmp_test, 0)) )
- + return cmp_test;
- +
- + if (GET_CODE (SET_SRC (cmp_test)) == COMPARE)
- + {
- + compare_op0 = XEXP (SET_SRC (cmp_test), 0);
- + compare_op1 = XEXP (SET_SRC (cmp_test), 1);
- + }
- + else
- + {
- + compare_op0 = SET_SRC (cmp_test);
- + compare_op1 = const0_rtx;
- + }
- +
- + return gen_rtx_fmt_ee (GET_CODE(test), GET_MODE (compare_op0),
- + compare_op0, compare_op1);
- +}
- +
- +
- +rtx
- +avr32_ifcvt_modify_insn (ce_if_block_t *ce_info, rtx pattern, rtx insn,
- + int *num_true_changes)
- +{
- + rtx test = COND_EXEC_TEST(pattern);
- + rtx op = COND_EXEC_CODE(pattern);
- + rtx cmp_insn;
- + rtx cond_exec_insn;
- + int inputs_set_outside_ifblock = 1;
- + basic_block current_bb = BLOCK_FOR_INSN (insn);
- + rtx bb_insn ;
- + enum machine_mode mode = GET_MODE (XEXP (op, 0));
- +
- + if (CC0_P(XEXP(test, 0)))
- + test = avr32_ifcvt_modify_test (ce_info, test);
- +
- + /* We do not support multiple tests. */
- + if ( ce_info
- + && ce_info->num_multiple_test_blocks > 0 )
- + return NULL_RTX;
- +
- + pattern = gen_rtx_COND_EXEC (VOIDmode, test, op);
- +
- + if ( !reload_completed )
- + {
- + rtx start;
- + int num_insns;
- + int max_insns = MAX_CONDITIONAL_EXECUTE;
- +
- + if ( !ce_info )
- + return op;
- +
- + /* Check if the insn is not suitable for conditional
- + execution. */
- + start_sequence ();
- + cond_exec_insn = emit_insn (pattern);
- + if ( recog_memoized (cond_exec_insn) < 0
- + && can_create_pseudo_p () )
- + {
- + /* Insn is not suitable for conditional execution, try
- + to fix it up by using an extra scratch register or
- + by pulling the operation outside the if-then-else
- + and then emitting a conditional move inside the if-then-else. */
- + end_sequence ();
- + if ( GET_CODE (op) != SET
- + || !REG_P (SET_DEST (op))
- + || GET_CODE (SET_SRC (op)) == IF_THEN_ELSE
- + || GET_MODE_SIZE (mode) > UNITS_PER_WORD )
- + return NULL_RTX;
- +
- + /* Check if any of the input operands to the insn is set inside the
- + current block. */
- + if ( current_bb->index == ce_info->then_bb->index )
- + start = PREV_INSN (BB_HEAD (ce_info->then_bb));
- + else
- + start = PREV_INSN (BB_HEAD (ce_info->else_bb));
- +
- +
- + for ( bb_insn = next_nonnote_insn (start); bb_insn != insn; bb_insn = next_nonnote_insn (bb_insn) )
- + {
- + rtx set = single_set (bb_insn);
- +
- + if ( set && reg_mentioned_p (SET_DEST (set), SET_SRC (op)))
- + {
- + inputs_set_outside_ifblock = 0;
- + break;
- + }
- + }
- +
- + cmp_insn = prev_nonnote_insn (BB_END (ce_info->test_bb));
- +
- +
- + /* Check if we can insert more insns. */
- + num_insns = ( ce_info->num_then_insns +
- + ce_info->num_else_insns +
- + ce_info->num_cond_clobber_insns +
- + ce_info->num_extra_move_insns );
- +
- + if ( ce_info->num_else_insns != 0 )
- + max_insns *= 2;
- +
- + if ( num_insns >= max_insns )
- + return NULL_RTX;
- +
- + /* Check if we have an instruction which might be converted to
- + conditional form if we give it a scratch register to clobber. */
- + {
- + rtx clobber_insn;
- + rtx scratch_reg = gen_reg_rtx (mode);
- + rtx new_pattern = copy_rtx (pattern);
- + rtx set_src = SET_SRC (COND_EXEC_CODE (new_pattern));
- +
- + rtx clobber = gen_rtx_CLOBBER (mode, scratch_reg);
- + rtx vec[2] = { COND_EXEC_CODE (new_pattern), clobber };
- + COND_EXEC_CODE (new_pattern) = gen_rtx_PARALLEL (mode, gen_rtvec_v (2, vec));
- +
- + start_sequence ();
- + clobber_insn = emit_insn (new_pattern);
- +
- + if ( recog_memoized (clobber_insn) >= 0
- + && ( ( GET_RTX_LENGTH (GET_CODE (set_src)) == 2
- + && CONST_INT_P (XEXP (set_src, 1))
- + && avr32_const_ok_for_constraint_p (INTVAL (XEXP (set_src, 1)), 'K', "Ks08") )
- + || !ce_info->else_bb
- + || current_bb->index == ce_info->else_bb->index ))
- + {
- + end_sequence ();
- + /* Force the insn to be recognized again. */
- + INSN_CODE (insn) = -1;
- +
- + /* If this is the first change in this IF-block then
- + signal that we have made a change. */
- + if ( ce_info->num_cond_clobber_insns == 0
- + && ce_info->num_extra_move_insns == 0 )
- + *num_true_changes += 1;
- +
- + ce_info->num_cond_clobber_insns++;
- +
- + if (dump_file)
- + fprintf (dump_file,
- + "\nReplacing INSN %d with an insn using a scratch register for later ifcvt passes...\n",
- + INSN_UID (insn));
- +
- + return COND_EXEC_CODE (new_pattern);
- + }
- + end_sequence ();
- + }
- +
- + if ( inputs_set_outside_ifblock )
- + {
- + /* Check if the insn before the cmp is an and which, used
- + together with the cmp, can be optimized into a bld. If
- + so, we should try to put the insn before the and so that
- + we can catch the bld peephole. */
- + rtx set;
- + rtx insn_before_cmp_insn = prev_nonnote_insn (cmp_insn);
- + if (insn_before_cmp_insn
- + && (set = single_set (insn_before_cmp_insn))
- + && GET_CODE (SET_SRC (set)) == AND
- + && one_bit_set_operand (XEXP (SET_SRC (set), 1), SImode)
- + /* Also make sure that the insn does not set any
- + of the input operands to the insn we are pulling out. */
- + && !reg_mentioned_p (SET_DEST (set), SET_SRC (op)) )
- + cmp_insn = prev_nonnote_insn (cmp_insn);
- +
- + /* We can try to put the operation outside the if-then-else
- + blocks and insert a move. */
- + if ( !insn_invalid_p (insn)
- + /* Do not allow conditional insns to be moved outside the
- + if-then-else. */
- + && !reg_mentioned_p (cc0_rtx, insn)
- + /* We cannot move memory loads outside of the if-then-else
- + since the memory access should not be performed if the
- + condition is not met. */
- + && !mem_mentioned_p (SET_SRC (op)) )
- + {
- + rtx scratch_reg = gen_reg_rtx (mode);
- + rtx op_pattern = copy_rtx (op);
- + rtx new_insn, seq;
- + rtx link, prev_link;
- + op = copy_rtx (op);
- + /* Emit the operation to a temp reg before the compare,
- + and emit a move inside the if-then-else, hoping that the
- + whole if-then-else can be converted to conditional
- + execution. */
- + SET_DEST (op_pattern) = scratch_reg;
- + start_sequence ();
- + new_insn = emit_insn (op_pattern);
- + seq = get_insns();
- + end_sequence ();
- +
- + /* Check again that the insn is valid. Some insns may become
- + invalid if the destination register is changed, e.g. mulacc
- + operations. */
- + if ( insn_invalid_p (new_insn) )
- + return NULL_RTX;
- +
- + emit_insn_before_setloc (seq, cmp_insn, INSN_LOCATOR (insn));
- +
- + if (dump_file)
- + fprintf (dump_file,
- + "\nMoving INSN %d out of IF-block by adding INSN %d...\n",
- + INSN_UID (insn), INSN_UID (new_insn));
- +
- + ce_info->extra_move_insns[ce_info->num_extra_move_insns] = insn;
- + ce_info->moved_insns[ce_info->num_extra_move_insns] = new_insn;
- + XEXP (op, 1) = scratch_reg;
- + /* Force the insn to be recognized again. */
- + INSN_CODE (insn) = -1;
- +
- + /* Move REG_DEAD notes to the moved insn. */
- + prev_link = NULL_RTX;
- + for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
- + {
- + if (REG_NOTE_KIND (link) == REG_DEAD)
- + {
- + /* Add the REG_DEAD note to the new insn. */
- + rtx dead_reg = XEXP (link, 0);
- + REG_NOTES (new_insn) = gen_rtx_EXPR_LIST (REG_DEAD, dead_reg, REG_NOTES (new_insn));
- + /* Remove the REG_DEAD note from the insn we convert to a move. */
- + if ( prev_link )
- + XEXP (prev_link, 1) = XEXP (link, 1);
- + else
- + REG_NOTES (insn) = XEXP (link, 1);
- + }
- + else
- + {
- + prev_link = link;
- + }
- + }
- + /* Add a REG_DEAD note to signal that the scratch register is dead. */
- + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, scratch_reg, REG_NOTES (insn));
- +
- + /* If this is the first change in this IF-block then
- + signal that we have made a change. */
- + if ( ce_info->num_cond_clobber_insns == 0
- + && ce_info->num_extra_move_insns == 0 )
- + *num_true_changes += 1;
- +
- + ce_info->num_extra_move_insns++;
- + return op;
- + }
- + }
- +
- + /* We failed to fix up the insns, so this if-then-else cannot be
- + made conditional. Just return NULL_RTX so that the conversion
- + of this if-then-else will be cancelled. */
- + return NULL_RTX;
- + }
- + end_sequence ();
- + return op;
- + }
- +
- + /* Signal that we have started if-conversion after reload, which means
- + that it should be safe to split all the predicable clobber insns which
- + did not become cond_exec back into a simpler form if possible. */
- + cfun->machine->ifcvt_after_reload = 1;
- +
- + return pattern;
- +}
- +
- +
- +void
- +avr32_ifcvt_modify_cancel ( ce_if_block_t *ce_info, int *num_true_changes)
- +{
- + int n;
- +
- + if ( ce_info->num_extra_move_insns > 0
- + && ce_info->num_cond_clobber_insns == 0)
- + /* Signal that we did not make any changes after all. */
- + *num_true_changes -= 1;
- +
- + /* Remove any inserted move insns. */
- + for ( n = 0; n < ce_info->num_extra_move_insns; n++ )
- + {
- + rtx link, prev_link;
- +
- + /* Remove the REG_DEAD note since we do not need the scratch register anyway. */
- + prev_link = NULL_RTX;
- + for (link = REG_NOTES (ce_info->extra_move_insns[n]); link; link = XEXP (link, 1))
- + {
- + if (REG_NOTE_KIND (link) == REG_DEAD)
- + {
- + if ( prev_link )
- + XEXP (prev_link, 1) = XEXP (link, 1);
- + else
- + REG_NOTES (ce_info->extra_move_insns[n]) = XEXP (link, 1);
- + }
- + else
- + {
- + prev_link = link;
- + }
- + }
- +
- + /* Revert all reg_notes for the moved insn. */
- + for (link = REG_NOTES (ce_info->moved_insns[n]); link; link = XEXP (link, 1))
- + {
- + REG_NOTES (ce_info->extra_move_insns[n]) = gen_rtx_EXPR_LIST (REG_NOTE_KIND (link),
- + XEXP (link, 0),
- + REG_NOTES (ce_info->extra_move_insns[n]));
- + }
- +
- + /* Remove the moved insn. */
- + remove_insn ( ce_info->moved_insns[n] );
- + }
- +}
- +
- +
- +/* Function returning TRUE if INSN with OPERANDS is a splittable
- + conditional immediate clobber insn. We assume that the insn is
- + already a conditional immediate clobber insn and do not check
- + for that. */
- +int
- +avr32_cond_imm_clobber_splittable (rtx insn, rtx operands[])
- +{
- + if ( REGNO (operands[0]) == REGNO (operands[1]) )
- + {
- + if ( (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == PLUS
- + && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'I', "Is21"))
- + || (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == MINUS
- + && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks21")))
- + return FALSE;
- + }
- + else if ( (logical_binary_operator (SET_SRC (XVECEXP (PATTERN (insn),0,0)), VOIDmode)
- + || (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == PLUS
- + && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'I', "Is16"))
- + || (GET_CODE (SET_SRC (XVECEXP (PATTERN (insn),0,0))) == MINUS
- + && !avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks16"))) )
- + return FALSE;
- +
- + return TRUE;
- +}
- +
- +
- +/* Function for getting an integer value from a const_int or const_double
- + expression regardless of the HOST_WIDE_INT size. Each target cpu word
- + will be put into the val array where the LSW will be stored at the lowest
- + address and so forth. Assumes that const_expr is either a const_int or
- + const_double. Only valid for modes which have sizes that are a multiple
- + of the word size.
- +*/
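- +/* As a worked example (assuming 32-bit target words and a 64-bit
- + HOST_WIDE_INT): for a DImode CONST_INT 0x0000000100000002, val[0]
- + receives the LSW 0x2 and val[1] receives 0x1, each sign-extended
- + into its HOST_WIDE_INT slot. */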
- +void
- +avr32_get_intval (enum machine_mode mode, rtx const_expr, HOST_WIDE_INT *val)
- +{
- + int words_in_mode = GET_MODE_SIZE (mode)/UNITS_PER_WORD;
- + const int words_in_const_int = HOST_BITS_PER_WIDE_INT / BITS_PER_WORD;
- +
- + if ( GET_CODE(const_expr) == CONST_DOUBLE ){
- + HOST_WIDE_INT hi = CONST_DOUBLE_HIGH(const_expr);
- + HOST_WIDE_INT lo = CONST_DOUBLE_LOW(const_expr);
- + /* Evaluate hi and lo values of const_double. */
- + avr32_get_intval (mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0),
- + GEN_INT (lo),
- + &val[0]);
- + avr32_get_intval (mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0),
- + GEN_INT (hi),
- + &val[words_in_const_int]);
- + } else if ( GET_CODE(const_expr) == CONST_INT ){
- + HOST_WIDE_INT value = INTVAL(const_expr);
- + int word;
- + for ( word = 0; (word < words_in_mode) && (word < words_in_const_int); word++ ){
- + /* Shift word up to the MSW and shift down again to extract the
- + word and sign-extend. */
- + int lshift = (words_in_const_int - word - 1) * BITS_PER_WORD;
- + int rshift = (words_in_const_int-1) * BITS_PER_WORD;
- + val[word] = (value << lshift) >> rshift;
- + }
- +
- + for ( ; word < words_in_mode; word++ ){
- + /* Just put the sign bits in the remaining words. */
- + val[word] = value < 0 ? -1 : 0;
- + }
- + }
- +}
- +
- +
- +void
- +avr32_split_const_expr (enum machine_mode mode, enum machine_mode new_mode,
- + rtx expr, rtx *split_expr)
- +{
- + int i, word;
- + int words_in_intval = GET_MODE_SIZE (mode)/UNITS_PER_WORD;
- + int words_in_split_values = GET_MODE_SIZE (new_mode)/UNITS_PER_WORD;
- + const int words_in_const_int = HOST_BITS_PER_WIDE_INT / BITS_PER_WORD;
- + /* One HOST_WIDE_INT per target word. */
- + HOST_WIDE_INT *val = alloca (words_in_intval * sizeof (HOST_WIDE_INT));
- +
- + avr32_get_intval (mode, expr, val);
- +
- + for ( i=0; i < (words_in_intval/words_in_split_values); i++ )
- + {
- + HOST_WIDE_INT value_lo = 0, value_hi = 0;
- + for ( word = 0; word < words_in_split_values; word++ )
- + {
- + if ( word >= words_in_const_int )
- + value_hi |= ((val[i * words_in_split_values + word] &
- + (((HOST_WIDE_INT)1 << BITS_PER_WORD)-1))
- + << (BITS_PER_WORD * (word - words_in_const_int)));
- + else
- + value_lo |= ((val[i * words_in_split_values + word] &
- + (((HOST_WIDE_INT)1 << BITS_PER_WORD)-1))
- + << (BITS_PER_WORD * word));
- + }
- + split_expr[i] = immed_double_const(value_lo, value_hi, new_mode);
- + }
- +}
- +
- +
- +/* Set up library functions to comply with the AVR32 ABI. */
- +static void
- +avr32_init_libfuncs (void)
- +{
- + /* Convert gcc run-time function names to AVR32 ABI names */
- +
- + /* Double-precision floating-point arithmetic. */
- + set_optab_libfunc (neg_optab, DFmode, NULL);
- +
- + /* Double-precision comparisons. */
- + set_optab_libfunc (eq_optab, DFmode, "__avr32_f64_cmp_eq");
- + set_optab_libfunc (ne_optab, DFmode, NULL);
- + set_optab_libfunc (lt_optab, DFmode, "__avr32_f64_cmp_lt");
- + set_optab_libfunc (le_optab, DFmode, NULL);
- + set_optab_libfunc (ge_optab, DFmode, "__avr32_f64_cmp_ge");
- + set_optab_libfunc (gt_optab, DFmode, NULL);
- +
- + /* Single-precision floating-point arithmetic. */
- + set_optab_libfunc (smul_optab, SFmode, "__avr32_f32_mul");
- + set_optab_libfunc (neg_optab, SFmode, NULL);
- +
- + /* Single-precision comparisons. */
- + set_optab_libfunc (eq_optab, SFmode, "__avr32_f32_cmp_eq");
- + set_optab_libfunc (ne_optab, SFmode, NULL);
- + set_optab_libfunc (lt_optab, SFmode, "__avr32_f32_cmp_lt");
- + set_optab_libfunc (le_optab, SFmode, NULL);
- + set_optab_libfunc (ge_optab, SFmode, "__avr32_f32_cmp_ge");
- + set_optab_libfunc (gt_optab, SFmode, NULL);
- +
- + /* Floating-point to integer conversions. */
- + set_conv_libfunc (sfix_optab, SImode, DFmode, "__avr32_f64_to_s32");
- + set_conv_libfunc (ufix_optab, SImode, DFmode, "__avr32_f64_to_u32");
- + set_conv_libfunc (sfix_optab, DImode, DFmode, "__avr32_f64_to_s64");
- + set_conv_libfunc (ufix_optab, DImode, DFmode, "__avr32_f64_to_u64");
- + set_conv_libfunc (sfix_optab, SImode, SFmode, "__avr32_f32_to_s32");
- + set_conv_libfunc (ufix_optab, SImode, SFmode, "__avr32_f32_to_u32");
- + set_conv_libfunc (sfix_optab, DImode, SFmode, "__avr32_f32_to_s64");
- + set_conv_libfunc (ufix_optab, DImode, SFmode, "__avr32_f32_to_u64");
- +
- + /* Conversions between floating types. */
- + set_conv_libfunc (trunc_optab, SFmode, DFmode, "__avr32_f64_to_f32");
- + set_conv_libfunc (sext_optab, DFmode, SFmode, "__avr32_f32_to_f64");
- +
- + /* Integer to floating-point conversions. Table 8. */
- + set_conv_libfunc (sfloat_optab, DFmode, SImode, "__avr32_s32_to_f64");
- + set_conv_libfunc (sfloat_optab, DFmode, DImode, "__avr32_s64_to_f64");
- + set_conv_libfunc (sfloat_optab, SFmode, SImode, "__avr32_s32_to_f32");
- + set_conv_libfunc (sfloat_optab, SFmode, DImode, "__avr32_s64_to_f32");
- + set_conv_libfunc (ufloat_optab, DFmode, SImode, "__avr32_u32_to_f64");
- + set_conv_libfunc (ufloat_optab, SFmode, SImode, "__avr32_u32_to_f32");
- + /* TODO: Add these to gcc library functions */
- + //set_conv_libfunc (ufloat_optab, DFmode, DImode, NULL);
- + //set_conv_libfunc (ufloat_optab, SFmode, DImode, NULL);
- +
- + /* Long long. Table 9. */
- + set_optab_libfunc (smul_optab, DImode, "__avr32_mul64");
- + set_optab_libfunc (sdiv_optab, DImode, "__avr32_sdiv64");
- + set_optab_libfunc (udiv_optab, DImode, "__avr32_udiv64");
- + set_optab_libfunc (smod_optab, DImode, "__avr32_smod64");
- + set_optab_libfunc (umod_optab, DImode, "__avr32_umod64");
- + set_optab_libfunc (ashl_optab, DImode, "__avr32_lsl64");
- + set_optab_libfunc (lshr_optab, DImode, "__avr32_lsr64");
- + set_optab_libfunc (ashr_optab, DImode, "__avr32_asr64");
- +
- + /* Floating point library functions which have fast versions. */
- + if ( TARGET_FAST_FLOAT )
- + {
- + set_optab_libfunc (sdiv_optab, DFmode, "__avr32_f64_div_fast");
- + set_optab_libfunc (smul_optab, DFmode, "__avr32_f64_mul_fast");
- + set_optab_libfunc (add_optab, DFmode, "__avr32_f64_add_fast");
- + set_optab_libfunc (sub_optab, DFmode, "__avr32_f64_sub_fast");
- + set_optab_libfunc (add_optab, SFmode, "__avr32_f32_add_fast");
- + set_optab_libfunc (sub_optab, SFmode, "__avr32_f32_sub_fast");
- + set_optab_libfunc (sdiv_optab, SFmode, "__avr32_f32_div_fast");
- + }
- + else
- + {
- + set_optab_libfunc (sdiv_optab, DFmode, "__avr32_f64_div");
- + set_optab_libfunc (smul_optab, DFmode, "__avr32_f64_mul");
- + set_optab_libfunc (add_optab, DFmode, "__avr32_f64_add");
- + set_optab_libfunc (sub_optab, DFmode, "__avr32_f64_sub");
- + set_optab_libfunc (add_optab, SFmode, "__avr32_f32_add");
- + set_optab_libfunc (sub_optab, SFmode, "__avr32_f32_sub");
- + set_optab_libfunc (sdiv_optab, SFmode, "__avr32_f32_div");
- + }
- +}
- +
- +
- +/* Record a flashvault declaration. */
- +static void
- +flashvault_decl_list_add (unsigned int vector_num, const char *name)
- +{
- + struct flashvault_decl_list *p;
- +
- + p = (struct flashvault_decl_list *)
- + xmalloc (sizeof (struct flashvault_decl_list));
- + p->next = flashvault_decl_list_head;
- + p->name = name;
- + p->vector_num = vector_num;
- + flashvault_decl_list_head = p;
- +}
- +
- +
- +static void
- +avr32_file_end (void)
- +{
- + struct flashvault_decl_list *p;
- + unsigned int num_entries = 0;
- +
- + /* Check if a list of flashvault declarations exists. */
- + if (flashvault_decl_list_head != NULL)
- + {
- + /* Calculate the number of entries in the table. */
- + for (p = flashvault_decl_list_head; p != NULL; p = p->next)
- + {
- + num_entries++;
- + }
- +
- + /* Generate the beginning of the flashvault data table. */
- + fputs ("\t.global __fv_table\n"
- + "\t.data\n"
- + "\t.align 2\n"
- + "\t.set .LFVTABLE, . + 0\n"
- + "\t.type __fv_table, @object\n", asm_out_file);
- + /* Each table entry is 8 bytes. */
- + fprintf (asm_out_file, "\t.size __fv_table, %u\n", (num_entries * 8));
- +
- + fputs("__fv_table:\n", asm_out_file);
- +
- + for (p = flashvault_decl_list_head; p != NULL; p = p->next)
- + {
- + /* Output table entry. */
- + fprintf (asm_out_file,
- + "\t.align 2\n"
- + "\t.int %u\n", p->vector_num);
- + fprintf (asm_out_file,
- + "\t.align 2\n"
- + "\t.int %s\n", p->name);
- + }
- + }
- +}
- --- /dev/null
- +++ b/gcc/config/avr32/avr32-elf.h
- @@ -0,0 +1,91 @@
- +/*
- + ELF specific definitions.
- + Copyright 2003,2004,2005,2006,2007,2008,2009 Atmel Corporation.
- +
- + This file is part of GCC.
- +
- + This program is free software; you can redistribute it and/or modify
- + it under the terms of the GNU General Public License as published by
- + the Free Software Foundation; either version 2 of the License, or
- + (at your option) any later version.
- +
- + This program is distributed in the hope that it will be useful,
- + but WITHOUT ANY WARRANTY; without even the implied warranty of
- + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- + GNU General Public License for more details.
- +
- + You should have received a copy of the GNU General Public License
- + along with this program; if not, write to the Free Software
- + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
- +
- +
- +/*****************************************************************************
- + * Controlling the Compiler Driver, 'gcc'
- + *****************************************************************************/
- +
- +/* Run-time Target Specification. */
- +#undef TARGET_VERSION
- +#define TARGET_VERSION fputs (" (AVR32 GNU with ELF)", stderr);
- +
- +/*
- +Another C string constant used much like LINK_SPEC. The
- +difference between the two is that STARTFILE_SPEC is used at
- +the very beginning of the command given to the linker.
- +
- +If this macro is not defined, a default is provided that loads the
- +standard C startup file from the usual place. See gcc.c.
- +*/
- +#if 0
- +#undef STARTFILE_SPEC
- +#define STARTFILE_SPEC "crt0%O%s crti%O%s crtbegin%O%s"
- +#endif
- +#undef STARTFILE_SPEC
- +#define STARTFILE_SPEC "%{mflashvault: crtfv.o%s} %{!mflashvault: crt0.o%s} \
- + crti.o%s crtbegin.o%s"
- +
- +#undef LINK_SPEC
- +#define LINK_SPEC "%{muse-oscall:--defsym __do_not_use_oscall_coproc__=0} %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}} %{mpart=uc3a3revd:-mavr32elf_uc3a3256s;:%{mpart=*:-mavr32elf_%*}} %{mcpu=*:-mavr32elf_%*}"
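- +
- +/* For example, compiling with "-mpart=uc3a0512" makes the driver pass
- + "-mavr32elf_uc3a0512" to the linker to select the matching emulation;
- + the uc3a3revd part is special-cased to the uc3a3256s emulation. */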
- +
- +
- +/*
- +Another C string constant used much like LINK_SPEC. The
- +difference between the two is that ENDFILE_SPEC is used at
- +the very end of the command given to the linker.
- +
- +Do not define this macro if it does not need to do anything.
- +*/
- +#undef ENDFILE_SPEC
- +#define ENDFILE_SPEC "crtend%O%s crtn%O%s"
- +
- +
- +/* Target CPU builtins. */
- +#define TARGET_CPU_CPP_BUILTINS() \
- + do \
- + { \
- + builtin_define ("__avr32__"); \
- + builtin_define ("__AVR32__"); \
- + builtin_define ("__AVR32_ELF__"); \
- + builtin_define (avr32_part->macro); \
- + builtin_define (avr32_arch->macro); \
- + if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A) \
- + builtin_define ("__AVR32_AVR32A__"); \
- + else \
- + builtin_define ("__AVR32_AVR32B__"); \
- + if (TARGET_UNALIGNED_WORD) \
- + builtin_define ("__AVR32_HAS_UNALIGNED_WORD__"); \
- + if (TARGET_SIMD) \
- + builtin_define ("__AVR32_HAS_SIMD__"); \
- + if (TARGET_DSP) \
- + builtin_define ("__AVR32_HAS_DSP__"); \
- + if (TARGET_RMW) \
- + builtin_define ("__AVR32_HAS_RMW__"); \
- + if (TARGET_BRANCH_PRED) \
- + builtin_define ("__AVR32_HAS_BRANCH_PRED__"); \
- + if (TARGET_FAST_FLOAT) \
- + builtin_define ("__AVR32_FAST_FLOAT__"); \
- + if (TARGET_FLASHVAULT) \
- + builtin_define ("__AVR32_FLASHVAULT__"); \
- + if (TARGET_NO_MUL_INSNS) \
- + builtin_define ("__AVR32_NO_MUL__"); \
- + } \
- + while (0)
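- +
- +/* Target code can then be guarded at preprocessing time, e.g.:
- +
- + #ifdef __AVR32_HAS_DSP__
- + ... code using the DSP instructions ...
- + #endif
- +*/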
- --- /dev/null
- +++ b/gcc/config/avr32/avr32.h
- @@ -0,0 +1,3316 @@
- +/*
- + Definitions of target machine for AVR32.
- + Copyright 2003,2004,2005,2006,2007,2008,2009,2010 Atmel Corporation.
- +
- + This file is part of GCC.
- +
- + This program is free software; you can redistribute it and/or modify
- + it under the terms of the GNU General Public License as published by
- + the Free Software Foundation; either version 2 of the License, or
- + (at your option) any later version.
- +
- + This program is distributed in the hope that it will be useful,
- + but WITHOUT ANY WARRANTY; without even the implied warranty of
- + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- + GNU General Public License for more details.
- +
- + You should have received a copy of the GNU General Public License
- + along with this program; if not, write to the Free Software
- + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
- +
- +#ifndef GCC_AVR32_H
- +#define GCC_AVR32_H
- +
- +
- +#ifndef OBJECT_FORMAT_ELF
- +#error avr32.h included before elfos.h
- +#endif
- +
- +#ifndef LOCAL_LABEL_PREFIX
- +#define LOCAL_LABEL_PREFIX "."
- +#endif
- +
- +#ifndef SUBTARGET_CPP_SPEC
- +#define SUBTARGET_CPP_SPEC "-D__ELF__"
- +#endif
- +
- +
- +extern struct rtx_def *avr32_compare_op0;
- +extern struct rtx_def *avr32_compare_op1;
- +
- +/* comparison type */
- +enum avr32_cmp_type {
- + CMP_QI, /* 1 byte -> char */
- + CMP_HI, /* 2 bytes -> half word */
- + CMP_SI, /* 4 bytes -> word */
- + CMP_DI, /* 8 bytes -> double word */
- + CMP_SF, /* single precision float */
- + CMP_MAX /* max comparison type */
- +};
- +
- +extern enum avr32_cmp_type avr32_branch_type; /* type of branch to use */
- +
- +
- +extern struct rtx_def *avr32_acc_cache;
- +
- +/* cache instruction op5 codes */
- +#define AVR32_CACHE_INVALIDATE_ICACHE 1
- +
- +/*
- +These bits describe the different types of function supported by the AVR32
- +backend. They are exclusive, e.g. a function cannot be both a normal function
- +and an interworked function. Knowing the type of a function is important for
- +determining its prologue and epilogue sequences. Note value 7 is currently
- +unassigned. Also note that the interrupt function types all have bit 2 set,
- +so that they can be tested for easily. Note that 0 is deliberately chosen for
- +AVR32_FT_UNKNOWN so that when the machine_function structure is initialized
- +(to zero) func_type will default to unknown. This will force the first use of
- +avr32_current_func_type to call avr32_compute_func_type.
- +*/
- +#define AVR32_FT_UNKNOWN 0 /* Type has not yet been determined. */
- +#define AVR32_FT_NORMAL 1 /* Normal function. */
- +#define AVR32_FT_ACALL 2 /* An acall function. */
- +#define AVR32_FT_EXCEPTION_HANDLER 3 /* A C++ exception handler. */
- +#define AVR32_FT_ISR_FULL 4 /* A fully shadowed interrupt mode. */
- +#define AVR32_FT_ISR_HALF 5 /* A half shadowed interrupt mode. */
- +#define AVR32_FT_ISR_NONE 6 /* No shadow registers. */
- +
- +#define AVR32_FT_TYPE_MASK ((1 << 3) - 1)
- +
- +/* In addition, functions can have several type modifiers, described by these bit masks: */
- +#define AVR32_FT_INTERRUPT (1 << 2) /* Note overlap with FT_ISR and above. */
- +#define AVR32_FT_NAKED (1 << 3) /* No prologue or epilogue. */
- +#define AVR32_FT_VOLATILE (1 << 4) /* Does not return. */
- +#define AVR32_FT_NESTED (1 << 5) /* Embedded inside another func. */
- +#define AVR32_FT_FLASHVAULT (1 << 6) /* Flashvault function call. */
- +#define AVR32_FT_FLASHVAULT_IMPL (1 << 7) /* Function definition in FlashVault. */
- +
- +
- +/* Some macros to test these flags. */
- +#define AVR32_FUNC_TYPE(t) (t & AVR32_FT_TYPE_MASK)
- +#define IS_INTERRUPT(t) (t & AVR32_FT_INTERRUPT)
- +#define IS_NAKED(t) (t & AVR32_FT_NAKED)
- +#define IS_VOLATILE(t) (t & AVR32_FT_VOLATILE)
- +#define IS_NESTED(t) (t & AVR32_FT_NESTED)
- +#define IS_FLASHVAULT(t) (t & AVR32_FT_FLASHVAULT)
- +#define IS_FLASHVAULT_IMPL(t) (t & AVR32_FT_FLASHVAULT_IMPL)
- +
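As an aside, here is a minimal sketch (not part of the patch) of how the AVR32_FT_* base types and modifier bits compose into a func_type word and how the test macros above read it back; the #defines simply mirror the definitions in the patch:

#include <stdio.h>

#define AVR32_FT_ISR_FULL   4               /* base type: lives in the low 3 bits */
#define AVR32_FT_TYPE_MASK  ((1 << 3) - 1)
#define AVR32_FT_INTERRUPT  (1 << 2)        /* set for all ISR base types */
#define AVR32_FT_NESTED     (1 << 5)        /* modifier bit */
#define AVR32_FUNC_TYPE(t)  (t & AVR32_FT_TYPE_MASK)
#define IS_INTERRUPT(t)     (t & AVR32_FT_INTERRUPT)
#define IS_NESTED(t)        (t & AVR32_FT_NESTED)

int main (void)
{
  unsigned long ft = AVR32_FT_ISR_FULL | AVR32_FT_NESTED;
  /* Prints "base type 4, interrupt 1, nested 1". */
  printf ("base type %lu, interrupt %d, nested %d\n",
          AVR32_FUNC_TYPE (ft), IS_INTERRUPT (ft) != 0, IS_NESTED (ft) != 0);
  return 0;
}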
- +#define SYMBOL_FLAG_RMW_ADDR_SHIFT SYMBOL_FLAG_MACH_DEP_SHIFT
- +#define SYMBOL_REF_RMW_ADDR(RTX) \
- + ((SYMBOL_REF_FLAGS (RTX) & (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT)) != 0)
- +
- +
- +typedef struct minipool_labels
- +GTY ((chain_next ("%h.next"), chain_prev ("%h.prev")))
- +{
- + rtx label;
- + struct minipool_labels *prev;
- + struct minipool_labels *next;
- +} minipool_labels;
- +
- +/* A C structure for machine-specific, per-function data.
- + This is added to the cfun structure. */
- +
- +typedef struct machine_function
- +GTY (())
- +{
- + /* Records the type of the current function. */
- + unsigned long func_type;
- + /* List of minipool labels, used for checking if a code label is valid in a
- + memory expression. */
- + minipool_labels *minipool_label_head;
- + minipool_labels *minipool_label_tail;
- + int ifcvt_after_reload;
- +} machine_function;
- +
- +/* Initialize data used by insn expanders. This is called from init_emit,
- + once for every function before code is generated. */
- +#define INIT_EXPANDERS avr32_init_expanders ()
- +
- +/******************************************************************************
- + * SPECS
- + *****************************************************************************/
- +
- +#ifndef ASM_SPEC
- +#define ASM_SPEC "%{fpic:--pic} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{march=ucr2nomul:-march=ucr2;:%{march=*:-march=%*}} %{mpart=uc3a3revd:-mpart=uc3a3256s;:%{mpart=*:-mpart=%*}}"
- +#endif
- +
- +#ifndef MULTILIB_DEFAULTS
- +#define MULTILIB_DEFAULTS { "march=ap", "" }
- +#endif
- +
- +/******************************************************************************
- + * Run-time Target Specification
- + *****************************************************************************/
- +#ifndef TARGET_VERSION
- +#define TARGET_VERSION fprintf(stderr, " (AVR32, GNU assembler syntax)");
- +#endif
- +
- +
- +/* Part types. Keep this in sync with the order of avr32_part_types in avr32.c. */
- +enum part_type
- +{
- + PART_TYPE_AVR32_NONE,
- + PART_TYPE_AVR32_AP7000,
- + PART_TYPE_AVR32_AP7001,
- + PART_TYPE_AVR32_AP7002,
- + PART_TYPE_AVR32_AP7200,
- + PART_TYPE_AVR32_UC3A0128,
- + PART_TYPE_AVR32_UC3A0256,
- + PART_TYPE_AVR32_UC3A0512,
- + PART_TYPE_AVR32_UC3A0512ES,
- + PART_TYPE_AVR32_UC3A1128,
- + PART_TYPE_AVR32_UC3A1256,
- + PART_TYPE_AVR32_UC3A1512,
- + PART_TYPE_AVR32_UC3A1512ES,
- + PART_TYPE_AVR32_UC3A3REVD,
- + PART_TYPE_AVR32_UC3A364,
- + PART_TYPE_AVR32_UC3A364S,
- + PART_TYPE_AVR32_UC3A3128,
- + PART_TYPE_AVR32_UC3A3128S,
- + PART_TYPE_AVR32_UC3A3256,
- + PART_TYPE_AVR32_UC3A3256S,
- + PART_TYPE_AVR32_UC3A464,
- + PART_TYPE_AVR32_UC3A464S,
- + PART_TYPE_AVR32_UC3A4128,
- + PART_TYPE_AVR32_UC3A4128S,
- + PART_TYPE_AVR32_UC3A4256,
- + PART_TYPE_AVR32_UC3A4256S,
- + PART_TYPE_AVR32_UC3B064,
- + PART_TYPE_AVR32_UC3B0128,
- + PART_TYPE_AVR32_UC3B0256,
- + PART_TYPE_AVR32_UC3B0256ES,
- + PART_TYPE_AVR32_UC3B0512,
- + PART_TYPE_AVR32_UC3B0512REVC,
- + PART_TYPE_AVR32_UC3B164,
- + PART_TYPE_AVR32_UC3B1128,
- + PART_TYPE_AVR32_UC3B1256,
- + PART_TYPE_AVR32_UC3B1256ES,
- + PART_TYPE_AVR32_UC3B1512,
- + PART_TYPE_AVR32_UC3B1512REVC,
- + PART_TYPE_AVR32_UC64D3,
- + PART_TYPE_AVR32_UC128D3,
- + PART_TYPE_AVR32_UC64D4,
- + PART_TYPE_AVR32_UC128D4,
- + PART_TYPE_AVR32_UC3C0512CREVC,
- + PART_TYPE_AVR32_UC3C1512CREVC,
- + PART_TYPE_AVR32_UC3C2512CREVC,
- + PART_TYPE_AVR32_UC3L0256,
- + PART_TYPE_AVR32_UC3L0128,
- + PART_TYPE_AVR32_UC3L064,
- + PART_TYPE_AVR32_UC3L032,
- + PART_TYPE_AVR32_UC3L016,
- + PART_TYPE_AVR32_UC3L064REVB,
- + PART_TYPE_AVR32_UC64L3U,
- + PART_TYPE_AVR32_UC128L3U,
- + PART_TYPE_AVR32_UC256L3U,
- + PART_TYPE_AVR32_UC64L4U,
- + PART_TYPE_AVR32_UC128L4U,
- + PART_TYPE_AVR32_UC256L4U,
- + PART_TYPE_AVR32_UC3C064C,
- + PART_TYPE_AVR32_UC3C0128C,
- + PART_TYPE_AVR32_UC3C0256C,
- + PART_TYPE_AVR32_UC3C0512C,
- + PART_TYPE_AVR32_UC3C164C,
- + PART_TYPE_AVR32_UC3C1128C,
- + PART_TYPE_AVR32_UC3C1256C,
- + PART_TYPE_AVR32_UC3C1512C,
- + PART_TYPE_AVR32_UC3C264C,
- + PART_TYPE_AVR32_UC3C2128C,
- + PART_TYPE_AVR32_UC3C2256C,
- + PART_TYPE_AVR32_UC3C2512C,
- + PART_TYPE_AVR32_MXT768E
- +};
- +
- +/* Microarchitectures. */
- +enum microarchitecture_type
- +{
- + UARCH_TYPE_AVR32A,
- + UARCH_TYPE_AVR32B,
- + UARCH_TYPE_NONE
- +};
- +
- +/* Architecture types, which specify the pipeline.
- + Keep this in sync with avr32_arch_types in avr32.c
- + and the pipeline attribute in avr32.md */
- +enum architecture_type
- +{
- + ARCH_TYPE_AVR32_AP,
- + ARCH_TYPE_AVR32_UCR1,
- + ARCH_TYPE_AVR32_UCR2,
- + ARCH_TYPE_AVR32_UCR2NOMUL,
- + ARCH_TYPE_AVR32_UCR3,
- + ARCH_TYPE_AVR32_UCR3FP,
- + ARCH_TYPE_AVR32_NONE
- +};
- +
- +/* Flag specifying if the cpu has support for DSP instructions.*/
- +#define FLAG_AVR32_HAS_DSP (1 << 0)
- +/* Flag specifying if the cpu has support for Read-Modify-Write
- + instructions.*/
- +#define FLAG_AVR32_HAS_RMW (1 << 1)
- +/* Flag specifying if the cpu has support for SIMD instructions. */
- +#define FLAG_AVR32_HAS_SIMD (1 << 2)
- +/* Flag specifying if the cpu has support for unaligned memory word access. */
- +#define FLAG_AVR32_HAS_UNALIGNED_WORD (1 << 3)
- +/* Flag specifying if the cpu has support for branch prediction. */
- +#define FLAG_AVR32_HAS_BRANCH_PRED (1 << 4)
- +/* Flag specifying if the cpu has support for a return stack. */
- +#define FLAG_AVR32_HAS_RETURN_STACK (1 << 5)
- +/* Flag specifying if the cpu has caches. */
- +#define FLAG_AVR32_HAS_CACHES (1 << 6)
- +/* Flag specifying if the cpu has support for v2 insns. */
- +#define FLAG_AVR32_HAS_V2_INSNS (1 << 7)
- +/* Flag specifying that the cpu has buggy mul insns. */
- +#define FLAG_AVR32_HAS_NO_MUL_INSNS (1 << 8)
- +/* Flag specifying that the device has FPU instructions according
- + to the AVR32002 specification. */
- +#define FLAG_AVR32_HAS_FPU (1 << 9)
- +
- +/* Structure for holding information about different avr32 CPUs/parts */
- +struct part_type_s
- +{
- + const char *const name;
- + enum part_type part_type;
- + enum architecture_type arch_type;
- + /* Must lie outside user's namespace. NULL == no macro. */
- + const char *const macro;
- +};
- +
- +/* Structure for holding information about different avr32 pipeline
- + architectures. */
- +struct arch_type_s
- +{
- + const char *const name;
- + enum architecture_type arch_type;
- + enum microarchitecture_type uarch_type;
- + const unsigned long feature_flags;
- + /* Must lie outside user's namespace. NULL == no macro. */
- + const char *const macro;
- +};
- +
- +extern const struct part_type_s *avr32_part;
- +extern const struct arch_type_s *avr32_arch;
- +
- +#define TARGET_SIMD (avr32_arch->feature_flags & FLAG_AVR32_HAS_SIMD)
- +#define TARGET_DSP (avr32_arch->feature_flags & FLAG_AVR32_HAS_DSP)
- +#define TARGET_RMW (avr32_arch->feature_flags & FLAG_AVR32_HAS_RMW)
- +#define TARGET_UNALIGNED_WORD (avr32_arch->feature_flags & FLAG_AVR32_HAS_UNALIGNED_WORD)
- +#define TARGET_BRANCH_PRED (avr32_arch->feature_flags & FLAG_AVR32_HAS_BRANCH_PRED)
- +#define TARGET_RETURN_STACK (avr32_arch->feature_flags & FLAG_AVR32_HAS_RETURN_STACK)
- +#define TARGET_V2_INSNS (avr32_arch->feature_flags & FLAG_AVR32_HAS_V2_INSNS)
- +#define TARGET_CACHES (avr32_arch->feature_flags & FLAG_AVR32_HAS_CACHES)
- +#define TARGET_NO_MUL_INSNS (avr32_arch->feature_flags & FLAG_AVR32_HAS_NO_MUL_INSNS)
- +#define TARGET_ARCH_AP (avr32_arch->arch_type == ARCH_TYPE_AVR32_AP)
- +#define TARGET_ARCH_UCR1 (avr32_arch->arch_type == ARCH_TYPE_AVR32_UCR1)
- +#define TARGET_ARCH_UCR2 (avr32_arch->arch_type == ARCH_TYPE_AVR32_UCR2)
- +#define TARGET_ARCH_UC (TARGET_ARCH_UCR1 || TARGET_ARCH_UCR2)
- +#define TARGET_UARCH_AVR32A (avr32_arch->uarch_type == UARCH_TYPE_AVR32A)
- +#define TARGET_UARCH_AVR32B (avr32_arch->uarch_type == UARCH_TYPE_AVR32B)
- +#define TARGET_ARCH_FPU (avr32_arch->feature_flags & FLAG_AVR32_HAS_FPU)
- +
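These TARGET_* flags are what the TARGET_CPP_BUILTINS code earlier in this patch tests when it emits the __AVR32_HAS_*__ preprocessor macros. A small sketch (not part of the patch) of the user-level view; whether the branch is taken depends on the -march/-mpart selection, and the strings are purely illustrative:

#include <stdio.h>

int main (void)
{
#ifdef __AVR32_HAS_DSP__
  /* Defined when the selected arch has FLAG_AVR32_HAS_DSP set. */
  puts ("compiled for a DSP-capable AVR32 part");
#else
  puts ("no DSP instructions available");
#endif
  return 0;
}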
- +#define CAN_DEBUG_WITHOUT_FP
- +
- +
- +
- +
- +/******************************************************************************
- + * Storage Layout
- + *****************************************************************************/
- +
- +/*
- +Define this macro to have the value 1 if the most significant bit in a
- +byte has the lowest number; otherwise define it to have the value zero.
- +This means that bit-field instructions count from the most significant
- +bit. If the machine has no bit-field instructions, then this must still
- +be defined, but it doesn't matter which value it is defined to. This
- +macro need not be a constant.
- +
- +This macro does not affect the way structure fields are packed into
- +bytes or words; that is controlled by BYTES_BIG_ENDIAN.
- +*/
- +#define BITS_BIG_ENDIAN 0
- +
- +/*
- +Define this macro to have the value 1 if the most significant byte in a
- +word has the lowest number. This macro need not be a constant.
- +*/
- +/*
- + Data is stored in a big-endian way.
- +*/
- +#define BYTES_BIG_ENDIAN 1
- +
- +/*
- +Define this macro to have the value 1 if, in a multiword object, the
- +most significant word has the lowest number. This applies to both
- +memory locations and registers; GCC fundamentally assumes that the
- +order of words in memory is the same as the order in registers. This
- +macro need not be a constant.
- +*/
- +/*
- + Data is stored in a big-endian way.
- +*/
- +#define WORDS_BIG_ENDIAN 1
- +
- +/*
- +Define this macro if WORDS_BIG_ENDIAN is not constant. This must be a
- +constant value with the same meaning as WORDS_BIG_ENDIAN, which will be
- +used only when compiling libgcc2.c. Typically the value will be set
- +based on preprocessor defines.
- +*/
- +#define LIBGCC2_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
- +
- +/*
- +Define this macro to have the value 1 if DFmode, XFmode or
- +TFmode floating point numbers are stored in memory with the word
- +containing the sign bit at the lowest address; otherwise define it to
- +have the value 0. This macro need not be a constant.
- +
- +You need not define this macro if the ordering is the same as for
- +multi-word integers.
- +*/
- +/* #define FLOAT_WORDS_BIG_ENDIAN 1 */
- +
- +/*
- +Define this macro to be the number of bits in an addressable storage
- +unit (byte); normally 8.
- +*/
- +#define BITS_PER_UNIT 8
- +
- +/*
- +Number of bits in a word; normally 32.
- +*/
- +#define BITS_PER_WORD 32
- +
- +/*
- +Maximum number of bits in a word. If this is undefined, the default is
- +BITS_PER_WORD. Otherwise, it is the constant value that is the
- +largest value that BITS_PER_WORD can have at run-time.
- +*/
- +/* MAX_BITS_PER_WORD not defined*/
- +
- +/*
- +Number of storage units in a word; normally 4.
- +*/
- +#define UNITS_PER_WORD 4
- +
- +/*
- +Minimum number of units in a word. If this is undefined, the default is
- +UNITS_PER_WORD. Otherwise, it is the constant value that is the
- +smallest value that UNITS_PER_WORD can have at run-time.
- +*/
- +/* MIN_UNITS_PER_WORD not defined */
- +
- +/*
- +Width of a pointer, in bits. You must specify a value no wider than the
- +width of Pmode. If it is not equal to the width of Pmode,
- +you must define POINTERS_EXTEND_UNSIGNED.
- +*/
- +#define POINTER_SIZE 32
- +
- +/*
- +A C expression whose value is greater than zero if pointers that need to be
- +extended from being POINTER_SIZE bits wide to Pmode are to
- +be zero-extended and zero if they are to be sign-extended. If the value
- +is less than zero, then there must be a "ptr_extend" instruction that
- +extends a pointer from POINTER_SIZE to Pmode.
- +
- +You need not define this macro if the POINTER_SIZE is equal
- +to the width of Pmode.
- +*/
- +/* #define POINTERS_EXTEND_UNSIGNED */
- +
- +/*
- +A macro to update M and UNSIGNEDP when an object whose type
- +is TYPE and which has the specified mode and signedness is to be
- +stored in a register. This macro is only called when TYPE is a
- +scalar type.
- +
- +On most RISC machines, which only have operations that operate on a full
- +register, define this macro to set M to word_mode if
- +M is an integer mode narrower than BITS_PER_WORD. In most
- +cases, only integer modes should be widened because wider-precision
- +floating-point operations are usually more expensive than their narrower
- +counterparts.
- +
- +For most machines, the macro definition does not change UNSIGNEDP.
- +However, some machines have instructions that preferentially handle
- +either signed or unsigned quantities of certain modes. For example, on
- +the DEC Alpha, 32-bit loads from memory and 32-bit add instructions
- +sign-extend the result to 64 bits. On such machines, set
- +UNSIGNEDP according to which kind of extension is more efficient.
- +
- +Do not define this macro if it would never modify M.
- +*/
- +#define PROMOTE_MODE(M, UNSIGNEDP, TYPE) \
- + { \
- + if (!AGGREGATE_TYPE_P (TYPE) \
- + && GET_MODE_CLASS (M) == MODE_INT \
- + && GET_MODE_SIZE (M) < 4) \
- + { \
- + if ((M) == QImode) \
- + (UNSIGNEDP) = 1; /* zero-extend chars */ \
- + else if ((M) == HImode) \
- + (UNSIGNEDP) = 0; /* sign-extend shorts */ \
- + (M) = SImode; /* promote to a full word */ \
- + } \
- + }
- +
- +#define PROMOTE_FUNCTION_MODE(M, UNSIGNEDP, TYPE) \
- + PROMOTE_MODE(M, UNSIGNEDP, TYPE)
- +
- +/* Define if operations between registers always perform the operation
- + on the full register even if a narrower mode is specified. */
- +#define WORD_REGISTER_OPERATIONS
- +
- +/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
- + will either zero-extend or sign-extend. The value of this macro should
- + be the code that says which one of the two operations is implicitly
- + done, UNKNOWN if not known. */
- +#define LOAD_EXTEND_OP(MODE) \
- + (((MODE) == QImode) ? ZERO_EXTEND \
- + : ((MODE) == HImode) ? SIGN_EXTEND : UNKNOWN)
- +
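In C terms, the definition above tells GCC that a byte (QImode) load leaves the upper register bits zeroed, while a halfword (HImode) load replicates the sign bit. A minimal sketch (not part of the patch) of the two resulting register values:

#include <inttypes.h>
#include <stdio.h>

int main (void)
{
  uint8_t qi = 0x80;                     /* byte in memory */
  int16_t hi = -2;                       /* halfword in memory */
  uint32_t r0 = (uint32_t) qi;           /* zero-extend: 0x00000080 */
  uint32_t r1 = (uint32_t)(int32_t) hi;  /* sign-extend: 0xfffffffe */
  printf ("%08" PRIx32 " %08" PRIx32 "\n", r0, r1);
  return 0;
}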
- +
- +/*
- +Normal alignment required for function parameters on the stack, in
- +bits. All stack parameters receive at least this much alignment
- +regardless of data type. On most machines, this is the same as the
- +size of an integer.
- +*/
- +#define PARM_BOUNDARY 32
- +
- +/*
- +Define this macro to the minimum alignment enforced by hardware for the
- +stack pointer on this machine. The definition is a C expression for the
- +desired alignment (measured in bits). This value is used as a default
- +if PREFERRED_STACK_BOUNDARY is not defined. On most machines,
- +this should be the same as PARM_BOUNDARY.
- +*/
- +#define STACK_BOUNDARY 32
- +
- +/*
- +Define this macro if you wish to preserve a certain alignment for the
- +stack pointer, greater than what the hardware enforces. The definition
- +is a C expression for the desired alignment (measured in bits). This
- +macro must evaluate to a value equal to or larger than
- +STACK_BOUNDARY.
- +*/
- +#define PREFERRED_STACK_BOUNDARY (TARGET_FORCE_DOUBLE_ALIGN ? 64 : 32 )
- +
- +/*
- +Alignment required for a function entry point, in bits.
- +*/
- +#define FUNCTION_BOUNDARY 16
- +
- +/*
- +Biggest alignment that any data type can require on this machine, in bits.
- +*/
- +#define BIGGEST_ALIGNMENT (TARGET_FORCE_DOUBLE_ALIGN ? 64 : 32 )
- +
- +/*
- +If defined, the smallest alignment, in bits, that can be given to an
- +object that can be referenced in one operation, without disturbing any
- +nearby object. Normally, this is BITS_PER_UNIT, but may be larger
- +on machines that don't have byte or half-word store operations.
- +*/
- +#define MINIMUM_ATOMIC_ALIGNMENT BITS_PER_UNIT
- +
- +
- +/*
- +An integer expression for the size in bits of the largest integer machine mode that
- +should actually be used. All integer machine modes of this size or smaller can be
- +used for structures and unions with the appropriate sizes. If this macro is undefined,
- +GET_MODE_BITSIZE (DImode) is assumed.*/
- +#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
- +
- +
- +/*
- +If defined, a C expression to compute the alignment given to a constant
- +that is being placed in memory. CONSTANT is the constant and
- +BASIC_ALIGN is the alignment that the object would ordinarily
- +have. The value of this macro is used instead of that alignment to
- +align the object.
- +
- +If this macro is not defined, then BASIC_ALIGN is used.
- +
- +The typical use of this macro is to increase alignment for string
- +constants to be word aligned so that strcpy calls that copy
- +constants can be done inline.
- +*/
- +#define CONSTANT_ALIGNMENT(CONSTANT, BASIC_ALIGN) \
- + ((TREE_CODE(CONSTANT) == STRING_CST) ? BITS_PER_WORD : BASIC_ALIGN)
- +
- +/* Try to align string to a word. */
- +#define DATA_ALIGNMENT(TYPE, ALIGN) \
- + ({(TREE_CODE (TYPE) == ARRAY_TYPE \
- + && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
- + && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN));})
- +
- +/* Try to align local store strings to a word. */
- +#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
- + ({(TREE_CODE (TYPE) == ARRAY_TYPE \
- + && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
- + && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN));})
- +
- +/*
- +Define this macro to be the value 1 if instructions will fail to work
- +if given data not on the nominal alignment. If instructions will merely
- +go slower in that case, define this macro as 0.
- +*/
- +#define STRICT_ALIGNMENT 1
- +
- +/*
- +Define this if you wish to imitate the way many other C compilers handle
- +alignment of bit-fields and the structures that contain them.
- +
- +The behavior is that the type written for a bit-field (int,
- +short, or other integer type) imposes an alignment for the
- +entire structure, as if the structure really did contain an ordinary
- +field of that type. In addition, the bit-field is placed within the
- +structure so that it would fit within such a field, not crossing a
- +boundary for it.
- +
- +Thus, on most machines, a bit-field whose type is written as int
- +would not cross a four-byte boundary, and would force four-byte
- +alignment for the whole structure. (The alignment used may not be four
- +bytes; it is controlled by the other alignment parameters.)
- +
- +If the macro is defined, its definition should be a C expression;
- +a nonzero value for the expression enables this behavior.
- +
- +Note that if this macro is not defined, or its value is zero, some
- +bit-fields may cross more than one alignment boundary. The compiler can
- +support such references if there are insv, extv, and
- +extzv insns that can directly reference memory.
- +
- +The other known way of making bit-fields work is to define
- +STRUCTURE_SIZE_BOUNDARY as large as BIGGEST_ALIGNMENT.
- +Then every structure can be accessed with fullwords.
- +
- +Unless the machine has bit-field instructions or you define
- +STRUCTURE_SIZE_BOUNDARY that way, you must define
- +PCC_BITFIELD_TYPE_MATTERS to have a nonzero value.
- +
- +If your aim is to make GCC use the same conventions for laying out
- +bit-fields as are used by another compiler, here is how to investigate
- +what the other compiler does. Compile and run this program:
- +
- +#include <stdio.h>
- +#include <stdlib.h>
- +
- +struct foo1
- +{
- + char x;
- + char :0;
- + char y;
- +};
- +
- +struct foo2
- +{
- + char x;
- + int :0;
- + char y;
- +};
- +
- +int main (void)
- +{
- + printf ("Size of foo1 is %d\n",
- + sizeof (struct foo1));
- + printf ("Size of foo2 is %d\n",
- + sizeof (struct foo2));
- + exit (0);
- +}
- +
- +If this prints 2 and 5, then the compiler's behavior is what you would
- +get from PCC_BITFIELD_TYPE_MATTERS.
- +*/
- +#define PCC_BITFIELD_TYPE_MATTERS 1
- +
- +
- +/******************************************************************************
- + * Layout of Source Language Data Types
- + *****************************************************************************/
- +
- +/*
- +A C expression for the size in bits of the type int on the
- +target machine. If you don't define this, the default is one word.
- +*/
- +#define INT_TYPE_SIZE 32
- +
- +/*
- +A C expression for the size in bits of the type short on the
- +target machine. If you don't define this, the default is half a word. (If
- +this would be less than one storage unit, it is rounded up to one unit.)
- +*/
- +#define SHORT_TYPE_SIZE 16
- +
- +/*
- +A C expression for the size in bits of the type long on the
- +target machine. If you don't define this, the default is one word.
- +*/
- +#define LONG_TYPE_SIZE 32
- +
- +
- +/*
- +A C expression for the size in bits of the type long long on the
- +target machine. If you don't define this, the default is two
- +words. If you want to support GNU Ada on your machine, the value of this
- +macro must be at least 64.
- +*/
- +#define LONG_LONG_TYPE_SIZE 64
- +
- +/*
- +A C expression for the size in bits of the type char on the
- +target machine. If you don't define this, the default is
- +BITS_PER_UNIT.
- +*/
- +#define CHAR_TYPE_SIZE 8
- +
- +
- +/*
- +A C expression for the size in bits of the C++ type bool and
- +C99 type _Bool on the target machine. If you don't define
- +this, and you probably shouldn't, the default is CHAR_TYPE_SIZE.
- +*/
- +#define BOOL_TYPE_SIZE 8
- +
- +
- +/*
- +An expression whose value is 1 or 0, according to whether the type
- +char should be signed or unsigned by default. The user can
- +always override this default with the options -fsigned-char
- +and -funsigned-char.
- +*/
- +/* We are using unsigned char */
- +#define DEFAULT_SIGNED_CHAR 0
- +
- +
- +/*
- +A C expression for a string describing the name of the data type to use
- +for size values. The typedef name size_t is defined using the
- +contents of the string.
- +
- +The string can contain more than one keyword. If so, separate them with
- +spaces, and write first any length keyword, then unsigned if
- +appropriate, and finally int. The string must exactly match one
- +of the data type names defined in the function
- +init_decl_processing in the file c-decl.c. You may not
- +omit int or change the order - that would cause the compiler to
- +crash on startup.
- +
- +If you don't define this macro, the default is "long unsigned int".
- +*/
- +#define SIZE_TYPE "long unsigned int"
- +
- +/*
- +A C expression for a string describing the name of the data type to use
- +for the result of subtracting two pointers. The typedef name
- +ptrdiff_t is defined using the contents of the string. See
- +SIZE_TYPE above for more information.
- +
- +If you don't define this macro, the default is "long int".
- +*/
- +#define PTRDIFF_TYPE "long int"
- +
- +
- +/*
- +A C expression for the size in bits of the data type for wide
- +characters. This is used in cpp, which cannot make use of
- +WCHAR_TYPE.
- +*/
- +#define WCHAR_TYPE_SIZE 32
- +
- +
- +/*
- +A C expression for a string describing the name of the data type to
- +use for wide characters passed to printf and returned from
- +getwc. The typedef name wint_t is defined using the
- +contents of the string. See SIZE_TYPE above for more
- +information.
- +
- +If you don't define this macro, the default is "unsigned int".
- +*/
- +#define WINT_TYPE "unsigned int"
- +
- +/*
- +A C expression for a string describing the name of the data type that
- +can represent any value of any standard or extended signed integer type.
- +The typedef name intmax_t is defined using the contents of the
- +string. See SIZE_TYPE above for more information.
- +
- +If you don't define this macro, the default is the first of
- +"int", "long int", or "long long int" that has as
- +much precision as long long int.
- +*/
- +#define INTMAX_TYPE "long long int"
- +
- +/*
- +A C expression for a string describing the name of the data type that
- +can represent any value of any standard or extended unsigned integer
- +type. The typedef name uintmax_t is defined using the contents
- +of the string. See SIZE_TYPE above for more information.
- +
- +If you don't define this macro, the default is the first of
- +"unsigned int", "long unsigned int", or "long long unsigned int"
- +that has as much precision as long long unsigned int.
- +*/
- +#define UINTMAX_TYPE "long long unsigned int"
- +
- +
- +/******************************************************************************
- + * Register Usage
- + *****************************************************************************/
- +
- +/* Convert from gcc internal register number to register number
- + used in assembly code */
- +#define ASM_REGNUM(reg) (LAST_REGNUM - (reg))
- +
- +/* Convert from the register number used in assembly code to the gcc
- + internal register number. */
- +#define INTERNAL_REGNUM(reg) (LAST_REGNUM - (reg))
- +
- +/** Basic Characteristics of Registers **/
- +
- +/*
- +Number of hardware registers known to the compiler. They receive
- +numbers 0 through FIRST_PSEUDO_REGISTER-1; thus, the first
- +pseudo register's number really is assigned the number
- +FIRST_PSEUDO_REGISTER.
- +*/
- +#define FIRST_PSEUDO_REGISTER (LAST_REGNUM + 1)
- +
- +#define FIRST_REGNUM 0
- +#define LAST_REGNUM 15
- +
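With LAST_REGNUM 15, ASM_REGNUM and INTERNAL_REGNUM above are the same involution: applying either twice returns the original number, so internal register 0 is r15 (the program counter) and internal 15 is r0. A tiny sketch (not part of the patch):

#include <stdio.h>

#define LAST_REGNUM 15
#define ASM_REGNUM(reg) (LAST_REGNUM - (reg))

int main (void)
{
  /* Prints internal 0 -> r15, internal 1 -> r14, ..., internal 15 -> r0. */
  for (int i = 0; i <= LAST_REGNUM; i++)
    printf ("internal %2d -> r%d\n", i, ASM_REGNUM (i));
  return 0;
}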
- +/*
- +An initializer that says which registers are used for fixed purposes
- +all throughout the compiled code and are therefore not available for
- +general allocation. These would include the stack pointer, the frame
- +pointer (except on machines where that can be used as a general
- +register when no frame pointer is needed), the program counter on
- +machines where that is considered one of the addressable registers,
- +and any other numbered register with a standard use.
- +
- +This information is expressed as a sequence of numbers, separated by
- +commas and surrounded by braces. The nth number is 1 if
- +register n is fixed, 0 otherwise.
- +
- +The table initialized from this macro, and the table initialized by
- +the following one, may be overridden at run time either automatically,
- +by the actions of the macro CONDITIONAL_REGISTER_USAGE, or by
- +the user with the command options -ffixed-[reg],
- +-fcall-used-[reg] and -fcall-saved-[reg].
- +*/
- +
- +/* The internal gcc register numbers are reversed
- + compared to the real register numbers since
- + gcc expects data types stored over multiple
- + registers in the register file to be big endian
- + if the memory layout is big endian. But this
- + is not the case for avr32, so we fake a
- + big-endian register file. */
- +
- +#define FIXED_REGISTERS { \
- + 1, /* Program Counter */ \
- + 0, /* Link Register */ \
- + 1, /* Stack Pointer */ \
- + 0, /* r12 */ \
- + 0, /* r11 */ \
- + 0, /* r10 */ \
- + 0, /* r9 */ \
- + 0, /* r8 */ \
- + 0, /* r7 */ \
- + 0, /* r6 */ \
- + 0, /* r5 */ \
- + 0, /* r4 */ \
- + 0, /* r3 */ \
- + 0, /* r2 */ \
- + 0, /* r1 */ \
- + 0, /* r0 */ \
- +}
- +
- +/*
- +Like FIXED_REGISTERS but has 1 for each register that is
- +clobbered (in general) by function calls as well as for fixed
- +registers. This macro therefore identifies the registers that are not
- +available for general allocation of values that must live across
- +function calls.
- +
- +If a register has 0 in CALL_USED_REGISTERS, the compiler
- +automatically saves it on function entry and restores it on function
- +exit, if the register is used within the function.
- +*/
- +#define CALL_USED_REGISTERS { \
- + 1, /* Program Counter */ \
- + 0, /* Link Register */ \
- + 1, /* Stack Pointer */ \
- + 1, /* r12 */ \
- + 1, /* r11 */ \
- + 1, /* r10 */ \
- + 1, /* r9 */ \
- + 1, /* r8 */ \
- + 0, /* r7 */ \
- + 0, /* r6 */ \
- + 0, /* r5 */ \
- + 0, /* r4 */ \
- + 0, /* r3 */ \
- + 0, /* r2 */ \
- + 0, /* r1 */ \
- + 0, /* r0 */ \
- +}
- +
- +/* Interrupt functions can only use registers that have already been
- + saved by the prologue, even if they would normally be
- + call-clobbered. */
- +#define HARD_REGNO_RENAME_OK(SRC, DST) \
- + (! IS_INTERRUPT (cfun->machine->func_type) || \
- + df_regs_ever_live_p (DST))
- +
- +
- +/*
- +Zero or more C statements that may conditionally modify five variables
- +fixed_regs, call_used_regs, global_regs,
- +reg_names, and reg_class_contents, to take into account
- +any dependence of these register sets on target flags. The first three
- +of these are of type char [] (interpreted as Boolean vectors).
- +global_regs is a const char *[], and
- +reg_class_contents is a HARD_REG_SET. Before the macro is
- +called, fixed_regs, call_used_regs,
- +reg_class_contents, and reg_names have been initialized
- +from FIXED_REGISTERS, CALL_USED_REGISTERS,
- +REG_CLASS_CONTENTS, and REGISTER_NAMES, respectively.
- +global_regs has been cleared, and any -ffixed-[reg],
- +-fcall-used-[reg] and -fcall-saved-[reg]
- +command options have been applied.
- +
- +You need not define this macro if it has no work to do.
- +
- +If the usage of an entire class of registers depends on the target
- +flags, you may indicate this to GCC by using this macro to modify
- +fixed_regs and call_used_regs to 1 for each of the
- +registers in the classes which should not be used by GCC. Also define
- +the macro REG_CLASS_FROM_LETTER to return NO_REGS if it
- +is called with a letter for a class that shouldn't be used.
- +
- + (However, if this class is not included in GENERAL_REGS and all
- +of the insn patterns whose constraints permit this class are
- +controlled by target switches, then GCC will automatically avoid using
- +these registers when the target switches are opposed to them.)
- +*/
- +#define CONDITIONAL_REGISTER_USAGE \
- + do \
- + { \
- + if (flag_pic) \
- + { \
- + fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
- + call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
- + } \
- + } \
- + while (0)
- +
- +
- +/*
- +If the program counter has a register number, define this as that
- +register number. Otherwise, do not define it.
- +*/
- +
- +#define LAST_AVR32_REGNUM 16
- +
- +
- +/** Order of Allocation of Registers **/
- +
- +/*
- +If defined, an initializer for a vector of integers, containing the
- +numbers of hard registers in the order in which GCC should prefer
- +to use them (from most preferred to least).
- +
- +If this macro is not defined, registers are used lowest numbered first
- +(all else being equal).
- +
- +One use of this macro is on machines where the highest numbered
- +registers must always be saved and the save-multiple-registers
- +instruction supports only sequences of consecutive registers. On such
- +machines, define REG_ALLOC_ORDER to be an initializer that lists
- +the highest numbered allocable register first.
- +*/
- +#define REG_ALLOC_ORDER \
- +{ \
- + INTERNAL_REGNUM(8), \
- + INTERNAL_REGNUM(9), \
- + INTERNAL_REGNUM(10), \
- + INTERNAL_REGNUM(11), \
- + INTERNAL_REGNUM(12), \
- + LR_REGNUM, \
- + INTERNAL_REGNUM(7), \
- + INTERNAL_REGNUM(6), \
- + INTERNAL_REGNUM(5), \
- + INTERNAL_REGNUM(4), \
- + INTERNAL_REGNUM(3), \
- + INTERNAL_REGNUM(2), \
- + INTERNAL_REGNUM(1), \
- + INTERNAL_REGNUM(0), \
- + SP_REGNUM, \
- + PC_REGNUM \
- +}
- +
- +
- +/** How Values Fit in Registers **/
- +
- +/*
- +A C expression for the number of consecutive hard registers, starting
- +at register number REGNO, required to hold a value of mode
- +MODE.
- +
- +On a machine where all registers are exactly one word, a suitable
- +definition of this macro is
- +
- +#define HARD_REGNO_NREGS(REGNO, MODE) \
- + ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
- + / UNITS_PER_WORD)
- +*/
- +#define HARD_REGNO_NREGS(REGNO, MODE) \
- + ((unsigned int)((GET_MODE_SIZE(MODE) + UNITS_PER_WORD -1 ) / UNITS_PER_WORD))
- +
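The arithmetic is just a rounded-up division by the 4-byte word size, so SImode needs one register while DImode or DFmode need two. A one-liner sketch (not part of the patch):

#include <stdio.h>

#define UNITS_PER_WORD 4

static unsigned nregs (unsigned mode_size)
{
  /* Ceiling division: how many words the mode occupies. */
  return (mode_size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
}

int main (void)
{
  /* QImode (1 byte), SImode (4 bytes), DImode (8 bytes) -> 1, 1, 2. */
  printf ("%u %u %u\n", nregs (1), nregs (4), nregs (8));
  return 0;
}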
- +/*
- +A C expression that is nonzero if it is permissible to store a value
- +of mode MODE in hard register number REGNO (or in several
- +registers starting with that one). For a machine where all registers
- +are equivalent, a suitable definition is
- +
- + #define HARD_REGNO_MODE_OK(REGNO, MODE) 1
- +
- +You need not include code to check for the numbers of fixed registers,
- +because the allocation mechanism considers them to be always occupied.
- +
- +On some machines, double-precision values must be kept in even/odd
- +register pairs. You can implement that by defining this macro to reject
- +odd register numbers for such modes.
- +
- +The minimum requirement for a mode to be OK in a register is that the
- +mov[mode] instruction pattern support moves between the
- +register and any other hard register in the same class and that moving a
- +value into the register and back out not alter it.
- +
- +Since the same instruction used to move word_mode will work for
- +all narrower integer modes, it is not necessary on any machine for
- +HARD_REGNO_MODE_OK to distinguish between these modes, provided
- +you define patterns movhi, etc., to take advantage of this. This
- +is useful because of the interaction between HARD_REGNO_MODE_OK
- +and MODES_TIEABLE_P; it is very desirable for all integer modes
- +to be tieable.
- +
- +Many machines have special registers for floating point arithmetic.
- +Often people assume that floating point machine modes are allowed only
- +in floating point registers. This is not true. Any registers that
- +can hold integers can safely hold a floating point machine
- +mode, whether or not floating arithmetic can be done on it in those
- +registers. Integer move instructions can be used to move the values.
- +
- +On some machines, though, the converse is true: fixed-point machine
- +modes may not go in floating registers. This is true if the floating
- +registers normalize any value stored in them, because storing a
- +non-floating value there would garble it. In this case,
- +HARD_REGNO_MODE_OK should reject fixed-point machine modes in
- +floating registers. But if the floating registers do not automatically
- +normalize, if you can store any bit pattern in one and retrieve it
- +unchanged without a trap, then any machine mode may go in a floating
- +register, so you can define this macro to say so.
- +
- +The primary significance of special floating registers is rather that
- +they are the registers acceptable in floating point arithmetic
- +instructions. However, this is of no concern to
- +HARD_REGNO_MODE_OK. You handle it by writing the proper
- +constraints for those instructions.
- +
- +On some machines, the floating registers are especially slow to access,
- +so that it is better to store a value in a stack frame than in such a
- +register if floating point arithmetic is not being done. As long as the
- +floating registers are not in class GENERAL_REGS, they will not
- +be used unless some pattern's constraint asks for one.
- +*/
- +#define HARD_REGNO_MODE_OK(REGNO, MODE) avr32_hard_regno_mode_ok(REGNO, MODE)
- +
- +/*
- +A C expression that is nonzero if a value of mode
- +MODE1 is accessible in mode MODE2 without copying.
- +
- +If HARD_REGNO_MODE_OK(R, MODE1) and
- +HARD_REGNO_MODE_OK(R, MODE2) are always the same for
- +any R, then MODES_TIEABLE_P(MODE1, MODE2)
- +should be nonzero. If they differ for any R, you should define
- +this macro to return zero unless some other mechanism ensures the
- +accessibility of the value in a narrower mode.
- +
- +You should define this macro to return nonzero in as many cases as
- +possible since doing so will allow GCC to perform better register
- +allocation.
- +*/
- +#define MODES_TIEABLE_P(MODE1, MODE2) \
- + (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
- +
- +
- +
- +/******************************************************************************
- + * Register Classes
- + *****************************************************************************/
- +
- +/*
- +An enumeral type that must be defined with all the register class names
- +as enumeral values. NO_REGS must be first. ALL_REGS
- +must be the last register class, followed by one more enumeral value,
- +LIM_REG_CLASSES, which is not a register class but rather
- +tells how many classes there are.
- +
- +Each register class has a number, which is the value of casting
- +the class name to type int. The number serves as an index
- +in many of the tables described below.
- +*/
- +enum reg_class
- +{
- + NO_REGS,
- + GENERAL_REGS,
- + ALL_REGS,
- + LIM_REG_CLASSES
- +};
- +
- +/*
- +The number of distinct register classes, defined as follows:
- + #define N_REG_CLASSES (int) LIM_REG_CLASSES
- +*/
- +#define N_REG_CLASSES (int)LIM_REG_CLASSES
- +
- +/*
- +An initializer containing the names of the register classes as C string
- +constants. These names are used in writing some of the debugging dumps.
- +*/
- +#define REG_CLASS_NAMES \
- +{ \
- + "NO_REGS", \
- + "GENERAL_REGS", \
- + "ALL_REGS" \
- +}
- +
- +/*
- +An initializer containing the contents of the register classes, as integers
- +which are bit masks. The nth integer specifies the contents of class
- +n. The way the integer mask is interpreted is that
- +register r is in the class if mask & (1 << r) is 1.
- +
- +When the machine has more than 32 registers, an integer does not suffice.
- +Then the integers are replaced by sub-initializers, braced groupings containing
- +several integers. Each sub-initializer must be suitable as an initializer
- +for the type HARD_REG_SET which is defined in hard-reg-set.h.
- +In this situation, the first integer in each sub-initializer corresponds to
- +registers 0 through 31, the second integer to registers 32 through 63, and
- +so on.
- +*/
- +#define REG_CLASS_CONTENTS { \
- + {0x00000000}, /* NO_REGS */ \
- + {0x0000FFFF}, /* GENERAL_REGS */ \
- + {0x7FFFFFFF}, /* ALL_REGS */ \
- +}
- +
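Membership is the mask test described above: register r is in the class when mask & (1 << r) is nonzero, so GENERAL_REGS (0x0000FFFF) holds exactly the sixteen internal registers 0 through 15. A short sketch (not part of the patch):

#include <stdio.h>

#define GENERAL_REGS_MASK 0x0000FFFFu

static int in_general_regs (int r)
{
  return (GENERAL_REGS_MASK & (1u << r)) != 0;
}

int main (void)
{
  /* Prints 1 1 0: internal regs 0 and 15 are members, 16 is past the class. */
  printf ("%d %d %d\n", in_general_regs (0), in_general_regs (15),
          in_general_regs (16));
  return 0;
}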
- +
- +/*
- +A C expression whose value is a register class containing hard register
- +REGNO. In general there is more than one such class; choose a class
- +which is minimal, meaning that no smaller class also contains the
- +register.
- +*/
- +#define REGNO_REG_CLASS(REGNO) (GENERAL_REGS)
- +
- +/*
- +A macro whose definition is the name of the class to which a valid
- +base register must belong. A base register is one used in an address
- +which is the register value plus a displacement.
- +*/
- +#define BASE_REG_CLASS GENERAL_REGS
- +
- +/*
- +This is a variation of the BASE_REG_CLASS macro which allows
- +the selection of a base register in a mode depenedent manner. If
- +mode is VOIDmode then it should return the same value as
- +BASE_REG_CLASS.
- +*/
- +#define MODE_BASE_REG_CLASS(MODE) BASE_REG_CLASS
- +
- +/*
- +A macro whose definition is the name of the class to which a valid
- +index register must belong. An index register is one used in an
- +address where its value is either multiplied by a scale factor or
- +added to another register (as well as added to a displacement).
- +*/
- +#define INDEX_REG_CLASS BASE_REG_CLASS
- +
- +/*
- +A C expression which defines the machine-dependent operand constraint
- +letters for register classes. If CHAR is such a letter, the
- +value should be the register class corresponding to it. Otherwise,
- +the value should be NO_REGS. The register letter r,
- +corresponding to class GENERAL_REGS, will not be passed
- +to this macro; you do not need to handle it.
- +*/
- +#define REG_CLASS_FROM_LETTER(CHAR) NO_REGS
- +
- +/* These assume that REGNO is a hard or pseudo reg number.
- + They give nonzero only if REGNO is a hard reg of the suitable class
- + or a pseudo reg currently allocated to a suitable hard reg.
- + Since they use reg_renumber, they are safe only once reg_renumber
- + has been allocated, which happens in local-alloc.c. */
- +#define TEST_REGNO(R, TEST, VALUE) \
- + ((R TEST VALUE) || ((unsigned) reg_renumber[R] TEST VALUE))
- +
- +/*
- +A C expression which is nonzero if register number num is suitable for use as a base
- +register in operand addresses. It may be either a suitable hard register or a pseudo
- +register that has been allocated such a hard register.
- +*/
- +#define REGNO_OK_FOR_BASE_P(NUM) TEST_REGNO(NUM, <=, LAST_REGNUM)
- +
- +/* The following macro defines cover classes for Integrated Register
- + Allocator. Cover classes are a set of non-intersecting register
- + classes covering all hard registers used for register allocation
- + purposes. Any move between two registers of a cover class should be
- + cheaper than load or store of the registers. The macro value is
- + array of register classes with LIM_REG_CLASSES used as the end
- + marker. */
- +
- +#define IRA_COVER_CLASSES \
- +{ \
- + GENERAL_REGS, LIM_REG_CLASSES \
- +}
- +
- +/*
- +A C expression which is nonzero if register number NUM is
- +suitable for use as an index register in operand addresses. It may be
- +either a suitable hard register or a pseudo register that has been
- +allocated such a hard register.
- +
- +The difference between an index register and a base register is that
- +the index register may be scaled. If an address involves the sum of
- +two registers, neither one of them scaled, then either one may be
- +labeled the ``base'' and the other the ``index''; but whichever
- +labeling is used must fit the machine's constraints of which registers
- +may serve in each capacity. The compiler will try both labelings,
- +looking for one that is valid, and will reload one or both registers
- +only if neither labeling works.
- +*/
- +#define REGNO_OK_FOR_INDEX_P(NUM) TEST_REGNO(NUM, <=, LAST_REGNUM)
- +
- +/*
- +A C expression that places additional restrictions on the register class
- +to use when it is necessary to copy value X into a register in class
- +CLASS. The value is a register class; perhaps CLASS, or perhaps
- +another, smaller class. On many machines, the following definition is
- +safe: #define PREFERRED_RELOAD_CLASS(X,CLASS) CLASS
- +
- +Sometimes returning a more restrictive class makes better code. For
- +example, on the 68000, when X is an integer constant that is in range
- +for a 'moveq' instruction, the value of this macro is always
- +DATA_REGS as long as CLASS includes the data registers.
- +Requiring a data register guarantees that a 'moveq' will be used.
- +
- +If X is a const_double, by returning NO_REGS
- +you can force X into a memory constant. This is useful on
- +certain machines where immediate floating values cannot be loaded into
- +certain kinds of registers.
- +*/
- +#define PREFERRED_RELOAD_CLASS(X, CLASS) CLASS
- +
- +
- +
- +/*
- +A C expression for the maximum number of consecutive registers
- +of class CLASS needed to hold a value of mode MODE.
- +
- +This is closely related to the macro HARD_REGNO_NREGS. In fact,
- +the value of the macro CLASS_MAX_NREGS(CLASS, MODE)
- +should be the maximum value of HARD_REGNO_NREGS(REGNO, MODE)
- +for all REGNO values in the class CLASS.
- +
- +This macro helps control the handling of multiple-word values
- +in the reload pass.
- +*/
- +#define CLASS_MAX_NREGS(CLASS, MODE) /* ToDo:fixme */ \
- + (unsigned int)((GET_MODE_SIZE(MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
- +
- +
- +/*
- + Using CONST_OK_FOR_CONSTRAINT_P instead of CONST_OK_FOR_LETTER_P
- + in order to support constraints with more than one letter.
- + Only two letters are then used for constant constraints,
- + the letter 'K' and the letter 'I'. A constraint starting with
- + these letters must consist of four characters. The character following
- + 'K' or 'I' must be either 'u' (unsigned) or 's' (signed) to specify
- + whether the constant is zero- or sign-extended. The last two characters
- + give the length in bits of the constant. The base constraint letter 'I'
- + means that this is a negated constant, meaning that -VAL should actually
- + be checked to lie within the valid range instead of VAL, which is used
- + when 'K' is the base constraint letter.
- +
- +*/
- +
- +#define CONSTRAINT_LEN(C, STR) \
- + ( ((C) == 'K' || (C) == 'I') ? 4 : \
- + ((C) == 'R') ? 5 : \
- + ((C) == 'P') ? -1 : \
- + DEFAULT_CONSTRAINT_LEN((C), (STR)) )
- +
- +#define CONST_OK_FOR_CONSTRAINT_P(VALUE, C, STR) \
- + avr32_const_ok_for_constraint_p(VALUE, C, STR)
- +
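The real check lives in avr32_const_ok_for_constraint_p in avr32.c; what follows is a hypothetical re-implementation (not part of the patch) of the four-character encoding described above, where for example "Ks08" accepts signed 8-bit constants and "Is21" checks that the negated value fits in 21 signed bits:

#include <stdio.h>

static int const_ok (long val, const char *str)
{
  long v = (str[0] == 'I') ? -val : val;            /* 'I': check -VAL    */
  int bits = (str[2] - '0') * 10 + (str[3] - '0');  /* two-digit width    */
  if (str[1] == 'u')                                /* zero-extended      */
    return v >= 0 && v < (1L << bits);
  return v >= -(1L << (bits - 1))                   /* sign-extended      */
         && v < (1L << (bits - 1));
}

int main (void)
{
  /* Prints 1 0 1: 127 fits signed 8-bit, 255 does not,
     and -4 negated (4) fits signed 21-bit. */
  printf ("%d %d %d\n", const_ok (127, "Ks08"), const_ok (255, "Ks08"),
          const_ok (-4, "Is21"));
  return 0;
}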
- +/*
- +A C expression that defines the machine-dependent operand constraint
- +letters that specify particular ranges of const_double values ('G' or 'H').
- +
- +If C is one of those letters, the expression should check that
- +VALUE, an RTX of code const_double, is in the appropriate
- +range and return 1 if so, 0 otherwise. If C is not one of those
- +letters, the value should be 0 regardless of VALUE.
- +
- +const_double is used for all floating-point constants and for
- +DImode fixed-point constants. A given letter can accept either
- +or both kinds of values. It can use GET_MODE to distinguish
- +between these kinds.
- +*/
- +#define CONST_DOUBLE_OK_FOR_LETTER_P(OP, C) \
- + ((C) == 'G' ? avr32_const_double_immediate(OP) : 0)
- +
- +/*
- +A C expression that defines the optional machine-dependent constraint
- +letters that can be used to segregate specific types of operands, usually
- +memory references, for the target machine. Any letter that is not
- +elsewhere defined and not matched by REG_CLASS_FROM_LETTER
- +may be used. Normally this macro will not be defined.
- +
- +If it is required for a particular target machine, it should return 1
- +if VALUE corresponds to the operand type represented by the
- +constraint letter C. If C is not defined as an extra
- +constraint, the value returned should be 0 regardless of VALUE.
- +
- +For example, on the ROMP, load instructions cannot have their output
- +in r0 if the memory reference contains a symbolic address. Constraint
- +letter 'Q' is defined as representing a memory address that does
- +not contain a symbolic address. An alternative is specified with
- +a 'Q' constraint on the input and 'r' on the output. The next
- +alternative specifies 'm' on the input and a register class that
- +does not include r0 on the output.
- +*/
- +#define EXTRA_CONSTRAINT_STR(OP, C, STR) \
- + ((C) == 'W' ? avr32_address_operand(OP, GET_MODE(OP)) : \
- + (C) == 'R' ? (avr32_indirect_register_operand(OP, GET_MODE(OP)) || \
- + (avr32_imm_disp_memory_operand(OP, GET_MODE(OP)) \
- + && avr32_const_ok_for_constraint_p( \
- + INTVAL(XEXP(XEXP(OP, 0), 1)), \
- + (STR)[1], &(STR)[1]))) : \
- + (C) == 'S' ? avr32_indexed_memory_operand(OP, GET_MODE(OP)) : \
- + (C) == 'T' ? avr32_const_pool_ref_operand(OP, GET_MODE(OP)) : \
- + (C) == 'U' ? SYMBOL_REF_RCALL_FUNCTION_P(OP) : \
- + (C) == 'Z' ? avr32_cop_memory_operand(OP, GET_MODE(OP)) : \
- + (C) == 'Q' ? avr32_non_rmw_memory_operand(OP, GET_MODE(OP)) : \
- + (C) == 'Y' ? avr32_rmw_memory_operand(OP, GET_MODE(OP)) : \
- + 0)
- +
- +
- +#define EXTRA_MEMORY_CONSTRAINT(C, STR) ( ((C) == 'R') || \
- + ((C) == 'Q') || \
- + ((C) == 'S') || \
- + ((C) == 'Y') || \
- + ((C) == 'Z') )
- +
- +
- +/* Returns nonzero if op is a function SYMBOL_REF which
- + can be called using an rcall instruction */
- +#define SYMBOL_REF_RCALL_FUNCTION_P(op) \
- + ( GET_CODE(op) == SYMBOL_REF \
- + && SYMBOL_REF_FUNCTION_P(op) \
- + && SYMBOL_REF_LOCAL_P(op) \
- + && !SYMBOL_REF_EXTERNAL_P(op) \
- + && !TARGET_HAS_ASM_ADDR_PSEUDOS )
- +
- +/******************************************************************************
- + * Stack Layout and Calling Conventions
- + *****************************************************************************/
- +
- +/** Basic Stack Layout **/
- +
- +/*
- +Define this macro if pushing a word onto the stack moves the stack
- +pointer to a smaller address.
- +
- +When we say, ``define this macro if ...,'' it means that the
- +compiler checks this macro only with #ifdef so the precise
- +definition used does not matter.
- +*/
- +/* pushm decrements SP: *(--SP) <-- Rx */
- +#define STACK_GROWS_DOWNWARD
- +
- +/*
- +This macro defines the operation used when something is pushed
- +on the stack. In RTL, a push operation will be
- +(set (mem (STACK_PUSH_CODE (reg sp))) ...)
- +
- +The choices are PRE_DEC, POST_DEC, PRE_INC,
- +and POST_INC. Which of these is correct depends on
- +the stack direction and on whether the stack pointer points
- +to the last item on the stack or whether it points to the
- +space for the next item on the stack.
- +
- +The default is PRE_DEC when STACK_GROWS_DOWNWARD is
- +defined, which is almost always right, and PRE_INC otherwise,
- +which is often wrong.
- +*/
- +/* pushm: *(--SP) <-- Rx */
- +#define STACK_PUSH_CODE PRE_DEC
- +
- +/* Define this to nonzero if the nominal address of the stack frame
- + is at the high-address end of the local variables;
- + that is, each additional local variable allocated
- + goes at a more negative offset in the frame. */
- +#define FRAME_GROWS_DOWNWARD 1
- +
- +
- +/*
- +Offset from the frame pointer to the first local variable slot to be allocated.
- +
- +If FRAME_GROWS_DOWNWARD, find the next slot's offset by
- +subtracting the first slot's length from STARTING_FRAME_OFFSET.
- +Otherwise, it is found by adding the length of the first slot to the
- +value STARTING_FRAME_OFFSET.
- + (i'm not sure if the above is still correct.. had to change it to get
- + rid of an overfull. --mew 2feb93 )
- +*/
- +#define STARTING_FRAME_OFFSET 0
- +
- +/*
- +Offset from the stack pointer register to the first location at which
- +outgoing arguments are placed. If not specified, the default value of
- +zero is used. This is the proper value for most machines.
- +
- +If ARGS_GROW_DOWNWARD, this is the offset to the location above
- +the first location at which outgoing arguments are placed.
- +*/
- +#define STACK_POINTER_OFFSET 0
- +
- +/*
- +Offset from the argument pointer register to the first argument's
- +address. On some machines it may depend on the data type of the
- +function.
- +
- +If ARGS_GROW_DOWNWARD, this is the offset to the location above
- +the first argument's address.
- +*/
- +#define FIRST_PARM_OFFSET(FUNDECL) 0
- +
- +
- +/*
- +A C expression whose value is RTL representing the address in a stack
- +frame where the pointer to the caller's frame is stored. Assume that
- +FRAMEADDR is an RTL expression for the address of the stack frame
- +itself.
- +
- +If you don't define this macro, the default is to return the value
- +of FRAMEADDR - that is, the stack frame address is also the
- +address of the stack word that points to the previous frame.
- +*/
- +#define DYNAMIC_CHAIN_ADDRESS(FRAMEADDR) plus_constant ((FRAMEADDR), 4)
- +
- +
- +/*
- +A C expression whose value is RTL representing the value of the return
- +address for the frame COUNT steps up from the current frame, after
- +the prologue. FRAMEADDR is the frame pointer of the COUNT
- +frame, or the frame pointer of the COUNT - 1 frame if
- +RETURN_ADDR_IN_PREVIOUS_FRAME is defined.
- +
- +The value of the expression must always be the correct address when
- +COUNT is zero, but may be NULL_RTX if there is no way to
- +determine the return address of other frames.
- +*/
- +#define RETURN_ADDR_RTX(COUNT, FRAMEADDR) avr32_return_addr(COUNT, FRAMEADDR)
- +
- +
- +/*
- +A C expression whose value is RTL representing the location of the
- +incoming return address at the beginning of any function, before the
- +prologue. This RTL is either a REG, indicating that the return
- +address is saved in 'REG', or a MEM representing a location in
- +the stack.
- +
- +You only need to define this macro if you want to support call frame
- +debugging information like that provided by DWARF 2.
- +
- +If this RTL is a REG, you should also define
- +DWARF_FRAME_RETURN_COLUMN to DWARF_FRAME_REGNUM (REGNO).
- +*/
- +#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM)
- +
- +/*
- +A C expression whose value is an integer giving the offset, in bytes,
- +from the value of the stack pointer register to the top of the stack
- +frame at the beginning of any function, before the prologue. The top of
- +the frame is defined to be the value of the stack pointer in the
- +previous frame, just before the call instruction.
- +
- +You only need to define this macro if you want to support call frame
- +debugging information like that provided by DWARF 2.
- +*/
- +#define INCOMING_FRAME_SP_OFFSET 0
- +
- +
- +/** Exception Handling Support **/
- +
- +/* Use setjmp/longjmp for exception handling. */
- +#define DWARF2_UNWIND_INFO 0
- +#define MUST_USE_SJLJ_EXCEPTIONS 1
- +
- +/*
- +A C expression whose value is the Nth register number used for
- +data by exception handlers, or INVALID_REGNUM if fewer than
- +N registers are usable.
- +
- +The exception handling library routines communicate with the exception
- +handlers via a set of agreed upon registers. Ideally these registers
- +should be call-clobbered; it is possible to use call-saved registers,
- +but may negatively impact code size. The target must support at least
- +2 data registers, but should define 4 if there are enough free registers.
- +
- +You must define this macro if you want to support call frame exception
- +handling like that provided by DWARF 2.
- +*/
- +/*
- + Use r9-r11
- +*/
- +#define EH_RETURN_DATA_REGNO(N) \
- + (((N) < 3) ? INTERNAL_REGNUM ((N) + 9) : INVALID_REGNUM)
- +
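So N = 0, 1, 2 map to assembly registers r9, r10, r11 (internal numbers 6, 5, 4) and anything higher is INVALID_REGNUM. A tiny sketch (not part of the patch), with INVALID_REGNUM stubbed as -1 for printing:

#include <stdio.h>

#define LAST_REGNUM 15
#define INTERNAL_REGNUM(reg) (LAST_REGNUM - (reg))
#define INVALID_REGNUM (-1)                /* stub for the GCC constant */
#define EH_RETURN_DATA_REGNO(N) \
  (((N) < 3) ? INTERNAL_REGNUM ((N) + 9) : INVALID_REGNUM)

int main (void)
{
  /* Prints 6 5 4 -1: r9, r10, r11, then out of range. */
  for (int n = 0; n < 4; n++)
    printf ("%d ", EH_RETURN_DATA_REGNO (n));
  printf ("\n");
  return 0;
}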
- +/*
- +A C expression whose value is RTL representing a location in which
- +to store a stack adjustment to be applied before function return.
- +This is used to unwind the stack to an exception handler's call frame.
- +It will be assigned zero on code paths that return normally.
- +
- +Typically this is a call-clobbered hard register that is otherwise
- +untouched by the epilogue, but could also be a stack slot.
- +
- +You must define this macro if you want to support call frame exception
- +handling like that provided by DWARF 2.
- +*/
- +/*
- + Use r8
- +*/
- +#define EH_RETURN_STACKADJ_REGNO INTERNAL_REGNUM(8)
- +#define EH_RETURN_STACKADJ_RTX gen_rtx_REG(SImode, EH_RETURN_STACKADJ_REGNO)
- +
- +/*
- +A C expression whose value is RTL representing a location in which
- +to store the address of an exception handler to which we should
- +return. It will not be assigned on code paths that return normally.
- +
- +Typically this is the location in the call frame at which the normal
- +return address is stored. For targets that return by popping an
- +address off the stack, this might be a memory address just below
- +the target call frame rather than inside the current call
- +frame. EH_RETURN_STACKADJ_RTX will have already been assigned,
- +so it may be used to calculate the location of the target call frame.
- +
- +Some targets have more complex requirements than storing to an
- +address calculable during initial code generation. In that case
- +the eh_return instruction pattern should be used instead.
- +
- +If you want to support call frame exception handling, you must
- +define either this macro or the eh_return instruction pattern.
- +*/
- +/*
- + We define the eh_return instruction pattern, so this isn't needed.
- +*/
- +/* #define EH_RETURN_HANDLER_RTX gen_rtx_REG(Pmode, RET_REGISTER) */
- +
- +/*
- + This macro chooses the encoding of pointers embedded in the
- + exception handling sections. If at all possible, this should be
- + defined such that the exception handling section will not require
- + dynamic relocations, and so may be read-only.
- +
- + code is 0 for data, 1 for code labels, 2 for function
- + pointers. global is true if the symbol may be affected by dynamic
- + relocations. The macro should return a combination of the DW_EH_PE_*
- + defines as found in dwarf2.h.
- +
- + If this macro is not defined, pointers will not be encoded but
- + represented directly.
- +*/
- +#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
- + ((flag_pic && (GLOBAL) ? DW_EH_PE_indirect : 0) \
- + | (flag_pic ? DW_EH_PE_pcrel : DW_EH_PE_absptr) \
- + | DW_EH_PE_sdata4)
- +
- +/* ToDo: The rest of this subsection */
- +
- +/** Specifying How Stack Checking is Done **/
- +/* ToDo: All in this subsection */
- +
- +/** Registers That Address the Stack Frame **/
- +
- +/*
- +The register number of the stack pointer register, which must also be a
- +fixed register according to FIXED_REGISTERS. On most machines,
- +the hardware determines which register this is.
- +*/
- +/* Using r13 as stack pointer. */
- +#define STACK_POINTER_REGNUM INTERNAL_REGNUM(13)
- +
- +/*
- +The register number of the frame pointer register, which is used to
- +access automatic variables in the stack frame. On some machines, the
- +hardware determines which register this is. On other machines, you can
- +choose any register you wish for this purpose.
- +*/
- +/* Use r7 */
- +#define FRAME_POINTER_REGNUM INTERNAL_REGNUM(7)
- +
- +/*
- +The register number of the arg pointer register, which is used to access
- +the function's argument list. On some machines, this is the same as the
- +frame pointer register. On some machines, the hardware determines which
- +register this is. On other machines, you can choose any register you
- +wish for this purpose. If this is not the same register as the frame
- +pointer register, then you must mark it as a fixed register according to
- +FIXED_REGISTERS, or arrange to be able to eliminate it (see Section
- +10.10.5 [Elimination], page 224).
- +*/
- +/* Using r4 */
- +#define ARG_POINTER_REGNUM INTERNAL_REGNUM(4)
- +
- +
- +/*
- +Register numbers used for passing a function's static chain pointer. If
- +register windows are used, the register number as seen by the called
- +function is STATIC_CHAIN_INCOMING_REGNUM, while the register
- +number as seen by the calling function is STATIC_CHAIN_REGNUM. If
- +these registers are the same, STATIC_CHAIN_INCOMING_REGNUM need
- +not be defined.
- +
- +The static chain register need not be a fixed register.
- +
- +If the static chain is passed in memory, these macros should not be
- +defined; instead, the next two macros should be defined.
- +*/
- +/* Using r0 */
- +#define STATIC_CHAIN_REGNUM INTERNAL_REGNUM(0)
- +
- +/** Eliminating Frame Pointer and Arg Pointer **/
- +
- +/*
- +A C expression which is nonzero if a function must have and use a frame
- +pointer. This expression is evaluated in the reload pass. If its value is
- +nonzero the function will have a frame pointer.
- +
- +The expression can in principle examine the current function and decide
- +according to the facts, but on most machines the constant 0 or the
- +constant 1 suffices. Use 0 when the machine allows code to be generated
- +with no frame pointer, and doing so saves some time or space. Use 1
- +when there is no possible advantage to avoiding a frame pointer.
- +
- +In certain cases, the compiler does not know how to produce valid code
- +without a frame pointer. The compiler recognizes those cases and
- +automatically gives the function a frame pointer regardless of what
- +FRAME_POINTER_REQUIRED says. You don't need to worry about
- +them.
- +
- +In a function that does not require a frame pointer, the frame pointer
- +register can be allocated for ordinary usage, unless you mark it as a
- +fixed register. See FIXED_REGISTERS for more information.
- +*/
- +/* We need the frame pointer when compiling for profiling */
- +#define FRAME_POINTER_REQUIRED (crtl->profile)
- +
- +/*
- +A C statement to store in the variable DEPTH_VAR the difference
- +between the frame pointer and the stack pointer values immediately after
- +the function prologue. The value would be computed from information
- +such as the result of get_frame_size () and the tables of
- +registers regs_ever_live and call_used_regs.
- +
- +If ELIMINABLE_REGS is defined, this macro will not be used and
- +need not be defined. Otherwise, it must be defined even if
- +FRAME_POINTER_REQUIRED is defined to always be true; in that
- +case, you may set DEPTH_VAR to anything.
- +*/
- +#define INITIAL_FRAME_POINTER_OFFSET(DEPTH_VAR) ((DEPTH_VAR) = get_frame_size())
- +
- +/*
- +If defined, this macro specifies a table of register pairs used to
- +eliminate unneeded registers that point into the stack frame. If it is not
- +defined, the only elimination attempted by the compiler is to replace
- +references to the frame pointer with references to the stack pointer.
- +
- +The definition of this macro is a list of structure initializations, each
- +of which specifies an original and replacement register.
- +
- +On some machines, the position of the argument pointer is not known until
- +the compilation is completed. In such a case, a separate hard register
- +must be used for the argument pointer. This register can be eliminated by
- +replacing it with either the frame pointer or the argument pointer,
- +depending on whether or not the frame pointer has been eliminated.
- +
- +In this case, you might specify:
- + #define ELIMINABLE_REGS \
- + {{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
- + {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
- + {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
- +
- +Note that the elimination of the argument pointer with the stack pointer is
- +specified first since that is the preferred elimination.
- +*/
- +#define ELIMINABLE_REGS \
- +{ \
- + { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
- + { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
- + { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM } \
- +}
- +
- +/*
- +A C expression that returns nonzero if the compiler is allowed to try
- +to replace register number FROM with register number
- +TO. This macro need only be defined if ELIMINABLE_REGS
- +is defined, and will usually be the constant 1, since most of the cases
- +preventing register elimination are things that the compiler already
- +knows about.
- +*/
- +#define CAN_ELIMINATE(FROM, TO) 1
- +
- +/*
- +This macro is similar to INITIAL_FRAME_POINTER_OFFSET. It
- +specifies the initial difference between the specified pair of
- +registers. This macro must be defined if ELIMINABLE_REGS is
- +defined.
- +*/
- +#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
- + ((OFFSET) = avr32_initial_elimination_offset(FROM, TO))
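- +
- +/*
- + Sketch of how the three macros above cooperate (illustrative; the
- + actual offsets live in avr32_initial_elimination_offset): since
- + CAN_ELIMINATE is always 1, an access such as (mem (plus arg_ptr N))
- + is rewritten as (mem (plus sp (N + offset))) whenever the stack
- + pointer elimination is chosen, with offset supplied through
- + INITIAL_ELIMINATION_OFFSET.
- +*/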
- +
- +/** Passing Function Arguments on the Stack **/
- +
- +
- +/*
- +A C expression. If nonzero, push insns will be used to pass
- +outgoing arguments.
- +If the target machine does not have a push instruction, set it to zero.
- +That directs GCC to use an alternate strategy: to
- +allocate the entire argument block and then store the arguments into
- +it. When PUSH_ARGS is nonzero, PUSH_ROUNDING must be defined too.
- +*/
- +#define PUSH_ARGS 1
- +
- +/*
- +A C expression that is the number of bytes actually pushed onto the
- +stack when an instruction attempts to push NPUSHED bytes.
- +
- +On some machines, the definition
- +
- + #define PUSH_ROUNDING(BYTES) (BYTES)
- +
- +will suffice. But on other machines, instructions that appear
- +to push one byte actually push two bytes in an attempt to maintain
- +alignment. Then the definition should be
- +
- + #define PUSH_ROUNDING(BYTES) (((BYTES) + 1) & ~1)
- +*/
- +/* Push 4 bytes at the time. */
- +#define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3)
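- +
- +/*
- + Worked examples of the rounding above (illustrative only):
- + PUSH_ROUNDING(1) == 4, PUSH_ROUNDING(4) == 4,
- + PUSH_ROUNDING(5) == 8, PUSH_ROUNDING(8) == 8.
- +*/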
- +
- +/*
- +A C expression. If nonzero, the maximum amount of space required for
- +outgoing arguments will be computed and placed into the variable
- +current_function_outgoing_args_size. No space will be pushed
- +onto the stack for each call; instead, the function prologue should
- +increase the stack frame size by this amount.
- +
- +Setting both PUSH_ARGS and ACCUMULATE_OUTGOING_ARGS is not proper.
- +*/
- +#define ACCUMULATE_OUTGOING_ARGS 0
- +
- +/*
- +A C expression that should indicate the number of bytes of its own
- +arguments that a function pops on returning, or 0 if the
- +function pops no arguments and the caller must therefore pop them all
- +after the function returns.
- +
- +FUNDECL is a C variable whose value is a tree node that describes
- +the function in question. Normally it is a node of type
- +FUNCTION_DECL that describes the declaration of the function.
- +From this you can obtain the DECL_ATTRIBUTES of the function.
- +
- +FUNTYPE is a C variable whose value is a tree node that
- +describes the function in question. Normally it is a node of type
- +FUNCTION_TYPE that describes the data type of the function.
- +From this it is possible to obtain the data types of the value and
- +arguments (if known).
- +
- +When a call to a library function is being considered, FUNDECL
- +will contain an identifier node for the library function. Thus, if
- +you need to distinguish among various library functions, you can do so
- +by their names. Note that "library function" in this context means
- +a function used to perform arithmetic, whose name is known specially
- +in the compiler and was not mentioned in the C code being compiled.
- +
- +STACK_SIZE is the number of bytes of arguments passed on the
- +stack. If a variable number of bytes is passed, it is zero, and
- +argument popping will always be the responsibility of the calling function.
- +
- +On the VAX, all functions always pop their arguments, so the definition
- +of this macro is STACK_SIZE. On the 68000, using the standard
- +calling convention, no functions pop their arguments, so the value of
- +the macro is always 0 in this case. But an alternative calling
- +convention is available in which functions that take a fixed number of
- +arguments pop them but other functions (such as printf) pop
- +nothing (the caller pops all). When this convention is in use,
- +FUNTYPE is examined to determine whether a function takes a fixed
- +number of arguments.
- +*/
- +#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, STACK_SIZE) 0
- +
- +
- +/* Return true if we can use a single return instruction for this function. */
- +#define USE_RETURN_INSN(ISCOND) avr32_use_return_insn(ISCOND)
- +
- +/*
- +A C expression that should indicate the number of bytes a call sequence
- +pops off the stack. It is added to the value of RETURN_POPS_ARGS
- +when compiling a function call.
- +
- +CUM is the variable in which all arguments to the called function
- +have been accumulated.
- +
- +On certain architectures, such as the SH5, a call trampoline is used
- +that pops certain registers off the stack, depending on the arguments
- +that have been passed to the function. Since this is a property of the
- +call site, not of the called function, RETURN_POPS_ARGS is not
- +appropriate.
- +*/
- +#define CALL_POPS_ARGS(CUM) 0
- +
- +/* Passing Arguments in Registers */
- +
- +/*
- +A C expression that controls whether a function argument is passed
- +in a register, and which register.
- +
- +The arguments are CUM, which summarizes all the previous
- +arguments; MODE, the machine mode of the argument; TYPE,
- +the data type of the argument as a tree node or 0 if that is not known
- +(which happens for C support library functions); and NAMED,
- +which is 1 for an ordinary argument and 0 for nameless arguments that
- +correspond to '...' in the called function's prototype.
- +TYPE can be an incomplete type if a syntax error has previously
- +occurred.
- +
- +The value of the expression is usually either a reg RTX for the
- +hard register in which to pass the argument, or zero to pass the
- +argument on the stack.
- +
- +For machines like the VAX and 68000, where normally all arguments are
- +pushed, zero suffices as a definition.
- +
- +The value of the expression can also be a parallel RTX. This is
- +used when an argument is passed in multiple locations. The mode of
- +the parallel should be the mode of the entire argument. The
- +parallel holds any number of expr_list pairs; each one
- +describes where part of the argument is passed. In each
- +expr_list the first operand must be a reg RTX for the hard
- +register in which to pass this part of the argument, and the mode of the
- +register RTX indicates how large this part of the argument is. The
- +second operand of the expr_list is a const_int which gives
- +the offset in bytes into the entire argument of where this part starts.
- +As a special exception the first expr_list in the parallel
- +RTX may have a first operand of zero. This indicates that the entire
- +argument is also stored on the stack.
- +
- +The last time this macro is called, it is called with MODE == VOIDmode,
- +and its result is passed to the call or call_value
- +pattern as operands 2 and 3 respectively.
- +
- +The usual way to make the ISO library 'stdarg.h' work on a machine
- +where some arguments are usually passed in registers, is to cause
- +nameless arguments to be passed on the stack instead. This is done
- +by making FUNCTION_ARG return 0 whenever NAMED is 0.
- +
- +You may use the macro MUST_PASS_IN_STACK (MODE, TYPE)
- +in the definition of this macro to determine if this argument is of a
- +type that must be passed in the stack. If REG_PARM_STACK_SPACE
- +is not defined and FUNCTION_ARG returns nonzero for such an
- +argument, the compiler will abort. If REG_PARM_STACK_SPACE is
- +defined, the argument will be computed in the stack and then loaded into
- +a register. */
- +
- +#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
- + avr32_function_arg(&(CUM), MODE, TYPE, NAMED)
- +
- +/*
- +A C type for declaring a variable that is used as the first argument of
- +FUNCTION_ARG and other related values. For some target machines,
- +the type int suffices and can hold the number of bytes of
- +argument so far.
- +
- +There is no need to record in CUMULATIVE_ARGS anything about the
- +arguments that have been passed on the stack. The compiler has other
- +variables to keep track of that. For target machines on which all
- +arguments are passed on the stack, there is no need to store anything in
- +CUMULATIVE_ARGS; however, the data structure must exist and
- +should not be empty, so use int.
- +*/
- +typedef struct avr32_args
- +{
- + /* Index representing the argument register the current function argument
- + will occupy */
- + int index;
- + /* A mask with bits representing the argument registers: if a bit is set
- + then this register is used for an argument */
- + int used_index;
- + /* TRUE if this function has anonymous arguments */
- + int uses_anonymous_args;
- + /* The size in bytes of the named arguments pushed on the stack */
- + int stack_pushed_args_size;
- + /* Set to true if this function needs a Return Value Pointer */
- + int use_rvp;
- + /* Set to true if function is a flashvault function. */
- + int flashvault_func;
- +
- +} CUMULATIVE_ARGS;
- +
- +
- +#define FIRST_CUM_REG_INDEX 0
- +#define LAST_CUM_REG_INDEX 4
- +#define GET_REG_INDEX(CUM) ((CUM)->index)
- +#define SET_REG_INDEX(CUM, INDEX) ((CUM)->index = (INDEX))
- +#define GET_USED_INDEX(CUM, INDEX) ((CUM)->used_index & (1 << (INDEX)))
- +#define SET_USED_INDEX(CUM, INDEX) \
- + do \
- + { \
- + if ((INDEX) >= 0) \
- + (CUM)->used_index |= (1 << (INDEX)); \
- + } \
- + while (0)
- +#define SET_INDEXES_UNUSED(CUM) ((CUM)->used_index = 0)
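- +
- +/*
- + Illustrative use of the accessors above (hypothetical snippet, not part
- + of the port); note that they expect a pointer to the CUMULATIVE_ARGS:
- +
- + CUMULATIVE_ARGS cum;
- + SET_INDEXES_UNUSED (&cum); // used_index = 0
- + SET_USED_INDEX (&cum, 2); // used_index = 1 << 2
- + if (GET_USED_INDEX (&cum, 2)) // nonzero: register index 2 is taken
- + ...
- +*/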
- +
- +/*
- + A C statement (sans semicolon) for initializing the variable cum for the
- + state at the beginning of the argument list. The variable has type
- + CUMULATIVE_ARGS. The value of FNTYPE is the tree node for the data type of
- + the function which will receive the args, or 0 if the args are to a compiler
- + support library function. For direct calls that are not libcalls, FNDECL
- + contains the declaration node of the function. FNDECL is also set when
- + INIT_CUMULATIVE_ARGS is used to find arguments for the function being
- + compiled. N_NAMED_ARGS is set to the number of named arguments, including a
- + structure return address if it is passed as a parameter, when making a call.
- + When processing incoming arguments, N_NAMED_ARGS is set to -1.
- +
- + When processing a call to a compiler support library function, LIBNAME
- + identifies which one. It is a symbol_ref rtx which contains the name of the
- + function, as a string. LIBNAME is 0 when an ordinary C function call is
- + being processed. Thus, each time this macro is called, either LIBNAME or
- + FNTYPE is nonzero, but never both of them at once.
- +*/
- +#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
- + avr32_init_cumulative_args(&(CUM), FNTYPE, LIBNAME, FNDECL)
- +
- +/*
- +A C statement (sans semicolon) to update the summarizer variable
- +CUM to advance past an argument in the argument list. The
- +values MODE, TYPE and NAMED describe that argument.
- +Once this is done, the variable CUM is suitable for analyzing
- +the following argument with FUNCTION_ARG, etc.
- +
- +This macro need not do anything if the argument in question was passed
- +on the stack. The compiler knows how to track the amount of stack space
- +used for arguments without any special help.
- +*/
- +#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
- + avr32_function_arg_advance(&(CUM), MODE, TYPE, NAMED)
- +
- +/*
- +If defined, a C expression which determines whether, and in which direction,
- +to pad out an argument with extra space. The value should be of type
- +enum direction: either 'upward' to pad above the argument,
- +'downward' to pad below, or 'none' to inhibit padding.
- +
- +The amount of padding is always just enough to reach the next
- +multiple of FUNCTION_ARG_BOUNDARY; this macro does not control
- +it.
- +
- +This macro has a default definition which is right for most systems.
- +For little-endian machines, the default is to pad upward. For
- +big-endian machines, the default is to pad downward for an argument of
- +constant size shorter than an int, and upward otherwise.
- +*/
- +#define FUNCTION_ARG_PADDING(MODE, TYPE) \
- + avr32_function_arg_padding(MODE, TYPE)
- +
- +/*
- + Specify padding for the last element of a block move between registers
- + and memory. First is nonzero if this is the only element. Defining
- + this macro allows better control of register function parameters on
- + big-endian machines, without using PARALLEL rtl. In particular,
- + MUST_PASS_IN_STACK need not test padding and mode of types in registers,
- + as there is no longer a "wrong" part of a register. For example, a
- + three-byte aggregate may be passed in the high part of a register if so required.
- +*/
- +#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
- + avr32_function_arg_padding(MODE, TYPE)
- +
- +/*
- +If defined, a C expression which determines whether the default
- +implementation of va_arg will attempt to pad down before reading the
- +next argument, if that argument is smaller than its aligned space as
- +controlled by PARM_BOUNDARY. If this macro is not defined, all such
- +arguments are padded down if BYTES_BIG_ENDIAN is true.
- +*/
- +#define PAD_VARARGS_DOWN \
- + (FUNCTION_ARG_PADDING (TYPE_MODE (type), type) == downward)
- +
- +/*
- +A C expression that is nonzero if REGNO is the number of a hard
- +register in which function arguments are sometimes passed. This does
- +not include implicit arguments such as the static chain and
- +the structure-value address. On many machines, no registers can be
- +used for this purpose since all function arguments are pushed on the
- +stack.
- +*/
- +/*
- + Use r8 - r12 for function arguments.
- +*/
- +#define FUNCTION_ARG_REGNO_P(REGNO) \
- + ((REGNO) >= 3 && (REGNO) <= 7)
- +
- +/* Number of registers used for passing function arguments */
- +#define NUM_ARG_REGS 5
- +
- +/*
- +If defined, the order in which arguments are loaded into their
- +respective argument registers is reversed so that the last
- +argument is loaded first. This macro only affects arguments
- +passed in registers.
- +*/
- +/* #define LOAD_ARGS_REVERSED */
- +
- +/** How Scalar Function Values Are Returned **/
- +
- +/* AVR32 uses r12 as the return register. */
- +#define RET_REGISTER (15 - 12)
- +
- +/*
- +A C expression to create an RTX representing the place where a library
- +function returns a value of mode MODE. If the precise function
- +being called is known, FUNC is a tree node
- +(FUNCTION_DECL) for it; otherwise, func is a null
- +pointer. This makes it possible to use a different value-returning
- +convention for specific functions when all their calls are
- +known.
- +
- +Note that "library function" in this context means a compiler
- +support routine, used to perform arithmetic, whose name is known
- +specially by the compiler and was not mentioned in the C code being
- +compiled.
- +
- +The definition of LIBCALL_VALUE need not be concerned with aggregate
- +data types, because none of the library functions returns such types.
- +*/
- +#define LIBCALL_VALUE(MODE) avr32_libcall_value(MODE)
- +
- +/*
- +A C expression that is nonzero if REGNO is the number of a hard
- +register in which the values of called function may come back.
- +
- +A register whose use for returning values is limited to serving as the
- +second of a pair (for a value of type double, say) need not be
- +recognized by this macro. So for most machines, this definition
- +suffices:
- + #define FUNCTION_VALUE_REGNO_P(N) ((N) == 0)
- +
- +If the machine has register windows, so that the caller and the called
- +function use different registers for the return value, this macro
- +should recognize only the caller's register numbers.
- +*/
- +/*
- + When returning a value of mode DImode, r11:r10 is used, else r12 is used.
- +*/
- +#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == RET_REGISTER \
- + || (REGNO) == INTERNAL_REGNUM(11))
- +
- +
- +/** How Large Values Are Returned **/
- +
- +
- +/*
- +Define this macro to be 1 if all structure and union return values must be
- +in memory. Since this results in slower code, this should be defined
- +only if needed for compatibility with other compilers or with an ABI.
- +If you define this macro to be 0, then the conventions used for structure
- +and union return values are decided by the RETURN_IN_MEMORY macro.
- +
- +If not defined, this defaults to the value 1.
- +*/
- +#define DEFAULT_PCC_STRUCT_RETURN 0
- +
- +
- +
- +
- +/** Generating Code for Profiling **/
- +
- +/*
- +A C statement or compound statement to output to FILE some
- +assembler code to call the profiling subroutine mcount.
- +
- +The details of how mcount expects to be called are determined by
- +your operating system environment, not by GCC. To figure them out,
- +compile a small program for profiling using the system's installed C
- +compiler and look at the assembler code that results.
- +
- +Older implementations of mcount expect the address of a counter
- +variable to be loaded into some register. The name of this variable is
- +'LP' followed by the number LABELNO, so you would generate
- +the name using 'LP%d' in a fprintf.
- +*/
- +/* ToDo: fixme */
- +#ifndef FUNCTION_PROFILER
- +#define FUNCTION_PROFILER(FILE, LABELNO) \
- + fprintf((FILE), "/* profiler %d */", (LABELNO))
- +#endif
- +
- +
- +/*****************************************************************************
- + * Trampolines for Nested Functions *
- + *****************************************************************************/
- +
- +/*
- +A C statement to output, on the stream FILE, assembler code for a
- +block of data that contains the constant parts of a trampoline. This
- +code should not include a label - the label is taken care of
- +automatically.
- +
- +If you do not define this macro, it means no template is needed
- +for the target. Do not define this macro on systems where the block move
- +code to copy the trampoline into place would be larger than the code
- +to generate it on the spot.
- +*/
- +/* ToDo: correct? */
- +#define TRAMPOLINE_TEMPLATE(FILE) avr32_trampoline_template(FILE);
- +
- +
- +/*
- +A C expression for the size in bytes of the trampoline, as an integer.
- +*/
- +/* ToDo: fixme */
- +#define TRAMPOLINE_SIZE 0x0C
- +
- +/*
- +Alignment required for trampolines, in bits.
- +
- +If you don't define this macro, the value of BIGGEST_ALIGNMENT
- +is used for aligning trampolines.
- +*/
- +#define TRAMPOLINE_ALIGNMENT 16
- +
- +/*
- +A C statement to initialize the variable parts of a trampoline.
- +ADDR is an RTX for the address of the trampoline; FNADDR is
- +an RTX for the address of the nested function; STATIC_CHAIN is an
- +RTX for the static chain value that should be passed to the function
- +when it is called.
- +*/
- +#define INITIALIZE_TRAMPOLINE(ADDR, FNADDR, STATIC_CHAIN) \
- + avr32_initialize_trampoline(ADDR, FNADDR, STATIC_CHAIN)
- +
- +
- +/******************************************************************************
- + * Implicit Calls to Library Routines
- + *****************************************************************************/
- +
- +/* Tail calling. */
- +
- +/* A C expression that evaluates to true if it is ok to perform a sibling
- + call to DECL. */
- +#define FUNCTION_OK_FOR_SIBCALL(DECL) 0
- +
- +#define OVERRIDE_OPTIONS avr32_override_options ()
- +
- +#define OPTIMIZATION_OPTIONS(LEVEL, SIZE) avr32_optimization_options (LEVEL, SIZE)
- +
- +/******************************************************************************
- + * Addressing Modes
- + *****************************************************************************/
- +
- +/*
- +A C expression that is nonzero if the machine supports pre-increment,
- +pre-decrement, post-increment, or post-decrement addressing respectively.
- +*/
- +/*
- + AVR32 supports Rp++ and --Rp
- +*/
- +#define HAVE_PRE_INCREMENT 0
- +#define HAVE_PRE_DECREMENT 1
- +#define HAVE_POST_INCREMENT 1
- +#define HAVE_POST_DECREMENT 0
- +
- +/*
- +A C expression that is nonzero if the machine supports pre- or
- +post-address side-effect generation involving constants other than
- +the size of the memory operand.
- +*/
- +#define HAVE_PRE_MODIFY_DISP 0
- +#define HAVE_POST_MODIFY_DISP 0
- +
- +/*
- +A C expression that is nonzero if the machine supports pre- or
- +post-address side-effect generation involving a register displacement.
- +*/
- +#define HAVE_PRE_MODIFY_REG 0
- +#define HAVE_POST_MODIFY_REG 0
- +
- +/*
- +A C expression that is 1 if the RTX X is a constant which
- +is a valid address. On most machines, this can be defined as
- +CONSTANT_P (X), but a few machines are more restrictive
- +in which constant addresses are supported.
- +
- +CONSTANT_P accepts integer-values expressions whose values are
- +not explicitly known, such as symbol_ref, label_ref, and
- +high expressions and const arithmetic expressions, in
- +addition to const_int and const_double expressions.
- +*/
- +#define CONSTANT_ADDRESS_P(X) CONSTANT_P(X)
- +
- +/*
- +A number, the maximum number of registers that can appear in a valid
- +memory address. Note that it is up to you to specify a value equal to
- +the maximum number that GO_IF_LEGITIMATE_ADDRESS would ever
- +accept.
- +*/
- +#define MAX_REGS_PER_ADDRESS 2
- +
- +/*
- +A C compound statement with a conditional goto LABEL;
- +executed if X (an RTX) is a legitimate memory address on the
- +target machine for a memory operand of mode MODE.
- +
- +It usually pays to define several simpler macros to serve as
- +subroutines for this one. Otherwise it may be too complicated to
- +understand.
- +
- +This macro must exist in two variants: a strict variant and a
- +non-strict one. The strict variant is used in the reload pass. It
- +must be defined so that any pseudo-register that has not been
- +allocated a hard register is considered a memory reference. In
- +contexts where some kind of register is required, a pseudo-register
- +with no hard register must be rejected.
- +
- +The non-strict variant is used in other passes. It must be defined to
- +accept all pseudo-registers in every context where some kind of
- +register is required.
- +
- +Compiler source files that want to use the strict variant of this
- +macro define the macro REG_OK_STRICT. You should use an
- +#ifdef REG_OK_STRICT conditional to define the strict variant
- +in that case and the non-strict variant otherwise.
- +
- +Subroutines to check for acceptable registers for various purposes (one
- +for base registers, one for index registers, and so on) are typically
- +among the subroutines used to define GO_IF_LEGITIMATE_ADDRESS.
- +Then only these subroutine macros need have two variants; the higher
- +levels of macros may be the same whether strict or not.
- +
- +Normally, constant addresses which are the sum of a symbol_ref
- +and an integer are stored inside a const RTX to mark them as
- +constant. Therefore, there is no need to recognize such sums
- +specifically as legitimate addresses. Normally you would simply
- +recognize any const as legitimate.
- +
- +Usually PRINT_OPERAND_ADDRESS is not prepared to handle constant
- +sums that are not marked with const. It assumes that a naked
- +plus indicates indexing. If so, then you must reject such
- +naked constant sums as illegitimate addresses, so that none of them will
- +be given to PRINT_OPERAND_ADDRESS.
- +
- +On some machines, whether a symbolic address is legitimate depends on
- +the section that the address refers to. On these machines, define the
- +macro ENCODE_SECTION_INFO to store the information into the
- +symbol_ref, and then check for it here. When you see a
- +const, you will have to look inside it to find the
- +symbol_ref in order to determine the section.
- +
- +The best way to modify the name string is by adding text to the
- +beginning, with suitable punctuation to prevent any ambiguity. Allocate
- +the new name in saveable_obstack. You will have to modify
- +ASM_OUTPUT_LABELREF to remove and decode the added text and
- +output the name accordingly, and define STRIP_NAME_ENCODING to
- +access the original name string.
- +
- +You can check the information stored here into the symbol_ref in
- +the definitions of the macros GO_IF_LEGITIMATE_ADDRESS and
- +PRINT_OPERAND_ADDRESS.
- +*/
- +#ifdef REG_OK_STRICT
- +# define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
- + do \
- + { \
- + if (avr32_legitimate_address(MODE, X, 1)) \
- + goto LABEL; \
- + } \
- + while (0)
- +#else
- +# define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
- + do \
- + { \
- + if (avr32_legitimate_address(MODE, X, 0)) \
- + goto LABEL; \
- + } \
- + while (0)
- +#endif
- +
- +
- +
- +/*
- +A C compound statement that attempts to replace X with a valid
- +memory address for an operand of mode MODE. WIN will be a
- +C statement label elsewhere in the code; the macro definition may use
- +
- + GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN);
- +
- +to avoid further processing if the address has become legitimate.
- +
- +X will always be the result of a call to break_out_memory_refs,
- +and OLDX will be the operand that was given to that function to produce
- +X.
- +
- +The code generated by this macro should not alter the substructure of
- +X. If it transforms X into a more legitimate form, it
- +should assign X (which will always be a C variable) a new value.
- +
- +It is not necessary for this macro to come up with a legitimate
- +address. The compiler has standard ways of doing so in all cases. In
- +fact, it is safe for this macro to do nothing. But often a
- +machine-dependent strategy can generate better code.
- +*/
- +#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
- + do \
- + { \
- + if (GET_CODE(X) == PLUS \
- + && GET_CODE(XEXP(X, 0)) == REG \
- + && GET_CODE(XEXP(X, 1)) == CONST_INT \
- + && !CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(X, 1)), \
- + 'K', "Ks16")) \
- + { \
- + rtx index = force_reg(SImode, XEXP(X, 1)); \
- + X = gen_rtx_PLUS( SImode, XEXP(X, 0), index); \
- + } \
- + GO_IF_LEGITIMATE_ADDRESS(MODE, X, WIN); \
- + } \
- + while(0)
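- +
- +/*
- + Example of the transformation above (illustrative): for an address such
- + as (plus (reg r6) (const_int 0x12345)) the displacement fails the
- + "Ks16" constraint (presumably a signed 16-bit offset), so the constant
- + is forced into a fresh register and the address becomes
- + (plus (reg r6) (reg tmp)), a base + index form that the subsequent
- + GO_IF_LEGITIMATE_ADDRESS check can accept.
- +*/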
- +
- +
- +/*
- +A C statement or compound statement with a conditional
- +goto LABEL; executed if memory address X (an RTX) can have
- +different meanings depending on the machine mode of the memory
- +reference it is used for or if the address is valid for some modes
- +but not others.
- +
- +Autoincrement and autodecrement addresses typically have mode-dependent
- +effects because the amount of the increment or decrement is the size
- +of the operand being addressed. Some machines have other mode-dependent
- +addresses. Many RISC machines have no mode-dependent addresses.
- +
- +You may assume that ADDR is a valid address for the machine.
- +*/
- +#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \
- + do \
- + { \
- + if (GET_CODE (ADDR) == POST_INC \
- + || GET_CODE (ADDR) == PRE_DEC) \
- + goto LABEL; \
- + } \
- + while (0)
- +
- +/*
- +A C expression that is nonzero if X is a legitimate constant for
- +an immediate operand on the target machine. You can assume that
- +X satisfies CONSTANT_P, so you need not check this. In fact,
- +'1' is a suitable definition for this macro on machines where
- +anything CONSTANT_P is valid.
- +*/
- +#define LEGITIMATE_CONSTANT_P(X) avr32_legitimate_constant_p(X)
- +
- +
- +/******************************************************************************
- + * Condition Code Status
- + *****************************************************************************/
- +
- +/*
- +C code for a data type which is used for declaring the mdep
- +component of cc_status. It defaults to int.
- +
- +This macro is not used on machines that do not use cc0.
- +*/
- +
- +typedef struct
- +{
- + int flags;
- + rtx value;
- + int cond_exec_cmp_clobbered;
- +} avr32_status_reg;
- +
- +
- +#define CC_STATUS_MDEP avr32_status_reg
- +
- +/*
- +A C expression to initialize the mdep field to "empty".
- +The default definition does nothing, since most machines don't use
- +the field anyway. If you want to use the field, you should probably
- +define this macro to initialize it.
- +
- +This macro is not used on machines that do not use cc0.
- +*/
- +
- +#define CC_STATUS_MDEP_INIT \
- + (cc_status.mdep.flags = CC_NONE , cc_status.mdep.cond_exec_cmp_clobbered = 0, cc_status.mdep.value = 0)
- +
- +/*
- +A C compound statement to set the components of cc_status
- +appropriately for an insn INSN whose body is EXP. It is
- +this macro's responsibility to recognize insns that set the condition
- +code as a byproduct of other activity as well as those that explicitly
- +set (cc0).
- +
- +This macro is not used on machines that do not use cc0.
- +
- +If there are insns that do not set the condition code but do alter
- +other machine registers, this macro must check to see whether they
- +invalidate the expressions that the condition code is recorded as
- +reflecting. For example, on the 68000, insns that store in address
- +registers do not set the condition code, which means that usually
- +NOTICE_UPDATE_CC can leave cc_status unaltered for such
- +insns. But suppose that the previous insn set the condition code
- +based on location 'a4@(102)' and the current insn stores a new
- +value in 'a4'. Although the condition code is not changed by
- +this, it will no longer be true that it reflects the contents of
- +'a4@(102)'. Therefore, NOTICE_UPDATE_CC must alter
- +cc_status in this case to say that nothing is known about the
- +condition code value.
- +
- +The definition of NOTICE_UPDATE_CC must be prepared to deal
- +with the results of peephole optimization: insns whose patterns are
- +parallel RTXs containing various reg, mem or
- +constants which are just the operands. The RTL structure of these
- +insns is not sufficient to indicate what the insns actually do. What
- +NOTICE_UPDATE_CC should do when it sees one is just to run
- +CC_STATUS_INIT.
- +
- +A possible definition of NOTICE_UPDATE_CC is to call a function
- +that looks at an attribute (see Insn Attributes) named, for example,
- +'cc'. This avoids having detailed information about patterns in
- +two places, the 'md' file and in NOTICE_UPDATE_CC.
- +*/
- +
- +#define NOTICE_UPDATE_CC(EXP, INSN) avr32_notice_update_cc(EXP, INSN)
- +
- +
- +
- +
- +/******************************************************************************
- + * Describing Relative Costs of Operations
- + *****************************************************************************/
- +
- +
- +
- +/*
- +A C expression for the cost of moving data of mode MODE from a
- +register in class FROM to one in class TO. The classes are
- +expressed using the enumeration values such as GENERAL_REGS. A
- +value of 2 is the default; other values are interpreted relative to
- +that.
- +
- +It is not required that the cost always equal 2 when FROM is the
- +same as TO; on some machines it is expensive to move between
- +registers if they are not general registers.
- +
- +If reload sees an insn consisting of a single set between two
- +hard registers, and if REGISTER_MOVE_COST applied to their
- +classes returns a value of 2, reload does not check to ensure that the
- +constraints of the insn are met. Setting a cost of other than 2 will
- +allow reload to verify that the constraints are met. You should do this
- +if the movm pattern's constraints do not allow such copying.
- +*/
- +#define REGISTER_MOVE_COST(MODE, FROM, TO) \
- + ((GET_MODE_SIZE(MODE) <= 4) ? 2: \
- + (GET_MODE_SIZE(MODE) <= 8) ? 3: \
- + 4)
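- +
- +/*
- + Illustrative values of the expression above: SImode (4 bytes) -> 2,
- + DImode (8 bytes) -> 3, anything wider -> 4.
- +*/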
- +
- +/*
- +A C expression for the cost of moving data of mode MODE between a
- +register of class CLASS and memory; IN is zero if the value
- +is to be written to memory, nonzero if it is to be read in. This cost
- +is relative to those in REGISTER_MOVE_COST. If moving between
- +registers and memory is more expensive than between two registers, you
- +should define this macro to express the relative cost.
- +
- +If you do not define this macro, GCC uses a default cost of 4 plus
- +the cost of copying via a secondary reload register, if one is
- +needed. If your machine requires a secondary reload register to copy
- +between memory and a register of CLASS but the reload mechanism is
- +more complex than copying via an intermediate, define this macro to
- +reflect the actual cost of the move.
- +
- +GCC defines the function memory_move_secondary_cost if
- +secondary reloads are needed. It computes the costs due to copying via
- +a secondary register. If your machine copies from memory using a
- +secondary register in the conventional way but the default base value of
- +4 is not correct for your machine, define this macro to add some other
- +value to the result of that function. The arguments to that function
- +are the same as to this macro.
- +*/
- +/*
- + Memory moves are costly
- +*/
- +#define MEMORY_MOVE_COST(MODE, CLASS, IN) \
- + (((IN) ? ((GET_MODE_SIZE(MODE) < 4) ? 4 : \
- + (GET_MODE_SIZE(MODE) > 8) ? 6 : \
- + 3) \
- + : ((GET_MODE_SIZE(MODE) > 8) ? 6 : 3)))
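- +
- +/*
- + Illustrative values of the expression above:
- + loads (IN != 0): narrower than 4 bytes -> 4, 4..8 bytes -> 3, wider -> 6
- + stores (IN == 0): up to 8 bytes -> 3, wider -> 6
- +*/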
- +
- +/*
- +A C expression for the cost of a branch instruction. A value of 1 is
- +the default; other values are interpreted relative to that.
- +*/
- + /* Try to use conditionals as much as possible */
- +#define BRANCH_COST(speed_p, predictable_p) (TARGET_BRANCH_PRED ? 3 : 4)
- +
- +/*A C expression for the maximum number of instructions to execute via conditional
- + execution instructions instead of a branch. A value of BRANCH_COST+1 is the default
- + if the machine does not use cc0, and 1 if it does use cc0.*/
- +#define MAX_CONDITIONAL_EXECUTE 4
- +
- +/*
- +Define this macro as a C expression which is nonzero if accessing less
- +than a word of memory (i.e. a char or a short) is no
- +faster than accessing a word of memory, i.e., if such accesses
- +require more than one instruction or if there is no difference in cost
- +between byte and (aligned) word loads.
- +
- +When this macro is not defined, the compiler will access a field by
- +finding the smallest containing object; when it is defined, a fullword
- +load will be used if alignment permits. Unless byte accesses are
- +faster than word accesses, using word accesses is preferable since it
- +may eliminate subsequent memory access if subsequent accesses occur to
- +other fields in the same word of the structure, but to different bytes.
- +*/
- +#define SLOW_BYTE_ACCESS 1
- +
- +
- +/*
- +Define this macro if it is as good or better to call a constant
- +function address than to call an address kept in a register.
- +*/
- +#define NO_FUNCTION_CSE
- +
- +
- +/******************************************************************************
- + * Adjusting the Instruction Scheduler
- + *****************************************************************************/
- +
- +/*****************************************************************************
- + * Dividing the Output into Sections (Texts, Data, ...) *
- + *****************************************************************************/
- +
- +/*
- +A C expression whose value is a string, including spacing, containing the
- +assembler operation that should precede instructions and read-only data.
- +Normally "\t.text" is right.
- +*/
- +#define TEXT_SECTION_ASM_OP "\t.text"
- +/*
- +A C statement that switches to the default section containing instructions.
- +Normally this is not needed, as simply defining TEXT_SECTION_ASM_OP
- +is enough. The MIPS port uses this to sort all functions after all data
- +declarations.
- +*/
- +/* #define TEXT_SECTION */
- +
- +/*
- +A C expression whose value is a string, including spacing, containing the
- +assembler operation to identify the following data as writable initialized
- +data. Normally "\t.data" is right.
- +*/
- +#define DATA_SECTION_ASM_OP "\t.data"
- +
- +/*
- +If defined, a C expression whose value is a string, including spacing,
- +containing the assembler operation to identify the following data as
- +shared data. If not defined, DATA_SECTION_ASM_OP will be used.
- +*/
- +
- +/*
- +A C expression whose value is a string, including spacing, containing
- +the assembler operation to identify the following data as read-only
- +initialized data.
- +*/
- +#undef READONLY_DATA_SECTION_ASM_OP
- +#define READONLY_DATA_SECTION_ASM_OP \
- + ((TARGET_USE_RODATA_SECTION) ? \
- + "\t.section\t.rodata" : \
- + TEXT_SECTION_ASM_OP )
- +
- +
- +/*
- +If defined, a C expression whose value is a string, including spacing,
- +containing the assembler operation to identify the following data as
- +uninitialized global data. If not defined, and neither
- +ASM_OUTPUT_BSS nor ASM_OUTPUT_ALIGNED_BSS are defined,
- +uninitialized global data will be output in the data section if
- +-fno-common is passed, otherwise ASM_OUTPUT_COMMON will be
- +used.
- +*/
- +#define BSS_SECTION_ASM_OP "\t.section\t.bss"
- +
- +/*
- +If defined, a C expression whose value is a string, including spacing,
- +containing the assembler operation to identify the following data as
- +uninitialized global shared data. If not defined, and
- +BSS_SECTION_ASM_OP is, the latter will be used.
- +*/
- +/*#define SHARED_BSS_SECTION_ASM_OP "\trseg\tshared_bbs_section:data:noroot(0)\n"*/
- +/*
- +If defined, a C expression whose value is a string, including spacing,
- +containing the assembler operation to identify the following data as
- +initialization code. If not defined, GCC will assume such a section does
- +not exist.
- +*/
- +#undef INIT_SECTION_ASM_OP
- +#define INIT_SECTION_ASM_OP "\t.section\t.init"
- +
- +/*
- +If defined, a C expression whose value is a string, including spacing,
- +containing the assembler operation to identify the following data as
- +finalization code. If not defined, GCC will assume such a section does
- +not exist.
- +*/
- +#undef FINI_SECTION_ASM_OP
- +#define FINI_SECTION_ASM_OP "\t.section\t.fini"
- +
- +/*
- +If defined, an ASM statement that switches to a different section
- +via SECTION_OP, calls FUNCTION, and switches back to
- +the text section. This is used in crtstuff.c if
- +INIT_SECTION_ASM_OP or FINI_SECTION_ASM_OP is defined, to call
- +initialization and finalization functions from the init and fini
- +sections. By default, this macro uses a simple function call. Some
- +ports need hand-crafted assembly code to avoid dependencies on
- +registers initialized in the function prologue or to ensure that
- +constant pools don't end up too far away in the text section.
- +*/
- +#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
- + asm ( SECTION_OP "\n" \
- + "mcall r6[" USER_LABEL_PREFIX #FUNC "@got]\n" \
- + TEXT_SECTION_ASM_OP);
- +
- +
- +/*
- +Define this macro to be an expression with a nonzero value if jump
- +tables (for tablejump insns) should be output in the text
- +section, along with the assembler instructions. Otherwise, the
- +readonly data section is used.
- +
- +This macro is irrelevant if there is no separate readonly data section.
- +*/
- +/* Put jump tables in text section if we have caches. Otherwise assume that
- + loading data from code memory is slow. */
- +#define JUMP_TABLES_IN_TEXT_SECTION \
- + (TARGET_CACHES ? 1 : 0)
- +
- +
- +/******************************************************************************
- + * Position Independent Code (PIC)
- + *****************************************************************************/
- +
- +#ifndef AVR32_ALWAYS_PIC
- +#define AVR32_ALWAYS_PIC 0
- +#endif
- +
- +/* GOT is set to r6 */
- +#define PIC_OFFSET_TABLE_REGNUM INTERNAL_REGNUM(6)
- +
- +/*
- +A C expression that is nonzero if X is a legitimate immediate
- +operand on the target machine when generating position independent code.
- +You can assume that X satisfies CONSTANT_P, so you need not
- +check this. You can also assume flag_pic is true, so you need not
- +check it either. You need not define this macro if all constants
- +(including SYMBOL_REF) can be immediate operands when generating
- +position independent code.
- +*/
- +/* We can't directly access anything that contains a symbol,
- + nor can we indirect via the constant pool. */
- +#define LEGITIMATE_PIC_OPERAND_P(X) avr32_legitimate_pic_operand_p(X)
- +
- +
- +/* We need to know when we are making a constant pool; this determines
- + whether data needs to be in the GOT or can be referenced via a GOT
- + offset. */
- +extern int making_const_table;
- +
- +/******************************************************************************
- + * Defining the Output Assembler Language
- + *****************************************************************************/
- +
- +
- +/*
- +A C string constant describing how to begin a comment in the target
- +assembler language. The compiler assumes that the comment will end at
- +the end of the line.
- +*/
- +#define ASM_COMMENT_START "# "
- +
- +/*
- +A C string constant for text to be output before each asm
- +statement or group of consecutive ones. Normally this is
- +"#APP", which is a comment that has no effect on most
- +assemblers but tells the GNU assembler that it must check the lines
- +that follow for all valid assembler constructs.
- +*/
- +#undef ASM_APP_ON
- +#define ASM_APP_ON "#APP\n"
- +
- +/*
- +A C string constant for text to be output after each asm
- +statement or group of consecutive ones. Normally this is
- +"#NO_APP", which tells the GNU assembler to resume making the
- +time-saving assumptions that are valid for ordinary compiler output.
- +*/
- +#undef ASM_APP_OFF
- +#define ASM_APP_OFF "#NO_APP\n"
- +
- +
- +
- +#define FILE_ASM_OP "\t.file\n"
- +#define IDENT_ASM_OP "\t.ident\t"
- +#define SET_ASM_OP "\t.set\t"
- +
- +
- +/*
- + * Output assembly directives to switch to section name. The section
- + * should have attributes as specified by flags, which is a bit mask
- + * of the SECTION_* flags defined in 'output.h'. If align is nonzero,
- + * it contains an alignment in bytes to be used for the section,
- + * otherwise some target default should be used. Only targets that
- + * must specify an alignment within the section directive need pay
- + * attention to align -- we will still use ASM_OUTPUT_ALIGN.
- + *
- + * NOTE: This one must not be moved to avr32.c
- + */
- +#undef TARGET_ASM_NAMED_SECTION
- +#define TARGET_ASM_NAMED_SECTION default_elf_asm_named_section
- +
- +
- +/*
- +You may define this macro as a C expression. You should define the
- +expression to have a nonzero value if GCC should output the constant
- +pool for a function before the code for the function, or a zero value if
- +GCC should output the constant pool after the function. If you do
- +not define this macro, the usual case, GCC will output the constant
- +pool before the function.
- +*/
- +#define CONSTANT_POOL_BEFORE_FUNCTION 0
- +
- +
- +/*
- +Define this macro as a C expression which is nonzero if the constant
- +EXP, of type tree, should be output after the code for a
- +function. The compiler will normally output all constants before the
- +function; you need not define this macro if this is OK.
- +*/
- +#define CONSTANT_AFTER_FUNCTION_P(EXP) 1
- +
- +
- +/*
- +Define this macro as a C expression which is nonzero if C is used
- +as a logical line separator by the assembler. STR points to the
- +position in the string where C was found; this can be used if a
- +line separator uses multiple characters.
- +
- +If you do not define this macro, the default is that only
- +the character ';' is treated as a logical line separator.
- +*/
- +#define IS_ASM_LOGICAL_LINE_SEPARATOR(C,STR) (((C) == '\n') || ((C) == ';'))
- +
- +
- +/** Output of Uninitialized Variables **/
- +
- +/*
- +A C statement (sans semicolon) to output to the stdio stream
- +STREAM the assembler definition of a common-label named
- +NAME whose size is SIZE bytes. The variable ROUNDED
- +is the size rounded up to whatever alignment the caller wants.
- +
- +Use the expression assemble_name(STREAM, NAME) to
- +output the name itself; before and after that, output the additional
- +assembler syntax for defining the name, and a newline.
- +
- +This macro controls how the assembler definitions of uninitialized
- +common global variables are output.
- +*/
- +/*
- +#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
- + avr32_asm_output_common(STREAM, NAME, SIZE, ROUNDED)
- +*/
- +
- +#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
- + do \
- + { \
- + fputs ("\t.comm ", (FILE)); \
- + assemble_name ((FILE), (NAME)); \
- + fprintf ((FILE), ",%d\n", (SIZE)); \
- + } \
- + while (0)
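- +
- +/*
- + For example (illustrative), a 16-byte common symbol "foo" is emitted as:
- +
- + .comm foo,16
- +*/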
- +
- +/*
- + * Like ASM_OUTPUT_BSS except takes the required alignment as a
- + * separate, explicit argument. If you define this macro, it is used
- + * in place of ASM_OUTPUT_BSS, and gives you more flexibility in
- + * handling the required alignment of the variable. The alignment is
- + * specified as the number of bits.
- + *
- + * Try to use function asm_output_aligned_bss defined in file varasm.c
- + * when defining this macro.
- + */
- +#define ASM_OUTPUT_ALIGNED_BSS(STREAM, DECL, NAME, SIZE, ALIGNMENT) \
- + asm_output_aligned_bss (STREAM, DECL, NAME, SIZE, ALIGNMENT)
- +
- +/*
- +A C statement (sans semicolon) to output to the stdio stream
- +STREAM the assembler definition of a local-common-label named
- +NAME whose size is SIZE bytes. The variable ROUNDED
- +is the size rounded up to whatever alignment the caller wants.
- +
- +Use the expression assemble_name(STREAM, NAME) to
- +output the name itself; before and after that, output the additional
- +assembler syntax for defining the name, and a newline.
- +
- +This macro controls how the assembler definitions of uninitialized
- +static variables are output.
- +*/
- +#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
- + do \
- + { \
- + fputs ("\t.lcomm ", (FILE)); \
- + assemble_name ((FILE), (NAME)); \
- + fprintf ((FILE), ",%d, %d\n", (SIZE), 2); \
- + } \
- + while (0)
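- +
- +/*
- + For example (illustrative), a 16-byte local symbol "bar" is emitted as
- + ".lcomm bar,16, 2"; the trailing 2 is the alignment argument hard-coded
- + in the fprintf above.
- +*/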
- +
- +
- +/*
- +A C statement (sans semicolon) to output to the stdio stream
- +STREAM the assembler definition of a label named NAME.
- +Use the expression assemble_name(STREAM, NAME) to
- +output the name itself; before and after that, output the additional
- +assembler syntax for defining the name, and a newline.
- +*/
- +#define ASM_OUTPUT_LABEL(STREAM, NAME) avr32_asm_output_label(STREAM, NAME)
- +
- +/* A C string containing the appropriate assembler directive to
- + * specify the size of a symbol, without any arguments. On systems
- + * that use ELF, the default (in 'config/elfos.h') is '"\t.size\t"';
- + * on other systems, the default is not to define this macro.
- + *
- + * Define this macro only if it is correct to use the default
- + * definitions of ASM_OUTPUT_SIZE_DIRECTIVE and
- + * ASM_OUTPUT_MEASURED_SIZE for your system. If you need your own
- + * custom definitions of those macros, or if you do not need explicit
- + * symbol sizes at all, do not define this macro.
- + */
- +#define SIZE_ASM_OP "\t.size\t"
- +
- +
- +/*
- +A C statement (sans semicolon) to output to the stdio stream
- +STREAM some commands that will make the label NAME global;
- +that is, available for reference from other files. Use the expression
- +assemble_name(STREAM, NAME) to output the name
- +itself; before and after that, output the additional assembler syntax
- +for making that name global, and a newline.
- +*/
- +#define GLOBAL_ASM_OP "\t.global\t"
- +
- +
- +
- +/*
- +A C expression which evaluates to true if the target supports weak symbols.
- +
- +If you don't define this macro, defaults.h provides a default
- +definition. If either ASM_WEAKEN_LABEL or ASM_WEAKEN_DECL
- +is defined, the default definition is '1'; otherwise, it is
- +'0'. Define this macro if you want to control weak symbol support
- +with a compiler flag such as -melf.
- +*/
- +#define SUPPORTS_WEAK 1
- +
- +/*
- +A C statement (sans semicolon) to output to the stdio stream
- +STREAM a reference in assembler syntax to a label named
- +NAME. This should add '_' to the front of the name, if that
- +is customary on your operating system, as it is in most Berkeley Unix
- +systems. This macro is used in assemble_name.
- +*/
- +#define ASM_OUTPUT_LABELREF(STREAM, NAME) \
- + avr32_asm_output_labelref(STREAM, NAME)
- +
- +
- +
- +/*
- +A C expression to assign to OUTVAR (which is a variable of type
- +char *) a newly allocated string made from the string
- +NAME and the number NUMBER, with some suitable punctuation
- +added. Use alloca to get space for the string.
- +
- +The string will be used as an argument to ASM_OUTPUT_LABELREF to
- +produce an assembler label for an internal static variable whose name is
- +NAME. Therefore, the string must be such as to result in valid
- +assembler code. The argument NUMBER is different each time this
- +macro is executed; it prevents conflicts between similarly-named
- +internal static variables in different scopes.
- +
- +Ideally this string should not be a valid C identifier, to prevent any
- +conflict with the user's own symbols. Most assemblers allow periods
- +or percent signs in assembler symbols; putting at least one of these
- +between the name and the number will suffice.
- +*/
- +#define ASM_FORMAT_PRIVATE_NAME(OUTVAR, NAME, NUMBER) \
- + do \
- + { \
- + (OUTVAR) = (char *) alloca (strlen ((NAME)) + 10); \
- + sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)); \
- + } \
- + while (0)
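- +
- +/*
- + For example (illustrative), NAME "counter" with NUMBER 3 yields the
- + label "counter.3".
- +*/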
- +
- +
- +/** Macros Controlling Initialization Routines **/
- +
- +
- +/*
- +If defined, main will not call __main as described above.
- +This macro should be defined for systems that control start-up code
- +on a symbol-by-symbol basis, such as OSF/1, and should not
- +be defined explicitly for systems that support INIT_SECTION_ASM_OP.
- +*/
- +/*
- + __main is not defined when debugging.
- +*/
- +#define HAS_INIT_SECTION
- +
- +
- +/** Output of Assembler Instructions **/
- +
- +/*
- +A C initializer containing the assembler's names for the machine
- +registers, each one as a C string constant. This is what translates
- +register numbers in the compiler into assembler language.
- +*/
- +
- +#define REGISTER_NAMES \
- +{ \
- + "pc", "lr", \
- + "sp", "r12", \
- + "r11", "r10", \
- + "r9", "r8", \
- + "r7", "r6", \
- + "r5", "r4", \
- + "r3", "r2", \
- + "r1", "r0", \
- +}
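- +
- +/*
- + Note (derived from the table above, illustrative): internal register
- + numbers run opposite to the architectural ones, so internal 0 is "pc",
- + internal 2 is "sp" (r13) and internal 15 is "r0". This matches
- + RET_REGISTER == (15 - 12) == 3 defined earlier, which prints as "r12".
- +*/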
- +
- +/*
- +A C compound statement to output to stdio stream STREAM the
- +assembler syntax for an instruction operand X. X is an
- +RTL expression.
- +
- +CODE is a value that can be used to specify one of several ways
- +of printing the operand. It is used when identical operands must be
- +printed differently depending on the context. CODE comes from
- +the '%' specification that was used to request printing of the
- +operand. If the specification was just '%digit' then
- +CODE is 0; if the specification was '%ltr digit'
- +then CODE is the ASCII code for ltr.
- +
- +If X is a register, this macro should print the register's name.
- +The names can be found in an array reg_names whose type is
- +char *[]. reg_names is initialized from REGISTER_NAMES.
- +
- +When the machine description has a specification '%punct'
- +(a '%' followed by a punctuation character), this macro is called
- +with a null pointer for X and the punctuation character for
- +CODE.
- +*/
- +#define PRINT_OPERAND(STREAM, X, CODE) avr32_print_operand(STREAM, X, CODE)
- +
- +/* A C statement to be executed just prior to the output of
- + assembler code for INSN, to modify the extracted operands so
- + they will be output differently.
- +
- + Here the argument OPVEC is the vector containing the operands
- + extracted from INSN, and NOPERANDS is the number of elements of
- + the vector which contain meaningful data for this insn.
- + The contents of this vector are what will be used to convert the insn
- + template into assembler code, so you can change the assembler output
- + by changing the contents of the vector. */
- +#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
- + avr32_final_prescan_insn ((INSN), (OPVEC), (NOPERANDS))
- +
- +/*
- +A C expression which evaluates to true if CODE is a valid
- +punctuation character for use in the PRINT_OPERAND macro. If
- +PRINT_OPERAND_PUNCT_VALID_P is not defined, it means that no
- +punctuation characters (except for the standard one, '%') are used
- +in this way.
- +*/
- +#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
- + (((CODE) == '?') \
- + || ((CODE) == '!'))
- +
- +/*
- +A C compound statement to output to stdio stream STREAM the
- +assembler syntax for an instruction operand that is a memory reference
- +whose address is X. X is an RTL expression.
- +
- +On some machines, the syntax for a symbolic address depends on the
- +section that the address refers to. On these machines, define the macro
- +ENCODE_SECTION_INFO to store the information into the
- +symbol_ref, and then check for it here (see Assembler Format).
- +*/
- +#define PRINT_OPERAND_ADDRESS(STREAM, X) avr32_print_operand_address(STREAM, X)
- +
- +
- +/** Output of Dispatch Tables **/
- +
- +/*
- + * A C statement to output to the stdio stream STREAM an assembler
- + * pseudo-instruction to generate a difference between two
- + * labels. VALUE and REL are the numbers of two internal labels. The
- + * definitions of these labels are output using
- + * (*targetm.asm_out.internal_label), and they must be printed in the
- + * same way here. For example,
- + *
- + * fprintf (STREAM, "\t.word L%d-L%d\n",
- + * VALUE, REL)
- + *
- + * You must provide this macro on machines where the addresses in a
- + * dispatch table are relative to the table's own address. If defined,
- + * GCC will also use this macro on all machines when producing
- + * PIC. BODY is the body of the ADDR_DIFF_VEC; it is provided so that
- + * the mode and flags can be read.
- + */
- +#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
- + fprintf(STREAM, "\tbral\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
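- +/* Note that instead of emitting a label difference as in the example above,
- +   this implementation fills the dispatch table with "bral" branch
- +   instructions to the target labels. */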
- +
- +/*
- +This macro should be provided on machines where the addresses
- +in a dispatch table are absolute.
- +
- +The definition should be a C statement to output to the stdio stream
- +STREAM an assembler pseudo-instruction to generate a reference to
- +a label. VALUE is the number of an internal label whose
- +definition is output using ASM_OUTPUT_INTERNAL_LABEL.
- +For example,
- +
- +fprintf(STREAM, "\t.word L%d\n", VALUE)
- +*/
- +
- +#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
- + fprintf(STREAM, "\t.long %sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
- +
- +/** Assembler Commands for Exception Regions */
- +
- +/* ToDo: All of this subsection */
- +
- +/** Assembler Commands for Alignment */
- +
- +
- +/*
- +A C statement to output to the stdio stream STREAM an assembler
- +command to advance the location counter to a multiple of 2 to the
- +POWER bytes. POWER will be a C expression of type int.
- +*/
- +#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
- + do \
- + { \
- + if ((POWER) != 0) \
- + fprintf(STREAM, "\t.align\t%d\n", POWER); \
- + } \
- + while (0)
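- +/* For example, with POWER == 2 this emits "\t.align\t2", requesting
- +   2^2 = 4 byte alignment; a POWER of zero emits nothing. */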
- +
- +/*
- +Like ASM_OUTPUT_ALIGN, except that the "nop" instruction is used for padding, if
- +necessary.
- +*/
- +#define ASM_OUTPUT_ALIGN_WITH_NOP(STREAM, POWER) \
- +  fprintf(STREAM, "\t.balignw\t%d, 0xd703\n", (1 << (POWER)))
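- +/* Unlike ".align" above, ".balignw" takes the alignment in bytes, hence the
- +   1 << POWER conversion; 0xd703 is the two-byte fill pattern, presumably the
- +   encoding of the "nop" padding instruction mentioned above. */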
- +
- +
- +
- +/******************************************************************************
- + * Controlling Debugging Information Format
- + *****************************************************************************/
- +
- +/* How to renumber registers for dbx and gdb. */
- +#define DBX_REGISTER_NUMBER(REGNO) ASM_REGNUM (REGNO)
- +
- +/* The DWARF 2 CFA column which tracks the return address. */
- +#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM(LR_REGNUM)
- +
- +/*
- +Define this macro if GCC should produce dwarf version 2 format
- +debugging output in response to the -g option.
- +
- +To support optional call frame debugging information, you must also
- +define INCOMING_RETURN_ADDR_RTX and either set
- +RTX_FRAME_RELATED_P on the prologue insns if you use RTL for the
- +prologue, or call dwarf2out_def_cfa and dwarf2out_reg_save
- +as appropriate from TARGET_ASM_FUNCTION_PROLOGUE if you don't.
- +*/
- +#define DWARF2_DEBUGGING_INFO 1
- +
- +
- +#define DWARF2_ASM_LINE_DEBUG_INFO 1
- +#define DWARF2_FRAME_INFO 1
- +
- +
- +/******************************************************************************
- + * Miscellaneous Parameters
- + *****************************************************************************/
- +
- +/* ToDo: a lot */
- +
- +/*
- +An alias for a machine mode name. This is the machine mode that
- +elements of a jump-table should have.
- +*/
- +#define CASE_VECTOR_MODE SImode
- +
- +/*
- +Define this macro to be a C expression to indicate when jump-tables
- +should contain relative addresses. If jump-tables never contain
- +relative addresses, then you need not define this macro.
- +*/
- +#define CASE_VECTOR_PC_RELATIVE 0
- +
- +/* Increase the threshold for using table jumps on the UC arch. */
- +#define CASE_VALUES_THRESHOLD (TARGET_BRANCH_PRED ? 4 : 7)
- +
- +/*
- +The maximum number of bytes that a single instruction can move quickly
- +between memory and registers or between two memory locations.
- +*/
- +#define MOVE_MAX (2*UNITS_PER_WORD)
- +
- +
- +/* A C expression that is nonzero if on this machine the number of bits actually used
- +   for the count of a shift operation is equal to the number of bits needed to represent
- +   the size of the object being shifted. When this macro is nonzero, the compiler will
- +   assume that it is safe to omit a sign-extend, zero-extend, and certain bitwise 'and'
- +   instructions that truncate the count of a shift operation. On machines that have
- +   instructions that act on bit-fields at variable positions, which may include 'bit test'
- +   instructions, a nonzero SHIFT_COUNT_TRUNCATED also enables deletion of truncations
- +   of the values that serve as arguments to bit-field instructions.
- +   If both types of instructions truncate the count (for shifts) and position (for bit-field
- +   operations), or if no variable-position bit-field instructions exist, you should define
- +   this macro.
- +   However, on some machines, such as the 80386 and the 680x0, truncation only applies
- +   to shift operations and not the (real or pretended) bit-field operations. Define
- +   SHIFT_COUNT_TRUNCATED to be zero on such machines. Instead, add patterns to the 'md'
- +   file that include the implied truncation of the shift instructions.
- +   You need not define this macro if it would always have the value of zero. */
- +#define SHIFT_COUNT_TRUNCATED 1
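- +/* A minimal illustration of what this enables: since the hardware truncates
- +   shift counts, an explicit masking of the count is redundant and, e.g.,
- +
- +     x << (n & 31)      (for 32-bit x)
- +
- +   may be compiled to a single shift that uses n directly. */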
- +
- +/*
- +A C expression which is nonzero if on this machine it is safe to
- +convert an integer of INPREC bits to one of OUTPREC
- +bits (where OUTPREC is smaller than INPREC) by merely
- +operating on it as if it had only OUTPREC bits.
- +
- +On many machines, this expression can be 1.
- +
- +When TRULY_NOOP_TRUNCATION returns 1 for a pair of sizes for
- +modes for which MODES_TIEABLE_P is 0, suboptimal code can result.
- +If this is the case, making TRULY_NOOP_TRUNCATION return 0 in
- +such cases may improve things.
- +*/
- +#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
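- +/* In other words, truncating e.g. a 32-bit value to 8 bits requires no
- +   instruction here; the low 8 bits of the register are simply used as-is. */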
- +
- +/*
- +An alias for the machine mode for pointers. On most machines, define
- +this to be the integer mode corresponding to the width of a hardware
- +pointer; SImode on 32-bit machines or DImode on 64-bit machines.
- +On some machines you must define this to be one of the partial integer
- +modes, such as PSImode.
- +
- +The width of Pmode must be at least as large as the value of
- +POINTER_SIZE. If it is not equal, you must define the macro
- +POINTERS_EXTEND_UNSIGNED to specify how pointers are extended
- +to Pmode.
- +*/
- +#define Pmode SImode
- +
- +/*
- +An alias for the machine mode used for memory references to functions
- +being called, in call RTL expressions. On most machines this
- +should be QImode.
- +*/
- +#define FUNCTION_MODE SImode
- +
- +
- +#define REG_S_P(x) \
- + (REG_P (x) || (GET_CODE (x) == SUBREG && REG_P (XEXP (x, 0))))
- +
- +
- +/* If defined, modifies the length assigned to instruction INSN as a
- + function of the context in which it is used. LENGTH is an lvalue
- + that contains the initially computed length of the insn and should
- + be updated with the correct length of the insn. */
- +#define ADJUST_INSN_LENGTH(INSN, LENGTH) \
- + ((LENGTH) = avr32_adjust_insn_length ((INSN), (LENGTH)))
- +
- +
- +#define CLZ_DEFINED_VALUE_AT_ZERO(mode, value) \
- + (value = 32, (mode == SImode))
- +
- +#define CTZ_DEFINED_VALUE_AT_ZERO(mode, value) \
- + (value = 32, (mode == SImode))
- +
- +#define UNITS_PER_SIMD_WORD(mode) UNITS_PER_WORD
- +
- +#define STORE_FLAG_VALUE 1
- +
- +
- +/* IF-conversion macros. */
- +#define IFCVT_MODIFY_INSN( CE_INFO, PATTERN, INSN ) \
- + { \
- + (PATTERN) = avr32_ifcvt_modify_insn (CE_INFO, PATTERN, INSN, &num_true_changes); \
- + }
- +
- +#define IFCVT_EXTRA_FIELDS \
- + int num_cond_clobber_insns; \
- + int num_extra_move_insns; \
- + rtx extra_move_insns[MAX_CONDITIONAL_EXECUTE]; \
- + rtx moved_insns[MAX_CONDITIONAL_EXECUTE];
- +
- +#define IFCVT_INIT_EXTRA_FIELDS( CE_INFO ) \
- + { \
- + (CE_INFO)->num_cond_clobber_insns = 0; \
- + (CE_INFO)->num_extra_move_insns = 0; \
- + }
- +
- +
- +#define IFCVT_MODIFY_CANCEL( CE_INFO ) avr32_ifcvt_modify_cancel (CE_INFO, &num_true_changes)
- +
- +#define IFCVT_ALLOW_MODIFY_TEST_IN_INSN 1
- +#define IFCVT_COND_EXEC_BEFORE_RELOAD (TARGET_COND_EXEC_BEFORE_RELOAD)
- +
- +enum avr32_builtins
- +{
- + AVR32_BUILTIN_MTSR,
- + AVR32_BUILTIN_MFSR,
- + AVR32_BUILTIN_MTDR,
- + AVR32_BUILTIN_MFDR,
- + AVR32_BUILTIN_CACHE,
- + AVR32_BUILTIN_SYNC,
- + AVR32_BUILTIN_SSRF,
- + AVR32_BUILTIN_CSRF,
- + AVR32_BUILTIN_TLBR,
- + AVR32_BUILTIN_TLBS,
- + AVR32_BUILTIN_TLBW,
- + AVR32_BUILTIN_BREAKPOINT,
- + AVR32_BUILTIN_XCHG,
- + AVR32_BUILTIN_LDXI,
- + AVR32_BUILTIN_BSWAP16,
- + AVR32_BUILTIN_BSWAP32,
- + AVR32_BUILTIN_COP,
- + AVR32_BUILTIN_MVCR_W,
- + AVR32_BUILTIN_MVRC_W,
- + AVR32_BUILTIN_MVCR_D,
- + AVR32_BUILTIN_MVRC_D,
- + AVR32_BUILTIN_MULSATHH_H,
- + AVR32_BUILTIN_MULSATHH_W,
- + AVR32_BUILTIN_MULSATRNDHH_H,
- + AVR32_BUILTIN_MULSATRNDWH_W,
- + AVR32_BUILTIN_MULSATWH_W,
- + AVR32_BUILTIN_MACSATHH_W,
- + AVR32_BUILTIN_SATADD_H,
- + AVR32_BUILTIN_SATSUB_H,
- + AVR32_BUILTIN_SATADD_W,
- + AVR32_BUILTIN_SATSUB_W,
- + AVR32_BUILTIN_MULWH_D,
- + AVR32_BUILTIN_MULNWH_D,
- + AVR32_BUILTIN_MACWH_D,
- + AVR32_BUILTIN_MACHH_D,
- + AVR32_BUILTIN_MUSFR,
- + AVR32_BUILTIN_MUSTR,
- + AVR32_BUILTIN_SATS,
- + AVR32_BUILTIN_SATU,
- + AVR32_BUILTIN_SATRNDS,
- + AVR32_BUILTIN_SATRNDU,
- + AVR32_BUILTIN_MEMS,
- + AVR32_BUILTIN_MEMC,
- + AVR32_BUILTIN_MEMT,
- + AVR32_BUILTIN_SLEEP,
- + AVR32_BUILTIN_DELAY_CYCLES
- +};
- +
- +
- +#define FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE, COMPARISON) \
- + ((MODE == SFmode) || (MODE == DFmode))
- +
- +#define RENAME_LIBRARY_SET ".set"
- +
- +/* Make __avr32_ABI_NAME an alias for __GCC_NAME. */
- +#define RENAME_LIBRARY(GCC_NAME, ABI_NAME) \
- + __asm__ (".globl\t__avr32_" #ABI_NAME "\n" \
- + ".set\t__avr32_" #ABI_NAME \
- + ", __" #GCC_NAME "\n");
- +
- +/* Give libgcc functions avr32 ABI name. */
- +#ifdef L_muldi3
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (muldi3, mul64)
- +#endif
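- +/* As a sketch, the rename above should expand to inline assembly along the
- +   lines of
- +     .globl __avr32_mul64
- +     .set   __avr32_mul64, __muldi3
- +   making the avr32 ABI name an alias for the libgcc symbol. */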
- +#ifdef L_divdi3
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (divdi3, sdiv64)
- +#endif
- +#ifdef L_udivdi3
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (udivdi3, udiv64)
- +#endif
- +#ifdef L_moddi3
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (moddi3, smod64)
- +#endif
- +#ifdef L_umoddi3
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (umoddi3, umod64)
- +#endif
- +#ifdef L_ashldi3
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashldi3, lsl64)
- +#endif
- +#ifdef L_lshrdi3
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (lshrdi3, lsr64)
- +#endif
- +#ifdef L_ashrdi3
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashrdi3, asr64)
- +#endif
- +
- +#ifdef L_fixsfdi
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixsfdi, f32_to_s64)
- +#endif
- +#ifdef L_fixunssfdi
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunssfdi, f32_to_u64)
- +#endif
- +#ifdef L_floatdidf
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdidf, s64_to_f64)
- +#endif
- +#ifdef L_floatdisf
- +#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdisf, s64_to_f32)
- +#endif
- +
- +#endif
- --- /dev/null
- +++ b/gcc/config/avr32/avr32.md
- @@ -0,0 +1,5198 @@
- +;; AVR32 machine description file.
- +;; Copyright 2003,2004,2005,2006,2007,2008,2009 Atmel Corporation.
- +;;
- +;; This file is part of GCC.
- +;;
- +;; This program is free software; you can redistribute it and/or modify
- +;; it under the terms of the GNU General Public License as published by
- +;; the Free Software Foundation; either version 2 of the License, or
- +;; (at your option) any later version.
- +;;
- +;; This program is distributed in the hope that it will be useful,
- +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
- +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- +;; GNU General Public License for more details.
- +;;
- +;; You should have received a copy of the GNU General Public License
- +;; along with this program; if not, write to the Free Software
- +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- +
- +;; -*- Mode: Scheme -*-
- +
- +(define_attr "type" "alu,alu2,alu_sat,mulhh,mulwh,mulww_w,mulww_d,div,machh_w,macww_w,macww_d,branch,call,load,load_rm,store,load2,load4,store2,store4,fmul,fcmps,fcmpd,fcast,fmv,fmvcpu,fldd,fstd,flds,fsts,fstm"
- + (const_string "alu"))
- +
- +
- +(define_attr "cc" "none,set_vncz,set_ncz,set_cz,set_z,set_z_if_not_v2,bld,compare,cmp_cond_insn,clobber,call_set,fpcompare,from_fpcc"
- + (const_string "none"))
- +
- +
- +; NB! Keep this in sync with enum architecture_type in avr32.h
- +(define_attr "pipeline" "ap,ucr1,ucr2,ucr2nomul,ucr3,ucr3fp"
- + (const (symbol_ref "avr32_arch->arch_type")))
- +
- +; Insn length in bytes
- +(define_attr "length" ""
- + (const_int 4))
- +
- +; Signal if an insn is predicable and hence can be conditionally executed.
- +(define_attr "predicable" "no,yes" (const_string "no"))
- +
- +;; Uses of UNSPEC in this file:
- +(define_constants
- + [(UNSPEC_PUSHM 0)
- + (UNSPEC_POPM 1)
- + (UNSPEC_UDIVMODSI4_INTERNAL 2)
- + (UNSPEC_DIVMODSI4_INTERNAL 3)
- + (UNSPEC_STM 4)
- + (UNSPEC_LDM 5)
- + (UNSPEC_MOVSICC 6)
- + (UNSPEC_ADDSICC 7)
- + (UNSPEC_COND_MI 8)
- + (UNSPEC_COND_PL 9)
- + (UNSPEC_PIC_SYM 10)
- + (UNSPEC_PIC_BASE 11)
- + (UNSPEC_STORE_MULTIPLE 12)
- + (UNSPEC_STMFP 13)
- + (UNSPEC_FRCPA 14)
- + (UNSPEC_REG_TO_CC 15)
- + (UNSPEC_FORCE_MINIPOOL 16)
- + (UNSPEC_SATS 17)
- + (UNSPEC_SATU 18)
- + (UNSPEC_SATRNDS 19)
- + (UNSPEC_SATRNDU 20)
- + ])
- +
- +(define_constants
- + [(VUNSPEC_EPILOGUE 0)
- + (VUNSPEC_CACHE 1)
- + (VUNSPEC_MTSR 2)
- + (VUNSPEC_MFSR 3)
- + (VUNSPEC_BLOCKAGE 4)
- + (VUNSPEC_SYNC 5)
- + (VUNSPEC_TLBR 6)
- + (VUNSPEC_TLBW 7)
- + (VUNSPEC_TLBS 8)
- + (VUNSPEC_BREAKPOINT 9)
- + (VUNSPEC_MTDR 10)
- + (VUNSPEC_MFDR 11)
- + (VUNSPEC_MVCR 12)
- + (VUNSPEC_MVRC 13)
- + (VUNSPEC_COP 14)
- + (VUNSPEC_ALIGN 15)
- + (VUNSPEC_POOL_START 16)
- + (VUNSPEC_POOL_END 17)
- + (VUNSPEC_POOL_4 18)
- + (VUNSPEC_POOL_8 19)
- + (VUNSPEC_POOL_16 20)
- + (VUNSPEC_MUSFR 21)
- + (VUNSPEC_MUSTR 22)
- + (VUNSPEC_SYNC_CMPXCHG 23)
- + (VUNSPEC_SYNC_SET_LOCK_AND_LOAD 24)
- + (VUNSPEC_SYNC_STORE_IF_LOCK 25)
- + (VUNSPEC_EH_RETURN 26)
- + (VUNSPEC_FRS 27)
- + (VUNSPEC_CSRF 28)
- + (VUNSPEC_SSRF 29)
- + (VUNSPEC_SLEEP 30)
- + (VUNSPEC_DELAY_CYCLES 31)
- + (VUNSPEC_DELAY_CYCLES_1 32)
- + (VUNSPEC_DELAY_CYCLES_2 33)
- + (VUNSPEC_NOP 34)
- + (VUNSPEC_NOP3 35)
- + ])
- +
- +(define_constants
- + [
- +   ;; FP = R7 = 15 - 7 = 8
- + (FP_REGNUM 8)
- + ;; Return Register = R12 = 15 - 12 = 3
- + (RETVAL_REGNUM 3)
- + ;; SP = R13 = 15 - 13 = 2
- + (SP_REGNUM 2)
- + ;; LR = R14 = 15 - 14 = 1
- + (LR_REGNUM 1)
- + ;; PC = R15 = 15 - 15 = 0
- + (PC_REGNUM 0)
- + ;; FPSR = GENERAL_REGS + 1 = 17
- + (FPCC_REGNUM 17)
- + ])
- +
- +
- +
- +
- +;;******************************************************************************
- +;; Macros
- +;;******************************************************************************
- +
- +;; Integer Modes for basic alu insns
- +(define_mode_iterator INTM [SI HI QI])
- +(define_mode_attr alu_cc_attr [(SI "set_vncz") (HI "clobber") (QI "clobber")])
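- +;; As an example of how these are used: a pattern named "add<mode>3" over
- +;; INTM instantiates addsi3, addhi3 and addqi3, and <INTM:alu_cc_attr>
- +;; selects the "set_vncz" cc attribute for the SI variant and "clobber"
- +;; for the HI and QI variants.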
- +
- +;; Move word modes
- +(define_mode_iterator MOVM [SI V2HI V4QI])
- +
- +;; For mov/addcc insns
- +(define_mode_iterator ADDCC [SI HI QI])
- +(define_mode_iterator MOVCC [SF SI HI QI])
- +(define_mode_iterator CMP [DI SI HI QI])
- +(define_mode_attr store_postfix [(SF ".w") (SI ".w") (HI ".h") (QI ".b")])
- +(define_mode_attr load_postfix [(SF ".w") (SI ".w") (HI ".sh") (QI ".ub")])
- +(define_mode_attr load_postfix_s [(SI ".w") (HI ".sh") (QI ".sb")])
- +(define_mode_attr load_postfix_u [(SI ".w") (HI ".uh") (QI ".ub")])
- +(define_mode_attr pred_mem_constraint [(SF "RKu11") (SI "RKu11") (HI "RKu10") (QI "RKu09")])
- +(define_mode_attr cmp_constraint [(DI "rKu20") (SI "rKs21") (HI "r") (QI "r")])
- +(define_mode_attr cmp_predicate [(DI "register_immediate_operand")
- + (SI "register_const_int_operand")
- + (HI "register_operand")
- + (QI "register_operand")])
- +(define_mode_attr cmp_length [(DI "6")
- + (SI "4")
- + (HI "4")
- + (QI "4")])
- +
- +;; For all conditional insns
- +(define_code_iterator any_cond_b [ge lt geu ltu])
- +(define_code_iterator any_cond [gt ge lt le gtu geu ltu leu])
- +(define_code_iterator any_cond4 [gt le gtu leu])
- +(define_code_attr cond [(eq "eq") (ne "ne") (gt "gt") (ge "ge") (lt "lt") (le "le")
- + (gtu "hi") (geu "hs") (ltu "lo") (leu "ls")])
- +(define_code_attr invcond [(eq "ne") (ne "eq") (gt "le") (ge "lt") (lt "ge") (le "gt")
- + (gtu "ls") (geu "lo") (ltu "hs") (leu "hi")])
- +
- +;; For logical operations
- +(define_code_iterator logical [and ior xor])
- +(define_code_attr logical_insn [(and "and") (ior "or") (xor "eor")])
- +
- +;; Predicable operations with three register operands
- +(define_code_iterator predicable_op3 [and ior xor plus minus])
- +(define_code_attr predicable_insn3 [(and "and") (ior "or") (xor "eor") (plus "add") (minus "sub")])
- +(define_code_attr predicable_commutative3 [(and "%") (ior "%") (xor "%") (plus "%") (minus "")])
- +
- +;; Load the predicates
- +(include "predicates.md")
- +
- +
- +;;******************************************************************************
- +;; Automaton pipeline description for avr32
- +;;******************************************************************************
- +
- +(define_automaton "avr32_ap")
- +
- +
- +(define_cpu_unit "is" "avr32_ap")
- +(define_cpu_unit "a1,m1,da" "avr32_ap")
- +(define_cpu_unit "a2,m2,d" "avr32_ap")
- +
- +;;Alu instructions
- +(define_insn_reservation "alu_op" 1
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "alu"))
- + "is,a1,a2")
- +
- +(define_insn_reservation "alu2_op" 2
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "alu2"))
- + "is,is+a1,a1+a2,a2")
- +
- +(define_insn_reservation "alu_sat_op" 2
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "alu_sat"))
- + "is,a1,a2")
- +
- +
- +;;Mul instructions
- +(define_insn_reservation "mulhh_op" 2
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "mulhh,mulwh"))
- + "is,m1,m2")
- +
- +(define_insn_reservation "mulww_w_op" 3
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "mulww_w"))
- + "is,m1,m1+m2,m2")
- +
- +(define_insn_reservation "mulww_d_op" 5
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "mulww_d"))
- + "is,m1,m1+m2,m1+m2,m2,m2")
- +
- +(define_insn_reservation "div_op" 33
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "div"))
- + "is,m1,m1*31 + m2*31,m2")
- +
- +(define_insn_reservation "machh_w_op" 3
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "machh_w"))
- + "is*2,m1,m2")
- +
- +
- +(define_insn_reservation "macww_w_op" 4
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "macww_w"))
- + "is*2,m1,m1,m2")
- +
- +
- +(define_insn_reservation "macww_d_op" 6
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "macww_d"))
- + "is*2,m1,m1+m2,m1+m2,m2")
- +
- +;;Bypasses for Mac instructions, because of accumulator cache.
- +;;Set latency as low as possible in order to let the compiler place
- +;;mul -> mac and mac -> mac combinations which use the same
- +;;accumulator cache close together, so that no instruction which
- +;;can ruin the accumulator cache comes in between.
- +(define_bypass 4 "machh_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
- +(define_bypass 5 "macww_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
- +(define_bypass 7 "macww_d_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
- +
- +(define_bypass 3 "mulhh_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
- +(define_bypass 4 "mulww_w_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
- +(define_bypass 6 "mulww_d_op" "alu_op,alu2_op,alu_sat_op,load_op" "avr32_mul_waw_bypass")
- +
- +
- +;;Bypasses for all mul/mac instructions followed by an instruction
- +;;which reads the output AND writes the result to the same register.
- +;;This will generate a Write After Write hazard which gives an
- +;;extra cycle before the result is ready.
- +(define_bypass 0 "machh_w_op" "machh_w_op" "avr32_valid_macmac_bypass")
- +(define_bypass 0 "macww_w_op" "macww_w_op" "avr32_valid_macmac_bypass")
- +(define_bypass 0 "macww_d_op" "macww_d_op" "avr32_valid_macmac_bypass")
- +
- +(define_bypass 0 "mulhh_op" "machh_w_op" "avr32_valid_mulmac_bypass")
- +(define_bypass 0 "mulww_w_op" "macww_w_op" "avr32_valid_mulmac_bypass")
- +(define_bypass 0 "mulww_d_op" "macww_d_op" "avr32_valid_mulmac_bypass")
- +
- +;;Branch and call instructions
- +;;We assume that all branches and rcalls are predicted correctly :-)
- +;;while calls use a lot of cycles.
- +(define_insn_reservation "branch_op" 0
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "branch"))
- + "nothing")
- +
- +(define_insn_reservation "call_op" 10
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "call"))
- + "nothing")
- +
- +
- +;;Load store instructions
- +(define_insn_reservation "load_op" 2
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "load"))
- + "is,da,d")
- +
- +(define_insn_reservation "load_rm_op" 3
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "load_rm"))
- + "is,da,d")
- +
- +
- +(define_insn_reservation "store_op" 0
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "store"))
- + "is,da,d")
- +
- +
- +(define_insn_reservation "load_double_op" 3
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "load2"))
- + "is,da,da+d,d")
- +
- +(define_insn_reservation "load_quad_op" 4
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "load4"))
- + "is,da,da+d,da+d,d")
- +
- +(define_insn_reservation "store_double_op" 0
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "store2"))
- + "is,da,da+d,d")
- +
- +
- +(define_insn_reservation "store_quad_op" 0
- + (and (eq_attr "pipeline" "ap")
- + (eq_attr "type" "store4"))
- + "is,da,da+d,da+d,d")
- +
- +;;For stores the operand to write to memory is read in d, and
- +;;the real latency between any instruction and a store is therefore
- +;;one less than for the instructions which read their operands in the first
- +;;execution stage.
- +(define_bypass 2 "load_double_op" "store_double_op" "avr32_store_bypass")
- +(define_bypass 3 "load_quad_op" "store_quad_op" "avr32_store_bypass")
- +(define_bypass 1 "load_op" "store_op" "avr32_store_bypass")
- +(define_bypass 2 "load_rm_op" "store_op" "avr32_store_bypass")
- +(define_bypass 1 "alu_sat_op" "store_op" "avr32_store_bypass")
- +(define_bypass 1 "alu2_op" "store_op" "avr32_store_bypass")
- +(define_bypass 1 "mulhh_op" "store_op" "avr32_store_bypass")
- +(define_bypass 2 "mulww_w_op" "store_op" "avr32_store_bypass")
- +(define_bypass 4 "mulww_d_op" "store_op" "avr32_store_bypass" )
- +(define_bypass 2 "machh_w_op" "store_op" "avr32_store_bypass")
- +(define_bypass 3 "macww_w_op" "store_op" "avr32_store_bypass")
- +(define_bypass 5 "macww_d_op" "store_op" "avr32_store_bypass")
- +
- +
- +; Bypass for load double operation. If only the first loaded word is needed
- +; then the latency is 2
- +(define_bypass 2 "load_double_op"
- + "load_op,load_rm_op,alu_sat_op, alu2_op, alu_op, mulhh_op, mulww_w_op,
- + mulww_d_op, machh_w_op, macww_w_op, macww_d_op"
- + "avr32_valid_load_double_bypass")
- +
- +; Bypass for load quad operation. If only the first or second loaded word is needed
- +; we set the latency to 2
- +(define_bypass 2 "load_quad_op"
- + "load_op,load_rm_op,alu_sat_op, alu2_op, alu_op, mulhh_op, mulww_w_op,
- + mulww_d_op, machh_w_op, macww_w_op, macww_d_op"
- + "avr32_valid_load_quad_bypass")
- +
- +
- +;;******************************************************************************
- +;; End of Automaton pipeline description for avr32
- +;;******************************************************************************
- +
- +(define_cond_exec
- + [(match_operator 0 "avr32_comparison_operator"
- + [(match_operand:CMP 1 "register_operand" "r")
- + (match_operand:CMP 2 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")])]
- + "TARGET_V2_INSNS"
- + "%!"
- +)
- +
- +(define_cond_exec
- + [(match_operator 0 "avr32_comparison_operator"
- + [(and:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "one_bit_set_operand" "i"))
- + (const_int 0)])]
- + "TARGET_V2_INSNS"
- + "%!"
- + )
- +
- +;;=============================================================================
- +;; move
- +;;-----------------------------------------------------------------------------
- +
- +
- +;;== char - 8 bits ============================================================
- +(define_expand "movqi"
- + [(set (match_operand:QI 0 "nonimmediate_operand" "")
- + (match_operand:QI 1 "general_operand" ""))]
- + ""
- + {
- + if ( can_create_pseudo_p () ){
- + if (GET_CODE (operands[1]) == MEM && optimize){
- + rtx reg = gen_reg_rtx (SImode);
- +
- + emit_insn (gen_zero_extendqisi2 (reg, operands[1]));
- + operands[1] = gen_lowpart (QImode, reg);
- + }
- +
- + /* One of the ops has to be in a register. */
- + if (GET_CODE (operands[0]) == MEM)
- + operands[1] = force_reg (QImode, operands[1]);
- + }
- +
- + })
- +
- +(define_insn "*movqi_internal"
- + [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m,r")
- + (match_operand:QI 1 "general_operand" "rKs08,m,r,i"))]
- + "register_operand (operands[0], QImode)
- + || register_operand (operands[1], QImode)"
- + "@
- + mov\t%0, %1
- + ld.ub\t%0, %1
- + st.b\t%0, %1
- + mov\t%0, %1"
- + [(set_attr "length" "2,4,4,4")
- + (set_attr "type" "alu,load_rm,store,alu")])
- +
- +
- +
- +;;== short - 16 bits ==========================================================
- +(define_expand "movhi"
- + [(set (match_operand:HI 0 "nonimmediate_operand" "")
- + (match_operand:HI 1 "general_operand" ""))]
- + ""
- + {
- + if ( can_create_pseudo_p () ){
- + if (GET_CODE (operands[1]) == MEM && optimize){
- + rtx reg = gen_reg_rtx (SImode);
- +
- + emit_insn (gen_extendhisi2 (reg, operands[1]));
- + operands[1] = gen_lowpart (HImode, reg);
- + }
- +
- + /* One of the ops has to be in a register. */
- + if (GET_CODE (operands[0]) == MEM)
- + operands[1] = force_reg (HImode, operands[1]);
- + }
- +
- + })
- +
- +
- +(define_insn "*movhi_internal"
- + [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r")
- + (match_operand:HI 1 "general_operand" "rKs08,m,r,i"))]
- + "register_operand (operands[0], HImode)
- + || register_operand (operands[1], HImode)"
- + "@
- + mov\t%0, %1
- + ld.sh\t%0, %1
- + st.h\t%0, %1
- + mov\t%0, %1"
- + [(set_attr "length" "2,4,4,4")
- + (set_attr "type" "alu,load_rm,store,alu")])
- +
- +
- +;;== int - 32 bits ============================================================
- +
- +(define_expand "movmisalignsi"
- + [(set (match_operand:SI 0 "nonimmediate_operand" "")
- + (match_operand:SI 1 "nonimmediate_operand" ""))]
- + "TARGET_UNALIGNED_WORD"
- + {
- + }
- +)
- +
- +(define_expand "mov<mode>"
- + [(set (match_operand:MOVM 0 "avr32_non_rmw_nonimmediate_operand" "")
- + (match_operand:MOVM 1 "avr32_non_rmw_general_operand" ""))]
- + ""
- + {
- +
- + /* One of the ops has to be in a register. */
- + if (GET_CODE (operands[0]) == MEM)
- + operands[1] = force_reg (<MODE>mode, operands[1]);
- +
- +     /* Check for out of range immediate constants as these may
- +        occur during reloading, since it seems like reload does
- +        not check whether the immediate is legitimate. It is not
- +        clear whether this is a reload bug. */
- + if ( reload_in_progress
- + && avr32_imm_in_const_pool
- + && GET_CODE(operands[1]) == CONST_INT
- + && !avr32_const_ok_for_constraint_p(INTVAL(operands[1]), 'K', "Ks21") ){
- + operands[1] = force_const_mem(SImode, operands[1]);
- + }
- +     /* Check for RMW memory operands. They are not allowed for mov operations;
- +        only the atomic memc/s/t operations may use them. */
- + if ( !reload_in_progress
- + && avr32_rmw_memory_operand (operands[0], <MODE>mode) ){
- + operands[0] = copy_rtx (operands[0]);
- + XEXP(operands[0], 0) = force_reg (<MODE>mode, XEXP(operands[0], 0));
- + }
- +
- + if ( !reload_in_progress
- + && avr32_rmw_memory_operand (operands[1], <MODE>mode) ){
- + operands[1] = copy_rtx (operands[1]);
- + XEXP(operands[1], 0) = force_reg (<MODE>mode, XEXP(operands[1], 0));
- + }
- + if ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
- + && !avr32_legitimate_pic_operand_p(operands[1]) )
- + operands[1] = legitimize_pic_address (operands[1], <MODE>mode,
- + (can_create_pseudo_p () ? 0: operands[0]));
- + else if ( flag_pic && avr32_address_operand(operands[1], GET_MODE(operands[1])) )
- + /* If we have an address operand then this function uses the pic register. */
- + crtl->uses_pic_offset_table = 1;
- + })
- +
- +
- +(define_insn "mov<mode>_internal"
- + [(set (match_operand:MOVM 0 "avr32_non_rmw_nonimmediate_operand" "=r, r, r,r,r,Q,r")
- + (match_operand:MOVM 1 "avr32_non_rmw_general_operand" "rKs08,Ks21,J,n,Q,r,W"))]
- + "(register_operand (operands[0], <MODE>mode)
- + || register_operand (operands[1], <MODE>mode))
- + && !avr32_rmw_memory_operand (operands[0], <MODE>mode)
- + && !avr32_rmw_memory_operand (operands[1], <MODE>mode)"
- + {
- + switch (which_alternative) {
- + case 0:
- + case 1: return "mov\t%0, %1";
- + case 2:
- + if ( TARGET_V2_INSNS )
- + return "movh\t%0, hi(%1)";
- + /* Fallthrough */
- + case 3: return "mov\t%0, lo(%1)\;orh\t%0,hi(%1)";
- + case 4:
- + if ( (REG_P(XEXP(operands[1], 0))
- + && REGNO(XEXP(operands[1], 0)) == SP_REGNUM)
- + || (GET_CODE(XEXP(operands[1], 0)) == PLUS
- + && REGNO(XEXP(XEXP(operands[1], 0), 0)) == SP_REGNUM
- + && GET_CODE(XEXP(XEXP(operands[1], 0), 1)) == CONST_INT
- + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) % 4 == 0
- + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) <= 0x1FC) )
- + return "lddsp\t%0, %1";
- + else if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])) )
- + return "lddpc\t%0, %1";
- + else
- + return "ld.w\t%0, %1";
- + case 5:
- + if ( (REG_P(XEXP(operands[0], 0))
- + && REGNO(XEXP(operands[0], 0)) == SP_REGNUM)
- + || (GET_CODE(XEXP(operands[0], 0)) == PLUS
- + && REGNO(XEXP(XEXP(operands[0], 0), 0)) == SP_REGNUM
- + && GET_CODE(XEXP(XEXP(operands[0], 0), 1)) == CONST_INT
- + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) % 4 == 0
- + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) <= 0x1FC) )
- + return "stdsp\t%0, %1";
- + else
- + return "st.w\t%0, %1";
- + case 6:
- + if ( TARGET_HAS_ASM_ADDR_PSEUDOS )
- + return "lda.w\t%0, %1";
- + else
- + return "ld.w\t%0, r6[%1@got]";
- + default:
- + abort();
- + }
- + }
- +
- + [(set_attr "length" "2,4,4,8,4,4,8")
- + (set_attr "type" "alu,alu,alu,alu2,load,store,load")
- + (set_attr "cc" "none,none,set_z_if_not_v2,set_z,none,none,clobber")])
- +
- +
- +(define_expand "reload_out_rmw_memory_operand"
- + [(set (match_operand:SI 2 "register_operand" "=r")
- + (match_operand:SI 0 "address_operand" ""))
- + (set (mem:SI (match_dup 2))
- + (match_operand:SI 1 "register_operand" ""))]
- + ""
- + {
- + operands[0] = XEXP(operands[0], 0);
- + }
- +)
- +
- +(define_expand "reload_in_rmw_memory_operand"
- + [(set (match_operand:SI 2 "register_operand" "=r")
- + (match_operand:SI 1 "address_operand" ""))
- + (set (match_operand:SI 0 "register_operand" "")
- + (mem:SI (match_dup 2)))]
- + ""
- + {
- + operands[1] = XEXP(operands[1], 0);
- + }
- +)
- +
- +
- +;; These instructions are for loading constants which cannot be loaded
- +;; directly from the constant pool because the offset is too large.
- +;; high and lo_sum are used even though for our case it should be
- +;; low and high sum :-)
- +(define_insn "mov_symbol_lo"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (high:SI (match_operand:SI 1 "immediate_operand" "i" )))]
- + ""
- + "mov\t%0, lo(%1)"
- + [(set_attr "type" "alu")
- + (set_attr "length" "4")]
- +)
- +
- +(define_insn "add_symbol_hi"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (lo_sum:SI (match_dup 0)
- + (match_operand:SI 1 "immediate_operand" "i" )))]
- + ""
- + "orh\t%0, hi(%1)"
- + [(set_attr "type" "alu")
- + (set_attr "length" "4")]
- +)
- +
- +
- +
- +;; When generating pic, we need to load the symbol offset into a register.
- +;; So that the optimizer does not confuse this with a normal symbol load
- +;; we use an unspec. The offset will be loaded from a constant pool entry,
- +;; since that is the only type of relocation we can use.
- +(define_insn "pic_load_addr"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (unspec:SI [(match_operand:SI 1 "" "")] UNSPEC_PIC_SYM))]
- + "flag_pic && CONSTANT_POOL_ADDRESS_P(XEXP(operands[1], 0))"
- + "lddpc\t%0, %1"
- + [(set_attr "type" "load")
- + (set_attr "length" "4")]
- +)
- +
- +(define_insn "pic_compute_got_from_pc"
- + [(set (match_operand:SI 0 "register_operand" "+r")
- + (unspec:SI [(minus:SI (pc)
- + (match_dup 0))] UNSPEC_PIC_BASE))
- + (use (label_ref (match_operand 1 "" "")))]
- + "flag_pic"
- + {
- + (*targetm.asm_out.internal_label) (asm_out_file, "L",
- + CODE_LABEL_NUMBER (operands[1]));
- +    return "rsub\t%0, pc";
- + }
- + [(set_attr "cc" "clobber")
- + (set_attr "length" "2")]
- +)
- +
- +;;== long long int - 64 bits ==================================================
- +
- +(define_expand "movdi"
- + [(set (match_operand:DI 0 "nonimmediate_operand" "")
- + (match_operand:DI 1 "general_operand" ""))]
- + ""
- + {
- +
- + /* One of the ops has to be in a register. */
- + if (GET_CODE (operands[0]) != REG)
- + operands[1] = force_reg (DImode, operands[1]);
- +
- + })
- +
- +
- +(define_insn_and_split "*movdi_internal"
- + [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r, r, r,r,r,m")
- + (match_operand:DI 1 "general_operand" "r, Ks08,Ks21,G,n,m,r"))]
- + "register_operand (operands[0], DImode)
- + || register_operand (operands[1], DImode)"
- + {
- + switch (which_alternative ){
- + case 0:
- + case 1:
- + case 2:
- + case 3:
- + case 4:
- + return "#";
- + case 5:
- + if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])))
- + return "ld.d\t%0, pc[%1 - .]";
- + else
- + return "ld.d\t%0, %1";
- + case 6:
- + return "st.d\t%0, %1";
- + default:
- + abort();
- + }
- + }
- +;; Let's split all reg->reg or imm->reg transfers into two SImode transfers
- + "reload_completed &&
- + (REG_P (operands[0]) &&
- + (REG_P (operands[1])
- + || GET_CODE (operands[1]) == CONST_INT
- + || GET_CODE (operands[1]) == CONST_DOUBLE))"
- + [(set (match_dup 0) (match_dup 1))
- + (set (match_dup 2) (match_dup 3))]
- + {
- + operands[2] = gen_highpart (SImode, operands[0]);
- + operands[0] = gen_lowpart (SImode, operands[0]);
- + if ( REG_P(operands[1]) ){
- + operands[3] = gen_highpart(SImode, operands[1]);
- + operands[1] = gen_lowpart(SImode, operands[1]);
- + } else if ( GET_CODE(operands[1]) == CONST_DOUBLE
- + || GET_CODE(operands[1]) == CONST_INT ){
- + rtx split_const[2];
- + avr32_split_const_expr (DImode, SImode, operands[1], split_const);
- + operands[3] = split_const[1];
- + operands[1] = split_const[0];
- + } else {
- + internal_error("Illegal operand[1] for movdi split!");
- + }
- + }
- +
- + [(set_attr "length" "*,*,*,*,*,4,4")
- + (set_attr "type" "*,*,*,*,*,load2,store2")
- + (set_attr "cc" "*,*,*,*,*,none,none")])
- +
- +
- +;;== 128 bits ==================================================
- +(define_expand "movti"
- + [(set (match_operand:TI 0 "nonimmediate_operand" "")
- + (match_operand:TI 1 "nonimmediate_operand" ""))]
- + "TARGET_ARCH_AP"
- + {
- +
- + /* One of the ops has to be in a register. */
- + if (GET_CODE (operands[0]) != REG)
- + operands[1] = force_reg (TImode, operands[1]);
- +
- +    /* We must fix up any pre_dec loads and post_inc stores. */
- + if ( GET_CODE (operands[0]) == MEM
- + && GET_CODE (XEXP(operands[0],0)) == POST_INC ){
- + emit_move_insn(gen_rtx_MEM(TImode, XEXP(XEXP(operands[0],0),0)), operands[1]);
- + emit_insn(gen_addsi3(XEXP(XEXP(operands[0],0),0), XEXP(XEXP(operands[0],0),0), GEN_INT(GET_MODE_SIZE(TImode))));
- + DONE;
- + }
- +
- + if ( GET_CODE (operands[1]) == MEM
- + && GET_CODE (XEXP(operands[1],0)) == PRE_DEC ){
- + emit_insn(gen_addsi3(XEXP(XEXP(operands[1],0),0), XEXP(XEXP(operands[1],0),0), GEN_INT(-GET_MODE_SIZE(TImode))));
- + emit_move_insn(operands[0], gen_rtx_MEM(TImode, XEXP(XEXP(operands[1],0),0)));
- + DONE;
- + }
- + })
- +
- +
- +(define_insn_and_split "*movti_internal"
- + [(set (match_operand:TI 0 "avr32_movti_dst_operand" "=r,&r, r, <RKu00,r,r")
- + (match_operand:TI 1 "avr32_movti_src_operand" " r,RKu00>,RKu00,r, n,T"))]
- + "(register_operand (operands[0], TImode)
- + || register_operand (operands[1], TImode))"
- + {
- + switch (which_alternative ){
- + case 0:
- + case 2:
- + case 4:
- + return "#";
- + case 1:
- + return "ldm\t%p1, %0";
- + case 3:
- + return "stm\t%p0, %1";
- + case 5:
- + return "ld.d\t%U0, pc[%1 - .]\;ld.d\t%B0, pc[%1 - . + 8]";
- + }
- + }
- +
- + "reload_completed &&
- + (REG_P (operands[0]) &&
- + (REG_P (operands[1])
- + /* If this is a load from the constant pool we split it into
- + two double loads. */
- + || (GET_CODE (operands[1]) == MEM
- + && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
- + && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
- + /* If this is a load where the pointer register is a part
- + of the register list, we must split it into two double
- + loads in order for it to be exception safe. */
- + || (GET_CODE (operands[1]) == MEM
- + && register_operand (XEXP (operands[1], 0), SImode)
- + && reg_overlap_mentioned_p (operands[0], XEXP (operands[1], 0)))
- + || GET_CODE (operands[1]) == CONST_INT
- + || GET_CODE (operands[1]) == CONST_DOUBLE))"
- + [(set (match_dup 0) (match_dup 1))
- + (set (match_dup 2) (match_dup 3))]
- + {
- + operands[2] = simplify_gen_subreg ( DImode, operands[0],
- + TImode, 0 );
- + operands[0] = simplify_gen_subreg ( DImode, operands[0],
- + TImode, 8 );
- + if ( REG_P(operands[1]) ){
- + operands[3] = simplify_gen_subreg ( DImode, operands[1],
- + TImode, 0 );
- + operands[1] = simplify_gen_subreg ( DImode, operands[1],
- + TImode, 8 );
- + } else if ( GET_CODE(operands[1]) == CONST_DOUBLE
- + || GET_CODE(operands[1]) == CONST_INT ){
- + rtx split_const[2];
- + avr32_split_const_expr (TImode, DImode, operands[1], split_const);
- + operands[3] = split_const[1];
- + operands[1] = split_const[0];
- + } else if (avr32_const_pool_ref_operand (operands[1], GET_MODE(operands[1]))){
- + rtx split_const[2];
- + rtx cop = avoid_constant_pool_reference (operands[1]);
- + if (operands[1] == cop)
- + cop = get_pool_constant (XEXP (operands[1], 0));
- + avr32_split_const_expr (TImode, DImode, cop, split_const);
- + operands[3] = force_const_mem (DImode, split_const[1]);
- + operands[1] = force_const_mem (DImode, split_const[0]);
- + } else {
- + rtx ptr_reg = XEXP (operands[1], 0);
- + operands[1] = gen_rtx_MEM (DImode,
- + gen_rtx_PLUS ( SImode,
- + ptr_reg,
- + GEN_INT (8) ));
- + operands[3] = gen_rtx_MEM (DImode,
- + ptr_reg);
- +
- + /* Check if the first load will clobber the pointer.
- + If so, we must switch the order of the operations. */
- + if ( reg_overlap_mentioned_p (operands[0], ptr_reg) )
- + {
- + /* We need to switch the order of the operations
- + so that the pointer register does not get clobbered
- + after the first double word load. */
- + rtx tmp;
- + tmp = operands[0];
- + operands[0] = operands[2];
- + operands[2] = tmp;
- + tmp = operands[1];
- + operands[1] = operands[3];
- + operands[3] = tmp;
- + }
- +
- +
- + }
- + }
- + [(set_attr "length" "*,*,4,4,*,8")
- + (set_attr "type" "*,*,load4,store4,*,load4")])
- +
- +
- +;;== float - 32 bits ==========================================================
- +(define_expand "movsf"
- + [(set (match_operand:SF 0 "nonimmediate_operand" "")
- + (match_operand:SF 1 "general_operand" ""))]
- + ""
- + {
- +
- +
- + /* One of the ops has to be in a register. */
- + if (GET_CODE (operands[0]) != REG)
- + operands[1] = force_reg (SFmode, operands[1]);
- +
- + })
- +
- +(define_insn "*movsf_internal"
- + [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,r,r,m")
- + (match_operand:SF 1 "general_operand" "r, G,F,m,r"))]
- + "(register_operand (operands[0], SFmode)
- + || register_operand (operands[1], SFmode))"
- + {
- + switch (which_alternative) {
- + case 0:
- + case 1: return "mov\t%0, %1";
- + case 2:
- + {
- + HOST_WIDE_INT target_float[2];
- + real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (operands[1]), SFmode);
- + if ( TARGET_V2_INSNS
- + && avr32_hi16_immediate_operand (GEN_INT (target_float[0]), VOIDmode) )
- + return "movh\t%0, hi(%1)";
- + else
- + return "mov\t%0, lo(%1)\;orh\t%0, hi(%1)";
- + }
- + case 3:
- + if ( (REG_P(XEXP(operands[1], 0))
- + && REGNO(XEXP(operands[1], 0)) == SP_REGNUM)
- + || (GET_CODE(XEXP(operands[1], 0)) == PLUS
- + && REGNO(XEXP(XEXP(operands[1], 0), 0)) == SP_REGNUM
- + && GET_CODE(XEXP(XEXP(operands[1], 0), 1)) == CONST_INT
- + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) % 4 == 0
- + && INTVAL(XEXP(XEXP(operands[1], 0), 1)) <= 0x1FC) )
- + return "lddsp\t%0, %1";
- + else if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])) )
- + return "lddpc\t%0, %1";
- + else
- + return "ld.w\t%0, %1";
- + case 4:
- + if ( (REG_P(XEXP(operands[0], 0))
- + && REGNO(XEXP(operands[0], 0)) == SP_REGNUM)
- + || (GET_CODE(XEXP(operands[0], 0)) == PLUS
- + && REGNO(XEXP(XEXP(operands[0], 0), 0)) == SP_REGNUM
- + && GET_CODE(XEXP(XEXP(operands[0], 0), 1)) == CONST_INT
- + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) % 4 == 0
- + && INTVAL(XEXP(XEXP(operands[0], 0), 1)) <= 0x1FC) )
- + return "stdsp\t%0, %1";
- + else
- + return "st.w\t%0, %1";
- + default:
- + abort();
- + }
- + }
- +
- + [(set_attr "length" "2,4,8,4,4")
- + (set_attr "type" "alu,alu,alu2,load,store")
- + (set_attr "cc" "none,none,clobber,none,none")])
- +
- +
- +
- +;;== double - 64 bits =========================================================
- +(define_expand "movdf"
- + [(set (match_operand:DF 0 "nonimmediate_operand" "")
- + (match_operand:DF 1 "general_operand" ""))]
- + ""
- + {
- + /* One of the ops has to be in a register. */
- + if (GET_CODE (operands[0]) != REG){
- + operands[1] = force_reg (DFmode, operands[1]);
- + }
- + })
- +
- +
- +(define_insn_and_split "*movdf_internal"
- + [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,r,r,m")
- + (match_operand:DF 1 "general_operand" " r,G,F,m,r"))]
- + "(register_operand (operands[0], DFmode)
- + || register_operand (operands[1], DFmode))"
- + {
- + switch (which_alternative ){
- + case 0:
- + case 1:
- + case 2:
- + return "#";
- + case 3:
- + if ( avr32_const_pool_ref_operand(operands[1], GET_MODE(operands[1])))
- + return "ld.d\t%0, pc[%1 - .]";
- + else
- + return "ld.d\t%0, %1";
- + case 4:
- + return "st.d\t%0, %1";
- + default:
- + abort();
- + }
- + }
- + "reload_completed
- + && (REG_P (operands[0])
- + && (REG_P (operands[1])
- + || GET_CODE (operands[1]) == CONST_DOUBLE))"
- + [(set (match_dup 0) (match_dup 1))
- + (set (match_dup 2) (match_dup 3))]
- + "
- + {
- + operands[2] = gen_highpart (SImode, operands[0]);
- + operands[0] = gen_lowpart (SImode, operands[0]);
- + operands[3] = gen_highpart(SImode, operands[1]);
- + operands[1] = gen_lowpart(SImode, operands[1]);
- + }
- + "
- +
- + [(set_attr "length" "*,*,*,4,4")
- + (set_attr "type" "*,*,*,load2,store2")
- + (set_attr "cc" "*,*,*,none,none")])
- +
- +
- +;;=============================================================================
- +;; Conditional Moves
- +;;=============================================================================
- +(define_insn "ld<mode>_predicable"
- + [(set (match_operand:MOVCC 0 "register_operand" "=r")
- + (match_operand:MOVCC 1 "avr32_non_rmw_memory_operand" "<MOVCC:pred_mem_constraint>"))]
- + "TARGET_V2_INSNS"
- + "ld<MOVCC:load_postfix>%?\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "type" "load")
- + (set_attr "predicable" "yes")]
- +)
- +
- +
- +(define_insn "st<mode>_predicable"
- + [(set (match_operand:MOVCC 0 "avr32_non_rmw_memory_operand" "=<MOVCC:pred_mem_constraint>")
- + (match_operand:MOVCC 1 "register_operand" "r"))]
- + "TARGET_V2_INSNS"
- + "st<MOVCC:store_postfix>%?\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "type" "store")
- + (set_attr "predicable" "yes")]
- +)
- +
- +(define_insn "mov<mode>_predicable"
- + [(set (match_operand:MOVCC 0 "register_operand" "=r")
- + (match_operand:MOVCC 1 "avr32_cond_register_immediate_operand" "rKs08"))]
- + ""
- + "mov%?\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "type" "alu")
- + (set_attr "predicable" "yes")]
- +)
- +
- +
- +;;=============================================================================
- +;; Move chunks of memory
- +;;=============================================================================
- +
- +(define_expand "movmemsi"
- + [(match_operand:BLK 0 "general_operand" "")
- + (match_operand:BLK 1 "general_operand" "")
- + (match_operand:SI 2 "const_int_operand" "")
- + (match_operand:SI 3 "const_int_operand" "")]
- + ""
- + "
- + if (avr32_gen_movmemsi (operands))
- + DONE;
- + FAIL;
- + "
- + )
- +
- +
- +
- +
- +;;=============================================================================
- +;; Bit field instructions
- +;;-----------------------------------------------------------------------------
- +;; Instructions to insert or extract bit-fields
- +;;=============================================================================
- +
- +(define_insn "insv"
- + [ (set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r")
- + (match_operand:SI 1 "immediate_operand" "Ku05")
- + (match_operand:SI 2 "immediate_operand" "Ku05"))
- + (match_operand 3 "register_operand" "r"))]
- + ""
- + "bfins\t%0, %3, %2, %1"
- + [(set_attr "type" "alu")
- + (set_attr "length" "4")
- + (set_attr "cc" "set_ncz")])
- +
- +
- +
- +(define_expand "extv"
- + [ (set (match_operand:SI 0 "register_operand" "")
- + (sign_extract:SI (match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")
- + (match_operand:SI 3 "immediate_operand" "")))]
- + ""
- + {
- + if ( INTVAL(operands[2]) >= 32 )
- + FAIL;
- + }
- +)
- +
- +(define_expand "extzv"
- + [ (set (match_operand:SI 0 "register_operand" "")
- + (zero_extract:SI (match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")
- + (match_operand:SI 3 "immediate_operand" "")))]
- + ""
- + {
- + if ( INTVAL(operands[2]) >= 32 )
- + FAIL;
- + }
- +)
- +
- +(define_insn "extv_internal"
- + [ (set (match_operand:SI 0 "register_operand" "=r")
- + (sign_extract:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "immediate_operand" "Ku05")
- + (match_operand:SI 3 "immediate_operand" "Ku05")))]
- + "INTVAL(operands[2]) < 32"
- + "bfexts\t%0, %1, %3, %2"
- + [(set_attr "type" "alu")
- + (set_attr "length" "4")
- + (set_attr "cc" "set_ncz")])
- +
- +
- +(define_insn "extzv_internal"
- + [ (set (match_operand:SI 0 "register_operand" "=r")
- + (zero_extract:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "immediate_operand" "Ku05")
- + (match_operand:SI 3 "immediate_operand" "Ku05")))]
- + "INTVAL(operands[2]) < 32"
- + "bfextu\t%0, %1, %3, %2"
- + [(set_attr "type" "alu")
- + (set_attr "length" "4")
- + (set_attr "cc" "set_ncz")])
- +
- +
- +
- +;;=============================================================================
- +;; Some peepholes for avoiding unnecessary cast instructions
- +;; followed by bfins.
- +;;-----------------------------------------------------------------------------
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (zero_extend:SI (match_operand:QI 1 "register_operand" "")))
- + (set (zero_extract:SI (match_operand 2 "register_operand" "")
- + (match_operand:SI 3 "immediate_operand" "")
- + (match_operand:SI 4 "immediate_operand" ""))
- + (match_dup 0))]
- + "((peep2_reg_dead_p(2, operands[0]) &&
- + (INTVAL(operands[3]) <= 8)))"
- + [(set (zero_extract:SI (match_dup 2)
- + (match_dup 3)
- + (match_dup 4))
- + (match_dup 1))]
- + )
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (zero_extend:SI (match_operand:HI 1 "register_operand" "")))
- + (set (zero_extract:SI (match_operand 2 "register_operand" "")
- + (match_operand:SI 3 "immediate_operand" "")
- + (match_operand:SI 4 "immediate_operand" ""))
- + (match_dup 0))]
- + "((peep2_reg_dead_p(2, operands[0]) &&
- + (INTVAL(operands[3]) <= 16)))"
- + [(set (zero_extract:SI (match_dup 2)
- + (match_dup 3)
- + (match_dup 4))
- + (match_dup 1))]
- + )
- +
- +;;=============================================================================
- +;; push bytes
- +;;-----------------------------------------------------------------------------
- +;; Implements the push instruction
- +;;=============================================================================
- +(define_insn "pushm"
- + [(set (mem:BLK (pre_dec:BLK (reg:SI SP_REGNUM)))
- + (unspec:BLK [(match_operand 0 "const_int_operand" "")]
- + UNSPEC_PUSHM))]
- + ""
- + {
- + if (INTVAL(operands[0])) {
- + return "pushm\t%r0";
- + } else {
- + return "";
- + }
- + }
- + [(set_attr "type" "store")
- + (set_attr "length" "2")
- + (set_attr "cc" "none")])
- +
- +(define_insn "stm"
- + [(unspec [(match_operand 0 "register_operand" "r")
- + (match_operand 1 "const_int_operand" "")
- + (match_operand 2 "const_int_operand" "")]
- + UNSPEC_STM)]
- + ""
- + {
- + if (INTVAL(operands[1])) {
- + if (INTVAL(operands[2]) != 0)
- + return "stm\t--%0, %s1";
- + else
- + return "stm\t%0, %s1";
- + } else {
- + return "";
- + }
- + }
- + [(set_attr "type" "store")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +
- +
- +(define_insn "popm"
- + [(unspec [(match_operand 0 "const_int_operand" "")]
- + UNSPEC_POPM)]
- + ""
- + {
- + if (INTVAL(operands[0])) {
- +      return "popm\t%r0";
- + } else {
- + return "";
- + }
- + }
- + [(set_attr "type" "load")
- + (set_attr "length" "2")])
- +
- +
- +
- +;;=============================================================================
- +;; add
- +;;-----------------------------------------------------------------------------
- +;; Adds reg1 with reg2 and puts the result in reg0.
- +;;=============================================================================
- +(define_insn "add<mode>3"
- + [(set (match_operand:INTM 0 "register_operand" "=r,r,r,r,r")
- + (plus:INTM (match_operand:INTM 1 "register_operand" "%0,r,0,r,0")
- + (match_operand:INTM 2 "avr32_add_operand" "r,r,Is08,Is16,Is21")))]
- + ""
- + "@
- + add %0, %2
- + add %0, %1, %2
- + sub %0, %n2
- + sub %0, %1, %n2
- + sub %0, %n2"
- +
- + [(set_attr "length" "2,4,2,4,4")
- + (set_attr "cc" "<INTM:alu_cc_attr>")])
- +
- +(define_insn "add<mode>3_lsl"
- + [(set (match_operand:INTM 0 "register_operand" "=r")
- + (plus:INTM (ashift:INTM (match_operand:INTM 1 "register_operand" "r")
- + (match_operand:INTM 3 "avr32_add_shift_immediate_operand" "Ku02"))
- + (match_operand:INTM 2 "register_operand" "r")))]
- + ""
- + "add %0, %2, %1 << %3"
- + [(set_attr "length" "4")
- + (set_attr "cc" "<INTM:alu_cc_attr>")])
- +
- +(define_insn "add<mode>3_lsl2"
- + [(set (match_operand:INTM 0 "register_operand" "=r")
- + (plus:INTM (match_operand:INTM 1 "register_operand" "r")
- + (ashift:INTM (match_operand:INTM 2 "register_operand" "r")
- + (match_operand:INTM 3 "avr32_add_shift_immediate_operand" "Ku02"))))]
- + ""
- + "add %0, %1, %2 << %3"
- + [(set_attr "length" "4")
- + (set_attr "cc" "<INTM:alu_cc_attr>")])
- +
- +
- +(define_insn "add<mode>3_mul"
- + [(set (match_operand:INTM 0 "register_operand" "=r")
- + (plus:INTM (mult:INTM (match_operand:INTM 1 "register_operand" "r")
- + (match_operand:INTM 3 "immediate_operand" "Ku04" ))
- + (match_operand:INTM 2 "register_operand" "r")))]
- + "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
- + (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
- + "add %0, %2, %1 << %p3"
- + [(set_attr "length" "4")
- + (set_attr "cc" "<INTM:alu_cc_attr>")])
- +
- +(define_insn "add<mode>3_mul2"
- + [(set (match_operand:INTM 0 "register_operand" "=r")
- + (plus:INTM (match_operand:INTM 1 "register_operand" "r")
- + (mult:INTM (match_operand:INTM 2 "register_operand" "r")
- + (match_operand:INTM 3 "immediate_operand" "Ku04" ))))]
- + "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
- + (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
- + "add %0, %1, %2 << %p3"
- + [(set_attr "length" "4")
- + (set_attr "cc" "<INTM:alu_cc_attr>")])
- +
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (ashift:SI (match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")))
- + (set (match_operand:SI 3 "register_operand" "")
- + (plus:SI (match_dup 0)
- + (match_operand:SI 4 "register_operand" "")))]
- + "(peep2_reg_dead_p(2, operands[0]) &&
- + (INTVAL(operands[2]) < 4 && INTVAL(operands[2]) > 0))"
- + [(set (match_dup 3)
- + (plus:SI (ashift:SI (match_dup 1)
- + (match_dup 2))
- + (match_dup 4)))]
- + )
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (ashift:SI (match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")))
- + (set (match_operand:SI 3 "register_operand" "")
- + (plus:SI (match_operand:SI 4 "register_operand" "")
- + (match_dup 0)))]
- + "(peep2_reg_dead_p(2, operands[0]) &&
- + (INTVAL(operands[2]) < 4 && INTVAL(operands[2]) > 0))"
- + [(set (match_dup 3)
- + (plus:SI (ashift:SI (match_dup 1)
- + (match_dup 2))
- + (match_dup 4)))]
- + )
- +
- +(define_insn "adddi3"
- + [(set (match_operand:DI 0 "register_operand" "=r,r")
- + (plus:DI (match_operand:DI 1 "register_operand" "%0,r")
- + (match_operand:DI 2 "register_operand" "r,r")))]
- + ""
- + "@
- + add %0, %2\;adc %m0, %m0, %m2
- + add %0, %1, %2\;adc %m0, %m1, %m2"
- + [(set_attr "length" "6,8")
- + (set_attr "type" "alu2")
- + (set_attr "cc" "set_vncz")])
- +
- +
- +(define_insn "add<mode>_imm_predicable"
- + [(set (match_operand:INTM 0 "register_operand" "+r")
- + (plus:INTM (match_dup 0)
- + (match_operand:INTM 1 "avr32_cond_immediate_operand" "%Is08")))]
- + ""
- + "sub%?\t%0, -%1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")]
- +)
- +
- +;;=============================================================================
- +;; subtract
- +;;-----------------------------------------------------------------------------
- +;; Subtract reg2 or immediate value from reg0 and puts the result in reg0.
- +;;=============================================================================
- +
- +(define_insn "sub<mode>3"
- + [(set (match_operand:INTM 0 "general_operand" "=r,r,r,r,r,r,r")
- + (minus:INTM (match_operand:INTM 1 "register_const_int_operand" "0,r,0,r,0,r,Ks08")
- + (match_operand:INTM 2 "register_const_int_operand" "r,r,Ks08,Ks16,Ks21,0,r")))]
- + ""
- + "@
- + sub %0, %2
- + sub %0, %1, %2
- + sub %0, %2
- + sub %0, %1, %2
- + sub %0, %2
- + rsub %0, %1
- + rsub %0, %2, %1"
- + [(set_attr "length" "2,4,2,4,4,2,4")
- + (set_attr "cc" "<INTM:alu_cc_attr>")])
- +
- +(define_insn "*sub<mode>3_mul"
- + [(set (match_operand:INTM 0 "register_operand" "=r")
- + (minus:INTM (match_operand:INTM 1 "register_operand" "r")
- + (mult:INTM (match_operand:INTM 2 "register_operand" "r")
- + (match_operand:SI 3 "immediate_operand" "Ku04" ))))]
- + "(INTVAL(operands[3]) == 0) || (INTVAL(operands[3]) == 2) ||
- + (INTVAL(operands[3]) == 4) || (INTVAL(operands[3]) == 8)"
- + "sub %0, %1, %2 << %p3"
- + [(set_attr "length" "4")
- + (set_attr "cc" "<INTM:alu_cc_attr>")])
- +
- +(define_insn "*sub<mode>3_lsl"
- + [(set (match_operand:INTM 0 "register_operand" "=r")
- + (minus:INTM (match_operand:INTM 1 "register_operand" "r")
- + (ashift:INTM (match_operand:INTM 2 "register_operand" "r")
- + (match_operand:SI 3 "avr32_add_shift_immediate_operand" "Ku02"))))]
- + ""
- + "sub %0, %1, %2 << %3"
- + [(set_attr "length" "4")
- + (set_attr "cc" "<INTM:alu_cc_attr>")])
- +
- +
- +(define_insn "subdi3"
- + [(set (match_operand:DI 0 "register_operand" "=r,r")
- + (minus:DI (match_operand:DI 1 "register_operand" "%0,r")
- + (match_operand:DI 2 "register_operand" "r,r")))]
- + ""
- + "@
- + sub %0, %2\;sbc %m0, %m0, %m2
- + sub %0, %1, %2\;sbc %m0, %m1, %m2"
- + [(set_attr "length" "6,8")
- + (set_attr "type" "alu2")
- + (set_attr "cc" "set_vncz")])
- +
- +
- +(define_insn "sub<mode>_imm_predicable"
- + [(set (match_operand:INTM 0 "register_operand" "+r")
- + (minus:INTM (match_dup 0)
- + (match_operand:INTM 1 "avr32_cond_immediate_operand" "Ks08")))]
- + ""
- + "sub%?\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")])
- +
- +(define_insn "rsub<mode>_imm_predicable"
- + [(set (match_operand:INTM 0 "register_operand" "+r")
- + (minus:INTM (match_operand:INTM 1 "avr32_cond_immediate_operand" "Ks08")
- + (match_dup 0)))]
- + ""
- + "rsub%?\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")])
- +
- +;;=============================================================================
- +;; multiply
- +;;-----------------------------------------------------------------------------
- +;; Multiply op1 and op2 and put the value in op0.
- +;;=============================================================================
- +
- +
- +(define_insn "mulqi3"
- + [(set (match_operand:QI 0 "register_operand" "=r,r,r")
- + (mult:QI (match_operand:QI 1 "register_operand" "%0,r,r")
- + (match_operand:QI 2 "avr32_mul_operand" "r,r,Ks08")))]
- + "!TARGET_NO_MUL_INSNS"
- + {
- + switch (which_alternative){
- + case 0:
- + return "mul %0, %2";
- + case 1:
- + return "mul %0, %1, %2";
- + case 2:
- + return "mul %0, %1, %2";
- + default:
- + gcc_unreachable();
- + }
- + }
- + [(set_attr "type" "mulww_w,mulww_w,mulwh")
- + (set_attr "length" "2,4,4")
- + (set_attr "cc" "none")])
- +
- +(define_insn "mulsi3"
- + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
- + (mult:SI (match_operand:SI 1 "register_operand" "%0,r,r")
- + (match_operand:SI 2 "avr32_mul_operand" "r,r,Ks08")))]
- + "!TARGET_NO_MUL_INSNS"
- + {
- + switch (which_alternative){
- + case 0:
- + return "mul %0, %2";
- + case 1:
- + return "mul %0, %1, %2";
- + case 2:
- + return "mul %0, %1, %2";
- + default:
- + gcc_unreachable();
- + }
- + }
- + [(set_attr "type" "mulww_w,mulww_w,mulwh")
- + (set_attr "length" "2,4,4")
- + (set_attr "cc" "none")])
- +
- +
- +(define_insn "mulhisi3"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (mult:SI
- + (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
- + (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "mulhh.w %0, %1:b, %2:b"
- + [(set_attr "type" "mulhh")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
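- +
- +;; Illustration (hypothetical registers): a widening 16x16->32 multiply
- +;; such as  int f (short a, short b) { return a * b; }  maps onto this
- +;; pattern and emits the single DSP instruction
- +;;     mulhh.w  rD, rA:b, rB:b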
- +
- +(define_peephole2
- + [(match_scratch:DI 6 "r")
- + (set (match_operand:SI 0 "register_operand" "")
- + (mult:SI
- + (sign_extend:SI (match_operand:HI 1 "register_operand" ""))
- + (sign_extend:SI (match_operand:HI 2 "register_operand" ""))))
- + (set (match_operand:SI 3 "register_operand" "")
- + (ashiftrt:SI (match_dup 0)
- + (const_int 16)))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP
- + && (peep2_reg_dead_p(1, operands[0]) || (REGNO(operands[0]) == REGNO(operands[3])))"
- + [(set (match_dup 4) (sign_extend:SI (match_dup 1)))
- + (set (match_dup 6)
- + (ashift:DI (mult:DI (sign_extend:DI (match_dup 4))
- + (sign_extend:DI (match_dup 2)))
- + (const_int 16)))
- + (set (match_dup 3) (match_dup 5))]
- +
- + "{
- + operands[4] = gen_rtx_REG(SImode, REGNO(operands[1]));
- + operands[5] = gen_highpart (SImode, operands[6]);
- + }"
- + )
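- +
- +;; Sketch of the transformation above (hypothetical registers): the pair
- +;;     mulhh.w  rT, rA:b, rB:b
- +;;     asr      rD, rT, 16
- +;; is rewritten into a widening multiply-shifted-by-16 into a scratch
- +;; DImode pair (matching the mulwh.d pattern below); rD is then read
- +;; from the high part of that pair, so the separate shift disappears.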
- +
- +(define_insn "mulnhisi3"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (mult:SI
- + (sign_extend:SI (neg:HI (match_operand:HI 1 "register_operand" "r")))
- + (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "mulnhh.w %0, %1:b, %2:b"
- + [(set_attr "type" "mulhh")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +(define_insn "machisi3"
- + [(set (match_operand:SI 0 "register_operand" "+r")
- + (plus:SI (mult:SI
- + (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
- + (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
- + (match_dup 0)))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "machh.w %0, %1:b, %2:b"
- + [(set_attr "type" "machh_w")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +
- +
- +(define_insn "mulsidi3"
- + [(set (match_operand:DI 0 "register_operand" "=r")
- + (mult:DI
- + (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
- + (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
- + "!TARGET_NO_MUL_INSNS"
- + "muls.d %0, %1, %2"
- + [(set_attr "type" "mulww_d")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +(define_insn "umulsidi3"
- + [(set (match_operand:DI 0 "register_operand" "=r")
- + (mult:DI
- + (zero_extend:DI (match_operand:SI 1 "register_operand" "%r"))
- + (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
- + "!TARGET_NO_MUL_INSNS"
- + "mulu.d %0, %1, %2"
- + [(set_attr "type" "mulww_d")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +(define_insn "*mulaccsi3"
- + [(set (match_operand:SI 0 "register_operand" "+r")
- + (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "%r")
- + (match_operand:SI 2 "register_operand" "r"))
- + (match_dup 0)))]
- + "!TARGET_NO_MUL_INSNS"
- + "mac %0, %1, %2"
- + [(set_attr "type" "macww_w")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +(define_insn "*mulaccsidi3"
- + [(set (match_operand:DI 0 "register_operand" "+r")
- + (plus:DI (mult:DI
- + (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
- + (sign_extend:DI (match_operand:SI 2 "register_operand" "r")))
- + (match_dup 0)))]
- + "!TARGET_NO_MUL_INSNS"
- + "macs.d %0, %1, %2"
- + [(set_attr "type" "macww_d")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +(define_insn "*umulaccsidi3"
- + [(set (match_operand:DI 0 "register_operand" "+r")
- + (plus:DI (mult:DI
- + (zero_extend:DI (match_operand:SI 1 "register_operand" "%r"))
- + (zero_extend:DI (match_operand:SI 2 "register_operand" "r")))
- + (match_dup 0)))]
- + "!TARGET_NO_MUL_INSNS"
- + "macu.d %0, %1, %2"
- + [(set_attr "type" "macww_d")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +
- +
- +;; Try to avoid write-after-write hazards for mul operations
- +;; where possible.
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (mult:SI
- + (sign_extend:SI (match_operand 1 "general_operand" ""))
- + (sign_extend:SI (match_operand 2 "general_operand" ""))))
- + (set (match_dup 0)
- + (match_operator:SI 3 "alu_operator" [(match_dup 0)
- + (match_operand 4 "general_operand" "")]))]
- + "peep2_reg_dead_p(1, operands[2])"
- + [(set (match_dup 5)
- + (mult:SI
- + (sign_extend:SI (match_dup 1))
- + (sign_extend:SI (match_dup 2))))
- + (set (match_dup 0)
- + (match_op_dup 3 [(match_dup 5)
- + (match_dup 4)]))]
- + "{operands[5] = gen_rtx_REG(SImode, REGNO(operands[2]));}"
- + )
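- +
- +;; Sketch (hypothetical registers): instead of the hazard-prone
- +;;     mul  rD, rA, rB
- +;;     add  rD, rD, rC      ; rD written by two adjacent insns
- +;; the product is retargeted at the dying operand 2 register:
- +;;     mul  rB, rA, rB
- +;;     add  rD, rB, rC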
- +
- +
- +
- +;;=============================================================================
- +;; DSP instructions
- +;;=============================================================================
- +(define_insn "mulsathh_h"
- + [(set (match_operand:HI 0 "register_operand" "=r")
- + (ss_truncate:HI (ashiftrt:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
- + (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
- + (const_int 15))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "mulsathh.h\t%0, %1:b, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulhh")])
- +
- +(define_insn "mulsatrndhh_h"
- + [(set (match_operand:HI 0 "register_operand" "=r")
- + (ss_truncate:HI (ashiftrt:SI
- + (plus:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
- + (sign_extend:SI (match_operand:HI 2 "register_operand" "r")))
- + (const_int 1073741824))
- + (const_int 15))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "mulsatrndhh.h\t%0, %1:b, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulhh")])
- +
- +(define_insn "mulsathh_w"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
- + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
- + (const_int 1))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "mulsathh.w\t%0, %1:b, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulhh")])
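- +
- +;; In fixed-point terms mulsathh.w is the classic fractional multiply:
- +;; two Q15 operands give a Q30 product, the shift left by one rescales
- +;; it to Q31, and the signed saturation catches the one overflow case,
- +;; (-1.0) * (-1.0).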
- +
- +(define_insn "mulsatwh_w"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (ss_truncate:SI (ashiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
- + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
- + (const_int 15))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "mulsatwh.w\t%0, %1, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulwh")])
- +
- +(define_insn "mulsatrndwh_w"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (ss_truncate:SI (ashiftrt:DI (plus:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
- + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
- + (const_int 1073741824))
- + (const_int 15))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "mulsatrndwh.w\t%0, %1, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulwh")])
- +
- +(define_insn "macsathh_w"
- + [(set (match_operand:SI 0 "register_operand" "+r")
- + (plus:SI (match_dup 0)
- + (ss_truncate:SI (ashift:DI (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
- + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
- + (const_int 1)))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "macsathh.w\t%0, %1:b, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulhh")])
- +
- +
- +(define_insn "mulwh_d"
- + [(set (match_operand:DI 0 "register_operand" "=r")
- + (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
- + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
- + (const_int 16)))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "mulwh.d\t%0, %1, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulwh")])
- +
- +
- +(define_insn "mulnwh_d"
- + [(set (match_operand:DI 0 "register_operand" "=r")
- + (ashift:DI (mult:DI (not:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")))
- + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
- + (const_int 16)))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "mulnwh.d\t%0, %1, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulwh")])
- +
- +(define_insn "macwh_d"
- + [(set (match_operand:DI 0 "register_operand" "+r")
- + (plus:DI (match_dup 0)
- + (ashift:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
- + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))
- + (const_int 16))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "macwh.d\t%0, %1, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulwh")])
- +
- +(define_insn "machh_d"
- + [(set (match_operand:DI 0 "register_operand" "+r")
- + (plus:DI (match_dup 0)
- + (mult:DI (sign_extend:DI (match_operand:HI 1 "register_operand" "%r"))
- + (sign_extend:DI (match_operand:HI 2 "register_operand" "r")))))]
- + "!TARGET_NO_MUL_INSNS && TARGET_DSP"
- + "machh.d\t%0, %1:b, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "mulwh")])
- +
- +(define_insn "satadd_w"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (ss_plus:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "register_operand" "r")))]
- + "TARGET_DSP"
- + "satadd.w\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "alu_sat")])
- +
- +(define_insn "satsub_w"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (ss_minus:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "register_operand" "r")))]
- + "TARGET_DSP"
- + "satsub.w\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "alu_sat")])
- +
- +(define_insn "satadd_h"
- + [(set (match_operand:HI 0 "register_operand" "=r")
- + (ss_plus:HI (match_operand:HI 1 "register_operand" "r")
- + (match_operand:HI 2 "register_operand" "r")))]
- + "TARGET_DSP"
- + "satadd.h\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "alu_sat")])
- +
- +(define_insn "satsub_h"
- + [(set (match_operand:HI 0 "register_operand" "=r")
- + (ss_minus:HI (match_operand:HI 1 "register_operand" "r")
- + (match_operand:HI 2 "register_operand" "r")))]
- + "TARGET_DSP"
- + "satsub.h\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")
- + (set_attr "type" "alu_sat")])
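- +
- +;; Illustration: ss_plus/ss_minus clamp instead of wrapping, so
- +;; satadd.w on 0x7fffffff and 1 yields 0x7fffffff (2^31 - 1) rather
- +;; than the wrapped -2^31.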
- +
- +
- +;;=============================================================================
- +;; smin
- +;;-----------------------------------------------------------------------------
- +;; Set reg0 to the smaller of reg1 and reg2, treating the register
- +;; values as signed.
- +;;=============================================================================
- +(define_insn "sminsi3"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (smin:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "register_operand" "r")))]
- + ""
- + "min %0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +;;=============================================================================
- +;; smax
- +;;-----------------------------------------------------------------------------
- +;; Set reg0 to the larger of reg1 and reg2, treating the register
- +;; values as signed.
- +;;=============================================================================
- +(define_insn "smaxsi3"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (smax:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "register_operand" "r")))]
- + ""
- + "max %0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +
- +
- +;;=============================================================================
- +;; Logical operations
- +;;-----------------------------------------------------------------------------
- +
- +
- +;; Split up simple DImode logical operations. Simply perform the logical
- +;; operation on the upper and lower halves of the registers.
- +(define_split
- + [(set (match_operand:DI 0 "register_operand" "")
- + (match_operator:DI 6 "logical_binary_operator"
- + [(match_operand:DI 1 "register_operand" "")
- + (match_operand:DI 2 "register_operand" "")]))]
- + "reload_completed"
- + [(set (match_dup 0) (match_op_dup:SI 6 [(match_dup 1) (match_dup 2)]))
- + (set (match_dup 3) (match_op_dup:SI 6 [(match_dup 4) (match_dup 5)]))]
- + "
- + {
- + operands[3] = gen_highpart (SImode, operands[0]);
- + operands[0] = gen_lowpart (SImode, operands[0]);
- + operands[4] = gen_highpart (SImode, operands[1]);
- + operands[1] = gen_lowpart (SImode, operands[1]);
- + operands[5] = gen_highpart (SImode, operands[2]);
- + operands[2] = gen_lowpart (SImode, operands[2]);
- + }"
- +)
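- +
- +;; Sketch of the split (hypothetical registers): after reload
- +;;     rDhi:rDlo = rAhi:rAlo & rBhi:rBlo
- +;; becomes two independent SImode operations,
- +;;     and  rDlo, rAlo, rBlo
- +;;     and  rDhi, rAhi, rBhi
- +;; and likewise for ior and xor.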
- +
- +;;=============================================================================
- +;; Logical operations with shifted operand
- +;;=============================================================================
- +(define_insn "<code>si_lshift"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (logical:SI (match_operator:SI 4 "logical_shift_operator"
- + [(match_operand:SI 2 "register_operand" "r")
- + (match_operand:SI 3 "immediate_operand" "Ku05")])
- + (match_operand:SI 1 "register_operand" "r")))]
- + ""
- + {
- + if ( GET_CODE(operands[4]) == ASHIFT )
- + return "<logical_insn>\t%0, %1, %2 << %3";
- + else
- + return "<logical_insn>\t%0, %1, %2 >> %3";
- + }
- +
- + [(set_attr "cc" "set_z")]
- +)
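- +
- +;; Illustration (hypothetical registers): `d = x | (y << 5)` matches
- +;; this pattern with operator 4 being an ashift and emits
- +;;     or  rD, rX, rY << 5
- +;; a logical right shift in the source prints `>>` instead.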
- +
- +
- +;;************************************************
- +;; Peepholes for detecting logical operations
- +;; with shifted operands
- +;;************************************************
- +
- +(define_peephole
- + [(set (match_operand:SI 3 "register_operand" "")
- + (match_operator:SI 5 "logical_shift_operator"
- + [(match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")]))
- + (set (match_operand:SI 0 "register_operand" "")
- + (logical:SI (match_operand:SI 4 "register_operand" "")
- + (match_dup 3)))]
- + "(dead_or_set_p(insn, operands[3])) || (REGNO(operands[3]) == REGNO(operands[0]))"
- + {
- + if ( GET_CODE(operands[5]) == ASHIFT )
- + return "<logical_insn>\t%0, %4, %1 << %2";
- + else
- + return "<logical_insn>\t%0, %4, %1 >> %2";
- + }
- + [(set_attr "cc" "set_z")]
- + )
- +
- +(define_peephole
- + [(set (match_operand:SI 3 "register_operand" "")
- + (match_operator:SI 5 "logical_shift_operator"
- + [(match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")]))
- + (set (match_operand:SI 0 "register_operand" "")
- + (logical:SI (match_dup 3)
- + (match_operand:SI 4 "register_operand" "")))]
- + "(dead_or_set_p(insn, operands[3])) || (REGNO(operands[3]) == REGNO(operands[0]))"
- + {
- + if ( GET_CODE(operands[5]) == ASHIFT )
- + return "<logical_insn>\t%0, %4, %1 << %2";
- + else
- + return "<logical_insn>\t%0, %4, %1 >> %2";
- + }
- + [(set_attr "cc" "set_z")]
- + )
- +
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (match_operator:SI 5 "logical_shift_operator"
- + [(match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")]))
- + (set (match_operand:SI 3 "register_operand" "")
- + (logical:SI (match_operand:SI 4 "register_operand" "")
- + (match_dup 0)))]
- + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[3]) == REGNO(operands[0]))"
- +
- + [(set (match_dup 3)
- + (logical:SI (match_op_dup:SI 5 [(match_dup 1) (match_dup 2)])
- + (match_dup 4)))]
- +
- + ""
- +)
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (match_operator:SI 5 "logical_shift_operator"
- + [(match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")]))
- + (set (match_operand:SI 3 "register_operand" "")
- + (logical:SI (match_dup 0)
- + (match_operand:SI 4 "register_operand" "")))]
- + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[3]) == REGNO(operands[0]))"
- +
- + [(set (match_dup 3)
- + (logical:SI (match_op_dup:SI 5 [(match_dup 1) (match_dup 2)])
- + (match_dup 4)))]
- +
- + ""
- +)
- +
- +
- +;;=============================================================================
- +;; and
- +;;-----------------------------------------------------------------------------
- +;; Store the result of a bitwise logical-and between reg0 and reg2 in reg0.
- +;;=============================================================================
- +
- +(define_insn "andnsi"
- + [(set (match_operand:SI 0 "register_operand" "+r")
- + (and:SI (match_dup 0)
- + (not:SI (match_operand:SI 1 "register_operand" "r"))))]
- + ""
- + "andn %0, %1"
- + [(set_attr "cc" "set_z")
- + (set_attr "length" "2")]
- +)
- +
- +
- +(define_insn "andsi3"
- + [(set (match_operand:SI 0 "avr32_rmw_memory_or_register_operand" "=Y,r,r,r, r, r,r,r,r,r")
- + (and:SI (match_operand:SI 1 "avr32_rmw_memory_or_register_operand" "%0,r,0,0, 0, 0,0,0,0,r" )
- + (match_operand:SI 2 "nonmemory_operand" " N,M,N,Ku16,Ks17,J,L,r,i,r")))]
- + ""
- + "@
- + memc\t%0, %z2
- + bfextu\t%0, %1, 0, %z2
- + cbr\t%0, %z2
- + andl\t%0, %2, COH
- + andl\t%0, lo(%2)
- + andh\t%0, hi(%2), COH
- + andh\t%0, hi(%2)
- + and\t%0, %2
- + andh\t%0, hi(%2)\;andl\t%0, lo(%2)
- + and\t%0, %1, %2"
- +
- + [(set_attr "length" "4,4,2,4,4,4,4,2,8,4")
- + (set_attr "cc" "none,set_z,set_z,set_z,set_z,set_z,set_z,set_z,set_z,set_z")])
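- +
- +;; Illustration of the immediate alternatives (hypothetical values):
- +;; a single-cleared-bit mask such as `x &= ~(1 << 9)` matches the `N`
- +;; alternative and emits the 2-byte `cbr rX, 9`, while a mask needing
- +;; both halves, e.g. `x &= 0x00ff00ff`, falls through to the 8-byte
- +;;     andh rX, hi(0x00ff00ff)
- +;;     andl rX, lo(0x00ff00ff)
- +;; The iorsi3 and xorsi3 patterns below follow the same halves scheme.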
- +
- +
- +
- +(define_insn "anddi3"
- + [(set (match_operand:DI 0 "register_operand" "=&r,&r")
- + (and:DI (match_operand:DI 1 "register_operand" "%0,r")
- + (match_operand:DI 2 "register_operand" "r,r")))]
- + ""
- + "#"
- + [(set_attr "length" "8")
- + (set_attr "cc" "clobber")]
- +)
- +
- +;;=============================================================================
- +;; or
- +;;-----------------------------------------------------------------------------
- +;; Store the result of a bitwise inclusive-or between reg0 and reg2 in reg0.
- +;;=============================================================================
- +
- +(define_insn "iorsi3"
- + [(set (match_operand:SI 0 "avr32_rmw_memory_or_register_operand" "=Y,r,r, r,r,r,r")
- + (ior:SI (match_operand:SI 1 "avr32_rmw_memory_or_register_operand" "%0,0,0, 0,0,0,r" )
- + (match_operand:SI 2 "nonmemory_operand" " O,O,Ku16,J,r,i,r")))]
- + ""
- + "@
- + mems\t%0, %p2
- + sbr\t%0, %p2
- + orl\t%0, %2
- + orh\t%0, hi(%2)
- + or\t%0, %2
- + orh\t%0, hi(%2)\;orl\t%0, lo(%2)
- + or\t%0, %1, %2"
- +
- + [(set_attr "length" "4,2,4,4,2,8,4")
- + (set_attr "cc" "none,set_z,set_z,set_z,set_z,set_z,set_z")])
- +
- +
- +(define_insn "iordi3"
- + [(set (match_operand:DI 0 "register_operand" "=&r,&r")
- + (ior:DI (match_operand:DI 1 "register_operand" "%0,r")
- + (match_operand:DI 2 "register_operand" "r,r")))]
- + ""
- + "#"
- + [(set_attr "length" "8")
- + (set_attr "cc" "clobber")]
- +)
- +
- +;;=============================================================================
- +;; xor
- +;;-----------------------------------------------------------------------------
- +;; Store the result of a bitwise exclusive-or between reg0 and reg2 in reg0.
- +;;=============================================================================
- +
- +(define_insn "xorsi3"
- + [(set (match_operand:SI 0 "avr32_rmw_memory_or_register_operand" "=Y,r, r,r,r,r")
- + (xor:SI (match_operand:SI 1 "avr32_rmw_memory_or_register_operand" "%0,0, 0,0,0,r" )
- + (match_operand:SI 2 "nonmemory_operand" " O,Ku16,J,r,i,r")))]
- + ""
- + "@
- + memt\t%0, %p2
- + eorl\t%0, %2
- + eorh\t%0, hi(%2)
- + eor\t%0, %2
- + eorh\t%0, hi(%2)\;eorl\t%0, lo(%2)
- + eor\t%0, %1, %2"
- +
- + [(set_attr "length" "4,4,4,2,8,4")
- + (set_attr "cc" "none,set_z,set_z,set_z,set_z,set_z")])
- +
- +(define_insn "xordi3"
- + [(set (match_operand:DI 0 "register_operand" "=&r,&r")
- + (xor:DI (match_operand:DI 1 "register_operand" "%0,r")
- + (match_operand:DI 2 "register_operand" "r,r")))]
- + ""
- + "#"
- + [(set_attr "length" "8")
- + (set_attr "cc" "clobber")]
- +)
- +
- +;;=============================================================================
- +;; Three operand predicable insns
- +;;=============================================================================
- +
- +(define_insn "<predicable_insn3><mode>_predicable"
- + [(set (match_operand:INTM 0 "register_operand" "=r")
- + (predicable_op3:INTM (match_operand:INTM 1 "register_operand" "<predicable_commutative3>r")
- + (match_operand:INTM 2 "register_operand" "r")))]
- + "TARGET_V2_INSNS"
- + "<predicable_insn3>%?\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")]
- +)
- +
- +(define_insn_and_split "<predicable_insn3><mode>_imm_clobber_predicable"
- + [(parallel
- + [(set (match_operand:INTM 0 "register_operand" "=r")
- + (predicable_op3:INTM (match_operand:INTM 1 "register_operand" "<predicable_commutative3>r")
- + (match_operand:INTM 2 "avr32_mov_immediate_operand" "JKs21")))
- + (clobber (match_operand:INTM 3 "register_operand" "=&r"))])]
- + "TARGET_V2_INSNS"
- + {
- + if ( current_insn_predicate != NULL_RTX )
- + {
- + if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08") )
- + return "%! mov%?\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
- + else if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks21") )
- + return "%! mov\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
- + else
- + return "%! movh\t%3, hi(%2)\;<predicable_insn3>%?\t%0, %1, %3";
- + }
- + else
- + {
- + if ( !avr32_cond_imm_clobber_splittable (insn, operands) )
- + {
- + if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks08") )
- + return "mov%?\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
- + else if ( avr32_const_ok_for_constraint_p (INTVAL (operands[2]), 'K', "Ks21") )
- + return "mov\t%3, %2\;<predicable_insn3>%?\t%0, %1, %3";
- + else
- + return "movh\t%3, hi(%2)\;<predicable_insn3>%?\t%0, %1, %3";
- + }
- + return "#";
- + }
- +
- + }
- + ;; If we find out that we could not actually do if-conversion on the block
- + ;; containing this insn, we convert it back to the normal immediate format
- + ;; to avoid outputting a redundant move insn.
- + ;; Do not split until after we have checked whether we can make the insn
- + ;; conditional.
- + "(GET_CODE (PATTERN (insn)) != COND_EXEC
- + && cfun->machine->ifcvt_after_reload
- + && avr32_cond_imm_clobber_splittable (insn, operands))"
- + [(set (match_dup 0)
- + (predicable_op3:INTM (match_dup 1)
- + (match_dup 2)))]
- + ""
- + [(set_attr "length" "8")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")]
- + )
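- +
- +;; Sketch of the if-converted output (hypothetical condition and
- +;; registers), for an immediate that fits Ks08:
- +;;     moveq  r8, 42           ; materialize the immediate in the scratch
- +;;     addeq  rD, rA, r8       ; predicated register-register op
- +;; If if-conversion does not happen after all, the splitter above drops
- +;; the scratch again and the plain immediate pattern is used instead.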
- +
- +
- +;;=============================================================================
- +;; Zero extend predicable insns
- +;;=============================================================================
- +(define_insn_and_split "zero_extendhisi_clobber_predicable"
- + [(parallel
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (zero_extend:SI (match_operand:HI 1 "register_operand" "r")))
- + (clobber (match_operand:SI 2 "register_operand" "=&r"))])]
- + "TARGET_V2_INSNS"
- + {
- + if ( current_insn_predicate != NULL_RTX )
- + {
- + return "%! mov\t%2, 0xffff\;and%?\t%0, %1, %2";
- + }
- + else
- + {
- + return "#";
- + }
- +
- + }
- + ;; If we find out that we could not actually do if-conversion on the block
- + ;; containing this insn, we convert it back to the normal immediate format
- + ;; to avoid outputting a redundant move insn.
- + ;; Do not split until after we have checked whether we can make the insn
- + ;; conditional.
- + "(GET_CODE (PATTERN (insn)) != COND_EXEC
- + && cfun->machine->ifcvt_after_reload)"
- + [(set (match_dup 0)
- + (zero_extend:SI (match_dup 1)))]
- + ""
- + [(set_attr "length" "8")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")]
- + )
- +
- +(define_insn_and_split "zero_extendqisi_clobber_predicable"
- + [(parallel
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (zero_extend:SI (match_operand:QI 1 "register_operand" "r")))
- + (clobber (match_operand:SI 2 "register_operand" "=&r"))])]
- + "TARGET_V2_INSNS"
- + {
- + if ( current_insn_predicate != NULL_RTX )
- + {
- + return "%! mov\t%2, 0xff\;and%?\t%0, %1, %2";
- + }
- + else
- + {
- + return "#";
- + }
- +
- + }
- + ;; If we find out that we could not actually do if-conversion on the block
- + ;; containing this insn, we convert it back to the normal immediate format
- + ;; to avoid outputting a redundant move insn.
- + ;; Do not split until after we have checked whether we can make the insn
- + ;; conditional.
- + "(GET_CODE (PATTERN (insn)) != COND_EXEC
- + && cfun->machine->ifcvt_after_reload)"
- + [(set (match_dup 0)
- + (zero_extend:SI (match_dup 1)))]
- + ""
- + [(set_attr "length" "8")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")]
- + )
- +
- +(define_insn_and_split "zero_extendqihi_clobber_predicable"
- + [(parallel
- + [(set (match_operand:HI 0 "register_operand" "=r")
- + (zero_extend:HI (match_operand:QI 1 "register_operand" "r")))
- + (clobber (match_operand:SI 2 "register_operand" "=&r"))])]
- + "TARGET_V2_INSNS"
- + {
- + if ( current_insn_predicate != NULL_RTX )
- + {
- + return "%! mov\t%2, 0xff\;and%?\t%0, %1, %2";
- + }
- + else
- + {
- + return "#";
- + }
- +
- + }
- + ;; If we find out that we could not actually do if-conversion on the block
- + ;; containing this insn, we convert it back to the normal immediate format
- + ;; to avoid outputting a redundant move insn.
- + ;; Do not split until after we have checked whether we can make the insn
- + ;; conditional.
- + "(GET_CODE (PATTERN (insn)) != COND_EXEC
- + && cfun->machine->ifcvt_after_reload)"
- + [(set (match_dup 0)
- + (zero_extend:HI (match_dup 1)))]
- + ""
- + [(set_attr "length" "8")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")]
- + )
- +;;=============================================================================
- +;; divmod
- +;;-----------------------------------------------------------------------------
- +;; Signed division that produces both a quotient and a remainder.
- +;;=============================================================================
- +
- +(define_expand "divmodsi4"
- + [(parallel [
- + (parallel [
- + (set (match_operand:SI 0 "register_operand" "=r")
- + (div:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "register_operand" "r")))
- + (set (match_operand:SI 3 "register_operand" "=r")
- + (mod:SI (match_dup 1)
- + (match_dup 2)))])
- + (use (match_dup 4))])]
- + ""
- + {
- + if (can_create_pseudo_p ()) {
- + operands[4] = gen_reg_rtx (DImode);
- + emit_insn(gen_divmodsi4_internal(operands[4],operands[1],operands[2]));
- + emit_move_insn(operands[0], gen_rtx_SUBREG( SImode, operands[4], 4));
- + emit_move_insn(operands[3], gen_rtx_SUBREG( SImode, operands[4], 0));
- + DONE;
- + } else {
- + FAIL;
- + }
- + })
- +
- +
- +(define_insn "divmodsi4_internal"
- + [(set (match_operand:DI 0 "register_operand" "=r")
- + (unspec:DI [(match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "register_operand" "r")]
- + UNSPEC_DIVMODSI4_INTERNAL))]
- + ""
- + "divs %0, %1, %2"
- + [(set_attr "type" "div")
- + (set_attr "cc" "none")])
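- +
- +;; Illustration: a function computing both `a / b` and `a % b` expands
- +;; to one `divs` into a DImode register pair; the expander above then
- +;; pulls the quotient and the remainder out of the two SImode subwords
- +;; (byte offsets 4 and 0, respectively) of that pair.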
- +
- +
- +;;=============================================================================
- +;; udivmod
- +;;-----------------------------------------------------------------------------
- +;; Unsigned division that produces both a quotient and a remainder.
- +;;=============================================================================
- +(define_expand "udivmodsi4"
- + [(parallel [
- + (parallel [
- + (set (match_operand:SI 0 "register_operand" "=r")
- + (udiv:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "register_operand" "r")))
- + (set (match_operand:SI 3 "register_operand" "=r")
- + (umod:SI (match_dup 1)
- + (match_dup 2)))])
- + (use (match_dup 4))])]
- + ""
- + {
- + if (can_create_pseudo_p ()) {
- + operands[4] = gen_reg_rtx (DImode);
- +
- + emit_insn(gen_udivmodsi4_internal(operands[4],operands[1],operands[2]));
- + emit_move_insn(operands[0], gen_rtx_SUBREG( SImode, operands[4], 4));
- + emit_move_insn(operands[3], gen_rtx_SUBREG( SImode, operands[4], 0));
- +
- + DONE;
- + } else {
- + FAIL;
- + }
- + })
- +
- +(define_insn "udivmodsi4_internal"
- + [(set (match_operand:DI 0 "register_operand" "=r")
- + (unspec:DI [(match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "register_operand" "r")]
- + UNSPEC_UDIVMODSI4_INTERNAL))]
- + ""
- + "divu %0, %1, %2"
- + [(set_attr "type" "div")
- + (set_attr "cc" "none")])
- +
- +
- +;;=============================================================================
- +;; Arithmetic-shift left
- +;;-----------------------------------------------------------------------------
- +;; Arithmetic-shift reg0 left by reg2 or an immediate value.
- +;;=============================================================================
- +
- +(define_insn "ashlsi3"
- + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
- + (ashift:SI (match_operand:SI 1 "register_operand" "r,0,r")
- + (match_operand:SI 2 "register_const_int_operand" "r,Ku05,Ku05")))]
- + ""
- + "@
- + lsl %0, %1, %2
- + lsl %0, %2
- + lsl %0, %1, %2"
- + [(set_attr "length" "4,2,4")
- + (set_attr "cc" "set_ncz")])
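- +
- +;; Illustration: with a constant shift count and operand 0 tied to
- +;; operand 1 (alternative 1), `x <<= 3` emits the 2-byte `lsl rX, 3`;
- +;; the untied forms cost 4 bytes. The ashrsi3 and lshrsi3 patterns
- +;; below follow the same scheme.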
- +
- +;;=============================================================================
- +;; Arithmetic-shift right
- +;;-----------------------------------------------------------------------------
- +;; Arithmetic-shift reg0 right by reg2 or an immediate value.
- +;;=============================================================================
- +
- +(define_insn "ashrsi3"
- + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
- + (ashiftrt:SI (match_operand:SI 1 "register_operand" "r,0,r")
- + (match_operand:SI 2 "register_const_int_operand" "r,Ku05,Ku05")))]
- + ""
- + "@
- + asr %0, %1, %2
- + asr %0, %2
- + asr %0, %1, %2"
- + [(set_attr "length" "4,2,4")
- + (set_attr "cc" "set_ncz")])
- +
- +;;=============================================================================
- +;; Logical shift right
- +;;-----------------------------------------------------------------------------
- +;; Logical-shift reg0 right by reg2 or an immediate value.
- +;;=============================================================================
- +
- +(define_insn "lshrsi3"
- + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
- + (lshiftrt:SI (match_operand:SI 1 "register_operand" "r,0,r")
- + (match_operand:SI 2 "register_const_int_operand" "r,Ku05,Ku05")))]
- + ""
- + "@
- + lsr %0, %1, %2
- + lsr %0, %2
- + lsr %0, %1, %2"
- + [(set_attr "length" "4,2,4")
- + (set_attr "cc" "set_ncz")])
- +
- +
- +;;=============================================================================
- +;; neg
- +;;-----------------------------------------------------------------------------
- +;; Negate operand 1 and store the result in operand 0.
- +;;=============================================================================
- +(define_insn "negsi2"
- + [(set (match_operand:SI 0 "register_operand" "=r,r")
- + (neg:SI (match_operand:SI 1 "register_operand" "0,r")))]
- + ""
- + "@
- + neg\t%0
- + rsub\t%0, %1, 0"
- + [(set_attr "length" "2,4")
- + (set_attr "cc" "set_vncz")])
- +
- +(define_insn "negsi2_predicable"
- + [(set (match_operand:SI 0 "register_operand" "+r")
- + (neg:SI (match_dup 0)))]
- + "TARGET_V2_INSNS"
- + "rsub%?\t%0, 0"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")])
- +
- +;;=============================================================================
- +;; abs
- +;;-----------------------------------------------------------------------------
- +;; Store the absolute value of operand 1 into operand 0.
- +;;=============================================================================
- +(define_insn "abssi2"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (abs:SI (match_operand:SI 1 "register_operand" "0")))]
- + ""
- + "abs\t%0"
- + [(set_attr "length" "2")
- + (set_attr "cc" "set_z")])
- +
- +
- +;;=============================================================================
- +;; one_cmpl
- +;;-----------------------------------------------------------------------------
- +;; Store the bitwise-complement of operand 1 into operand 0.
- +;;=============================================================================
- +
- +(define_insn "one_cmplsi2"
- + [(set (match_operand:SI 0 "register_operand" "=r,r")
- + (not:SI (match_operand:SI 1 "register_operand" "0,r")))]
- + ""
- + "@
- + com\t%0
- + rsub\t%0, %1, -1"
- + [(set_attr "length" "2,4")
- + (set_attr "cc" "set_z")])
- +
- +
- +(define_insn "one_cmplsi2_predicable"
- + [(set (match_operand:SI 0 "register_operand" "+r")
- + (not:SI (match_dup 0)))]
- + "TARGET_V2_INSNS"
- + "rsub%?\t%0, -1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "predicable" "yes")])
- +
- +
- +;;=============================================================================
- +;; Bit load
- +;;-----------------------------------------------------------------------------
- +;; Load a bit into the Z and C flags.
- +;;=============================================================================
- +(define_insn "bldsi"
- + [(set (cc0)
- + (and:SI (match_operand:SI 0 "register_operand" "r")
- + (match_operand:SI 1 "one_bit_set_operand" "i")))]
- + ""
- + "bld\t%0, %p1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "bld")]
- + )
- +
- +
- +;;=============================================================================
- +;; Compare
- +;;-----------------------------------------------------------------------------
- +;; Compare reg0 with reg1 or an immediate value.
- +;;=============================================================================
- +
- +(define_expand "cmp<mode>"
- + [(set (cc0)
- + (compare:CMP
- + (match_operand:CMP 0 "register_operand" "")
- + (match_operand:CMP 1 "<CMP:cmp_predicate>" "")))]
- + ""
- + "{
- + avr32_compare_op0 = operands[0];
- + avr32_compare_op1 = operands[1];
- + }"
- +)
- +
- +(define_insn "cmp<mode>_internal"
- + [(set (cc0)
- + (compare:CMP
- + (match_operand:CMP 0 "register_operand" "r")
- + (match_operand:CMP 1 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")))]
- + ""
- + {
- +switch(GET_MODE(operands[0]))
- + {
- + case QImode:
- + avr32_branch_type = CMP_QI;
- + break;
- + case HImode:
- + avr32_branch_type = CMP_HI;
- + break;
- + case SImode:
- + avr32_branch_type = CMP_SI;
- + break;
- + case DImode:
- + avr32_branch_type = CMP_DI;
- + break;
- + default:
- + abort();
- + }
- + /* Check if the next insn will already output a compare. */
- + if (!next_insn_emits_cmp (insn))
- + set_next_insn_cond(insn,
- + avr32_output_cmp(get_next_insn_cond(insn), GET_MODE (operands[0]), operands[0], operands[1]));
- + return "";
- + }
- + [(set_attr "length" "4")
- + (set_attr "cc" "compare")])
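- +
- +;; Note on the scheme above: the template returns an empty string in
- +;; both arms. Either the next insn emits the compare itself (as the
- +;; conditional move/add patterns below do through avr32_output_cmp),
- +;; or avr32_output_cmp is called here, emitting the compare and handing
- +;; the (possibly adjusted) condition on to the next insn.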
- +
- +(define_expand "cmpsf"
- + [(set (cc0)
- + (compare:SF
- + (match_operand:SF 0 "general_operand" "")
- + (match_operand:SF 1 "general_operand" "")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "{
- + if ( !REG_P(operands[0]) )
- + operands[0] = force_reg(SFmode, operands[0]);
- +
- + if ( !REG_P(operands[1]) )
- + operands[1] = force_reg(SFmode, operands[1]);
- +
- + avr32_compare_op0 = operands[0];
- + avr32_compare_op1 = operands[1];
- + emit_insn(gen_cmpsf_internal_uc3fp(operands[0], operands[1]));
- + DONE;
- + }"
- +)
- +
- +;;=============================================================================
- +;; Test if zero
- +;;-----------------------------------------------------------------------------
- +;; Compare reg against zero and set the condition codes.
- +;;=============================================================================
- +
- +
- +(define_expand "tstsi"
- + [(set (cc0)
- + (match_operand:SI 0 "register_operand" ""))]
- + ""
- + {
- + avr32_compare_op0 = operands[0];
- + avr32_compare_op1 = const0_rtx;
- + }
- +)
- +
- +(define_insn "tstsi_internal"
- + [(set (cc0)
- + (match_operand:SI 0 "register_operand" "r"))]
- + ""
- + {
- + /* Check if the next insn will already output a compare. */
- + if (!next_insn_emits_cmp (insn))
- + set_next_insn_cond(insn,
- + avr32_output_cmp(get_next_insn_cond(insn), SImode, operands[0], const0_rtx));
- +
- + return "";
- + }
- + [(set_attr "length" "2")
- + (set_attr "cc" "compare")])
- +
- +
- +(define_expand "tstdi"
- + [(set (cc0)
- + (match_operand:DI 0 "register_operand" ""))]
- + ""
- + {
- + avr32_compare_op0 = operands[0];
- + avr32_compare_op1 = const0_rtx;
- + }
- +)
- +
- +(define_insn "tstdi_internal"
- + [(set (cc0)
- + (match_operand:DI 0 "register_operand" "r"))]
- + ""
- + {
- + /* Check if the next insn will already output a compare. */
- + if (!next_insn_emits_cmp (insn))
- + set_next_insn_cond(insn,
- + avr32_output_cmp(get_next_insn_cond(insn), DImode, operands[0], const0_rtx));
- + return "";
- + }
- + [(set_attr "length" "4")
- + (set_attr "type" "alu2")
- + (set_attr "cc" "compare")])
- +
- +
- +
- +;;=============================================================================
- +;; Convert operands
- +;;-----------------------------------------------------------------------------
- +;;
- +;;=============================================================================
- +(define_insn "truncdisi2"
- + [(set (match_operand:SI 0 "general_operand" "")
- + (truncate:SI (match_operand:DI 1 "general_operand" "")))]
- + ""
- + "truncdisi2")
- +
- +;;=============================================================================
- +;; Extend
- +;;-----------------------------------------------------------------------------
- +;;
- +;;=============================================================================
- +
- +
- +(define_insn "extendhisi2"
- + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
- + (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
- + ""
- + {
- + switch ( which_alternative ){
- + case 0:
- + return "casts.h\t%0";
- + case 1:
- + return "bfexts\t%0, %1, 0, 16";
- + case 2:
- + case 3:
- + return "ld.sh\t%0, %1";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "length" "2,4,2,4")
- + (set_attr "cc" "set_ncz,set_ncz,none,none")
- + (set_attr "type" "alu,alu,load_rm,load_rm")])
- +
- +(define_insn "extendqisi2"
- + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
- + (sign_extend:SI (match_operand:QI 1 "extendqi_operand" "0,r,RKu00,m")))]
- + ""
- + {
- + switch ( which_alternative ){
- + case 0:
- + return "casts.b\t%0";
- + case 1:
- + return "bfexts\t%0, %1, 0, 8";
- + case 2:
- + case 3:
- + return "ld.sb\t%0, %1";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "length" "2,4,2,4")
- + (set_attr "cc" "set_ncz,set_ncz,none,none")
- + (set_attr "type" "alu,alu,load_rm,load_rm")])
- +
- +(define_insn "extendqihi2"
- + [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
- + (sign_extend:HI (match_operand:QI 1 "extendqi_operand" "0,r,RKu00,m")))]
- + ""
- + {
- + switch ( which_alternative ){
- + case 0:
- + return "casts.b\t%0";
- + case 1:
- + return "bfexts\t%0, %1, 0, 8";
- + case 2:
- + case 3:
- + return "ld.sb\t%0, %1";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "length" "2,4,2,4")
- + (set_attr "cc" "set_ncz,set_ncz,none,none")
- + (set_attr "type" "alu,alu,load_rm,load_rm")])
- +
- +
- +;;=============================================================================
- +;; Zero-extend
- +;;-----------------------------------------------------------------------------
- +;;
- +;;=============================================================================
- +
- +(define_insn "zero_extendhisi2"
- + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
- + (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
- + ""
- + {
- + switch ( which_alternative ){
- + case 0:
- + return "castu.h\t%0";
- + case 1:
- + return "bfextu\t%0, %1, 0, 16";
- + case 2:
- + case 3:
- + return "ld.uh\t%0, %1";
- + default:
- + abort();
- + }
- + }
- +
- + [(set_attr "length" "2,4,2,4")
- + (set_attr "cc" "set_ncz,set_ncz,none,none")
- + (set_attr "type" "alu,alu,load_rm,load_rm")])
- +
- +(define_insn "zero_extendqisi2"
- + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
- + (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
- + ""
- + {
- + switch ( which_alternative ){
- + case 0:
- + return "castu.b\t%0";
- + case 1:
- + return "bfextu\t%0, %1, 0, 8";
- + case 2:
- + case 3:
- + return "ld.ub\t%0, %1";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "length" "2,4,2,4")
- + (set_attr "cc" "set_ncz, set_ncz, none, none")
- + (set_attr "type" "alu, alu, load_rm, load_rm")])
- +
- +(define_insn "zero_extendqihi2"
- + [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
- + (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "0,r,<RKu00>,m")))]
- + ""
- + {
- + switch ( which_alternative ){
- + case 0:
- + return "castu.b\t%0";
- + case 1:
- + return "bfextu\t%0, %1, 0, 8";
- + case 2:
- + case 3:
- + return "ld.ub\t%0, %1";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "length" "2,4,2,4")
- + (set_attr "cc" "set_ncz, set_ncz, none, none")
- + (set_attr "type" "alu, alu, load_rm, load_rm")])
- +
- +
- +;;=============================================================================
- +;; Conditional load and extend insns
- +;;=============================================================================
- +(define_insn "ldsi<mode>_predicable_se"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (sign_extend:SI
- + (match_operand:INTM 1 "memory_operand" "<INTM:pred_mem_constraint>")))]
- + "TARGET_V2_INSNS"
- + "ld<INTM:load_postfix_s>%?\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "type" "load")
- + (set_attr "predicable" "yes")]
- +)
- +
- +(define_insn "ldsi<mode>_predicable_ze"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (zero_extend:SI
- + (match_operand:INTM 1 "memory_operand" "<INTM:pred_mem_constraint>")))]
- + "TARGET_V2_INSNS"
- + "ld<INTM:load_postfix_u>%?\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "type" "load")
- + (set_attr "predicable" "yes")]
- +)
- +
- +(define_insn "ldhi_predicable_ze"
- + [(set (match_operand:HI 0 "register_operand" "=r")
- + (zero_extend:HI
- + (match_operand:QI 1 "memory_operand" "RKs10")))]
- + "TARGET_V2_INSNS"
- + "ld.ub%?\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "type" "load")
- + (set_attr "predicable" "yes")]
- +)
- +
- +(define_insn "ldhi_predicable_se"
- + [(set (match_operand:HI 0 "register_operand" "=r")
- + (sign_extend:HI
- + (match_operand:QI 1 "memory_operand" "RKs10")))]
- + "TARGET_V2_INSNS"
- + "ld.sb%?\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "cmp_cond_insn")
- + (set_attr "type" "load")
- + (set_attr "predicable" "yes")]
- +)
- +
- +;;=============================================================================
- +;; Conditional set register
- +;; sr{cond4} rd
- +;;-----------------------------------------------------------------------------
- +
- +;; Because of the same issue as with conditional moves and adds, we must
- +;; not separate the compare instruction from the scc instruction, as
- +;; they might be scheduled badly.
- +
- +(define_expand "s<code>"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (any_cond:SI (cc0)
- + (const_int 0)))]
- +""
- +{
- + if(TARGET_HARD_FLOAT && TARGET_ARCH_FPU)
- + FAIL;
- +})
- +
- +(define_insn "*s<code>"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (any_cond:SI (cc0)
- + (const_int 0)))]
- + ""
- +{
- + return "sr<cond>\t%0";
- +}
- +[(set_attr "length" "2")
- +(set_attr "cc" "none")])
- +
- +(define_insn "seq"
- +[(set (match_operand:SI 0 "register_operand" "=r")
- +(eq:SI (cc0)
- + (const_int 0)))]
- + ""
- +"sreq\t%0"
- +[(set_attr "length" "2")
- +(set_attr "cc" "none")])
- +
- +(define_insn "sne"
- +[(set (match_operand:SI 0 "register_operand" "=r")
- +(ne:SI (cc0)
- + (const_int 0)))]
- + ""
- +"srne\t%0"
- + [(set_attr "length" "2")
- + (set_attr "cc" "none")])
- +
- +(define_insn "smi"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (unspec:SI [(cc0)
- + (const_int 0)] UNSPEC_COND_MI))]
- + ""
- + "srmi\t%0"
- + [(set_attr "length" "2")
- + (set_attr "cc" "none")])
- +
- +(define_insn "spl"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (unspec:SI [(cc0)
- + (const_int 0)] UNSPEC_COND_PL))]
- + ""
- + "srpl\t%0"
- + [(set_attr "length" "2")
- + (set_attr "cc" "none")])
- +
- +
- +;;=============================================================================
- +;; Conditional branch
- +;;-----------------------------------------------------------------------------
- +;; Branch to label if the specified condition codes are set.
- +;;=============================================================================
- +; branch if negative
- +(define_insn "bmi"
- + [(set (pc)
- + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_MI)
- + (label_ref (match_operand 0 "" ""))
- + (pc)))]
- + ""
- + "brmi %0"
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)] ; use compact branch
- + (const_int 4))) ; use extended branch
- + (set_attr "cc" "none")])
- +
- +(define_insn "*bmi-reverse"
- + [(set (pc)
- + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_MI)
- + (pc)
- + (label_ref (match_operand 0 "" ""))))]
- + ""
- + "brpl %0"
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)] ; use compact branch
- + (const_int 4))) ; use extended branch
- + (set_attr "cc" "none")])
- +
- +; branch if positive
- +(define_insn "bpl"
- + [(set (pc)
- + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_PL)
- + (label_ref (match_operand 0 "" ""))
- + (pc)))]
- + ""
- + "brpl %0"
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)] ; use compact branch
- + (const_int 4))) ; use extended branch
- + (set_attr "cc" "none")])
- +
- +(define_insn "*bpl-reverse"
- + [(set (pc)
- + (if_then_else (unspec:CC [(cc0) (const_int 0)] UNSPEC_COND_PL)
- + (pc)
- + (label_ref (match_operand 0 "" ""))))]
- + ""
- + "brmi %0"
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)] ; use compact branch
- + (const_int 4))) ; use extended branch
- + (set_attr "cc" "none")])
- +
- +; generic conditional branch
- +(define_insn "b<code>"
- + [(set (pc)
- + (if_then_else (any_cond_b:CC (cc0)
- + (const_int 0))
- + (label_ref (match_operand 0 "" ""))
- + (pc)))]
- + ""
- + {
- + if (TARGET_HARD_FLOAT && TARGET_ARCH_FPU && (avr32_branch_type == CMP_SF))
- + return get_attr_length(insn) == 6 ? "brvs .+6\;br<cond> %0" : "brvs .+8\;br<cond> %0";
- + else
- + return "br<cond> %0";
- + }
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (if_then_else (eq (const_int 1)(symbol_ref "TARGET_HARD_FLOAT && TARGET_ARCH_FPU"))
- + (if_then_else
- + (and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 6)
- + (const_int 8))
- + (if_then_else
- + (and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)
- + (const_int 4))))
- + (set_attr "cc" "none")])
- +
- +(define_insn "beq"
- + [(set (pc)
- + (if_then_else (eq:CC (cc0)
- + (const_int 0))
- + (label_ref (match_operand 0 "" ""))
- + (pc)))]
- + ""
- + "breq %0";
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)] ; use compact branch
- + (const_int 4))) ; use extended branch
- + (set_attr "cc" "none")])
- +
- +(define_insn "bne"
- + [(set (pc)
- + (if_then_else (ne:CC (cc0)
- + (const_int 0))
- + (label_ref (match_operand 0 "" ""))
- + (pc)))]
- + ""
- + "brne %0";
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)] ; use compact branch
- + (const_int 4))) ; use extended branch
- + (set_attr "cc" "none")])
- +
- +(define_insn "b<code>"
- + [(set (pc)
- + (if_then_else (any_cond4:CC (cc0)
- + (const_int 0))
- + (label_ref (match_operand 0 "" ""))
- + (pc)))]
- + ""
- + {
- + if(TARGET_HARD_FLOAT && TARGET_ARCH_FPU && (avr32_branch_type == CMP_SF))
- + return "brvs .+8\;br<cond> %l0";
- + else
- + return "br<cond> %l0";
- + }
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(eq (const_int 1)(symbol_ref "TARGET_HARD_FLOAT && TARGET_ARCH_FPU"))
- + (const_int 8)]
- + (const_int 4)))
- + (set_attr "cc" "none")])
- +
- +(define_insn "*b<code>-reverse"
- + [(set (pc)
- + (if_then_else (any_cond_b:CC (cc0)
- + (const_int 0))
- + (pc)
- + (label_ref (match_operand 0 "" ""))))]
- + ""
- + {
- + if (TARGET_HARD_FLOAT && TARGET_ARCH_FPU && (avr32_branch_type == CMP_SF))
- + return "brvs %0\;br<invcond> %0";
- + else
- + return "br<invcond> %0";
- + }
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (if_then_else (eq (const_int 1)(symbol_ref "TARGET_HARD_FLOAT && TARGET_ARCH_FPU"))
- + (if_then_else
- + (and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 6)
- + (const_int 8))
- + (if_then_else
- + (and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)
- + (const_int 4))))
- + (set_attr "cc" "none")])
- +
- +(define_insn "*beq-reverse"
- + [(set (pc)
- + (if_then_else (eq:CC (cc0)
- + (const_int 0))
- + (pc)
- + (label_ref (match_operand 0 "" ""))))]
- + ""
- + "brne %0";
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)] ; use compact branch
- + (const_int 4))) ; use extended branch
- + (set_attr "cc" "none")])
- +
- +(define_insn "*bne-reverse"
- + [(set (pc)
- + (if_then_else (ne:CC (cc0)
- + (const_int 0))
- + (pc)
- + (label_ref (match_operand 0 "" ""))))]
- + ""
- + "breq %0";
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
- + (le (minus (pc) (match_dup 0)) (const_int 256)))
- + (const_int 2)] ; use compact branch
- + (const_int 4))) ; use extended branch
- + (set_attr "cc" "none")])
- +
- +(define_insn "*b<code>-reverse"
- + [(set (pc)
- + (if_then_else (any_cond4:CC (cc0)
- + (const_int 0))
- + (pc)
- + (label_ref (match_operand 0 "" ""))))]
- + ""
- + {
- + if (TARGET_HARD_FLOAT && TARGET_ARCH_FPU && (avr32_branch_type == CMP_SF))
- + return "brvs %l0\;br<invcond> %l0";
- + else
- + return "br<invcond> %0";
- + }
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(eq (const_int 1)(symbol_ref "TARGET_HARD_FLOAT && TARGET_ARCH_FPU"))
- + (const_int 8)]
- + (const_int 4)))
- + (set_attr "cc" "none")])
- +
- +;;=============================================================================
- +;; Conditional Add/Subtract
- +;;-----------------------------------------------------------------------------
- +;; sub{cond4} Rd, imm
- +;;=============================================================================
- +
- +
- +(define_expand "add<mode>cc"
- + [(set (match_operand:ADDCC 0 "register_operand" "")
- + (if_then_else:ADDCC (match_operator 1 "avr32_comparison_operator"
- + [(match_dup 4)
- + (match_dup 5)])
- + (match_operand:ADDCC 2 "register_operand" "")
- + (plus:ADDCC
- + (match_dup 2)
- + (match_operand:ADDCC 3 "" ""))))]
- + ""
- + {
- + if ( !(GET_CODE (operands[3]) == CONST_INT
- + || (TARGET_V2_INSNS && REG_P(operands[3]))) ){
- + FAIL;
- + }
- +
- + /* Delete compare instruction as it is merged into this instruction */
- + remove_insn (get_last_insn_anywhere ());
- +
- + operands[4] = avr32_compare_op0;
- + operands[5] = avr32_compare_op1;
- +
- + if ( TARGET_V2_INSNS
- + && REG_P(operands[3])
- + && REGNO(operands[0]) != REGNO(operands[2]) ){
- + emit_move_insn (operands[0], operands[2]);
- + operands[2] = operands[0];
- + }
- + }
- + )
- +
- +(define_insn "add<ADDCC:mode>cc_cmp<CMP:mode>_reg"
- + [(set (match_operand:ADDCC 0 "register_operand" "=r")
- + (if_then_else:ADDCC (match_operator 1 "avr32_comparison_operator"
- + [(match_operand:CMP 4 "register_operand" "r")
- + (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")])
- + (match_dup 0)
- + (plus:ADDCC
- + (match_operand:ADDCC 2 "register_operand" "r")
- + (match_operand:ADDCC 3 "register_operand" "r"))))]
- + "TARGET_V2_INSNS"
- + {
- + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
- + return "add%i1\t%0, %2, %3";
- + }
- + [(set_attr "length" "8")
- + (set_attr "cc" "cmp_cond_insn")])
- +
- +(define_insn "add<ADDCC:mode>cc_cmp<CMP:mode>"
- + [(set (match_operand:ADDCC 0 "register_operand" "=r")
- + (if_then_else:ADDCC (match_operator 1 "avr32_comparison_operator"
- + [(match_operand:CMP 4 "register_operand" "r")
- + (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>")])
- + (match_operand:ADDCC 2 "register_operand" "0")
- + (plus:ADDCC
- + (match_dup 2)
- + (match_operand:ADDCC 3 "avr32_cond_immediate_operand" "Is08"))))]
- + ""
- + {
- + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
- + return "sub%i1\t%0, -%3";
- + }
- + [(set_attr "length" "8")
- + (set_attr "cc" "cmp_cond_insn")])
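- +
- +;; Note the identity used above: as in add<mode>_imm_predicable, the
- +;; conditional add of an immediate is emitted as a conditional subtract
- +;; of the negated immediate, x + imm == x - (-imm), hence the `-%3`.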
- +
- +;;=============================================================================
- +;; Conditional Move
- +;;-----------------------------------------------------------------------------
- +;; mov{cond4} Rd, (Rs/imm)
- +;;=============================================================================
- +(define_expand "mov<mode>cc"
- + [(set (match_operand:MOVCC 0 "register_operand" "")
- + (if_then_else:MOVCC (match_operator 1 "avr32_comparison_operator"
- + [(match_dup 4)
- + (match_dup 5)])
- + (match_operand:MOVCC 2 "avr32_cond_register_immediate_operand" "")
- + (match_operand:MOVCC 3 "avr32_cond_register_immediate_operand" "")))]
- + ""
- + {
- + /* Delete the compare instruction, as it is merged into this instruction. */
- + remove_insn (get_last_insn_anywhere ());
- +
- + operands[4] = avr32_compare_op0;
- + operands[5] = avr32_compare_op1;
- + }
- + )
- +
- +
- +(define_insn "mov<MOVCC:mode>cc_cmp<CMP:mode>"
- + [(set (match_operand:MOVCC 0 "register_operand" "=r,r,r")
- + (if_then_else:MOVCC (match_operator 1 "avr32_comparison_operator"
- + [(match_operand:CMP 4 "register_operand" "r,r,r")
- + (match_operand:CMP 5 "<CMP:cmp_predicate>" "<CMP:cmp_constraint>,<CMP:cmp_constraint>,<CMP:cmp_constraint>")])
- + (match_operand:MOVCC 2 "avr32_cond_register_immediate_operand" "0, rKs08,rKs08")
- + (match_operand:MOVCC 3 "avr32_cond_register_immediate_operand" "rKs08,0,rKs08")))]
- + ""
- + {
- + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
- +
- + switch( which_alternative ){
- + case 0:
- + return "mov%i1 %0, %3";
- + case 1:
- + return "mov%1 %0, %2";
- + case 2:
- + return "mov%1 %0, %2\;mov%i1 %0, %3";
- + default:
- + abort();
- + }
- +
- + }
- + [(set_attr "length" "8,8,12")
- + (set_attr "cc" "cmp_cond_insn")])
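- +
- +;; Worked example (illustrative register names): for
- +;;     x = (a == b) ? x : 5;
- +;; alternative 0 above ties operand 2 to the destination, so only
- +;;     cp.w  ra, rb
- +;;     movne rx, 5
- +;; is needed; the fully general alternative 2 emits both a mov%1 and a
- +;; mov%i1.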
- +
- +
- +
- +
- +;;=============================================================================
- +;; jump
- +;;-----------------------------------------------------------------------------
- +;; Jump inside a function; an unconditional branch to a label.
- +;;=============================================================================
- +(define_insn "jump"
- + [(set (pc)
- + (label_ref (match_operand 0 "" "")))]
- + ""
- + {
- + if (get_attr_length(insn) > 4)
- + return "Can't jump this far";
- + return (get_attr_length(insn) == 2 ?
- + "rjmp %0" : "bral %0");
- + }
- + [(set_attr "type" "branch")
- + (set (attr "length")
- + (cond [(and (le (minus (match_dup 0) (pc)) (const_int 1022))
- + (le (minus (pc) (match_dup 0)) (const_int 1024)))
- + (const_int 2) ; use rjmp
- + (le (match_dup 0) (const_int 1048575))
- + (const_int 4)] ; use bral
- + (const_int 8))) ; do something else
- + (set_attr "cc" "none")])
- +
- +;;=============================================================================
- +;; call
- +;;-----------------------------------------------------------------------------
- +;; Subroutine call instruction returning no value.
- +;;=============================================================================
- +(define_insn "call_internal"
- + [(parallel [(call (mem:SI (match_operand:SI 0 "avr32_call_operand" "r,U,T,W"))
- + (match_operand 1 "" ""))
- + (clobber (reg:SI LR_REGNUM))])]
- + ""
- + {
- +
- + /* Check for a flashvault call. */
- + if (avr32_flashvault_call (SYMBOL_REF_DECL (operands[0])))
- + {
- + /* Assembly is already emitted. */
- + return "";
- + }
- +
- + switch (which_alternative) {
- + case 0:
- + return "icall\t%0";
- + case 1:
- + return "rcall\t%0";
- + case 2:
- + return "mcall\t%0";
- + case 3:
- + if (TARGET_HAS_ASM_ADDR_PSEUDOS)
- + return "call\t%0";
- + else
- + return "mcall\tr6[%0@got]";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "type" "call")
- + (set_attr "length" "2,4,4,10")
- + (set_attr "cc" "clobber")])
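- +
- +;; Summary of the alternatives above: "icall" calls through a register,
- +;; "rcall" reaches PC-relative targets, "mcall" loads the target address
- +;; from memory, and the "W" alternative uses the assembler's "call"
- +;; pseudo when available, falling back to a GOT-indirect
- +;; "mcall r6[sym@got]".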
- +
- +
- +(define_expand "call"
- + [(parallel [(call (match_operand:SI 0 "" "")
- + (match_operand 1 "" ""))
- + (clobber (reg:SI LR_REGNUM))])]
- + ""
- + {
- + rtx call_address;
- + if ( GET_CODE(operands[0]) != MEM )
- + FAIL;
- +
- + call_address = XEXP(operands[0], 0);
- +
- + /* If the assembler supports the call pseudo insn and the call address is a symbol, nothing special needs to be done. */
- + if (TARGET_HAS_ASM_ADDR_PSEUDOS && (GET_CODE(call_address) == SYMBOL_REF) )
- + {
- + /* We must however mark the function as using the GOT if flag_pic is set, since the call insn might turn into an mcall using the GOT ptr register. */
- + if (flag_pic)
- + {
- + crtl->uses_pic_offset_table = 1;
- + emit_call_insn(gen_call_internal(call_address, operands[1]));
- + DONE;
- + }
- + }
- + else
- + {
- + if (flag_pic && GET_CODE(call_address) == SYMBOL_REF )
- + {
- + crtl->uses_pic_offset_table = 1;
- + emit_call_insn(gen_call_internal(call_address, operands[1]));
- + DONE;
- + }
- +
- + if (!SYMBOL_REF_RCALL_FUNCTION_P(operands[0]) )
- + {
- + if (optimize_size && GET_CODE(call_address) == SYMBOL_REF )
- + {
- + call_address = force_const_mem(SImode, call_address);
- + }
- + else
- + {
- + call_address = force_reg(SImode, call_address);
- + }
- + }
- + }
- + emit_call_insn(gen_call_internal(call_address, operands[1]));
- + DONE;
- +
- + }
- +)
- +
- +;;=============================================================================
- +;; call_value
- +;;-----------------------------------------------------------------------------
- +;; Subroutine call instruction returning a value.
- +;;=============================================================================
- +(define_expand "call_value"
- + [(parallel [(set (match_operand:SI 0 "" "")
- + (call (match_operand:SI 1 "" "")
- + (match_operand 2 "" "")))
- + (clobber (reg:SI LR_REGNUM))])]
- + ""
- + {
- + rtx call_address;
- + if ( GET_CODE(operands[1]) != MEM )
- + FAIL;
- +
- + call_address = XEXP(operands[1], 0);
- +
- + /* Check for a flashvault call.
- + if (GET_CODE (call_address) == SYMBOL_REF
- + && avr32_flashvault_call (SYMBOL_REF_DECL (call_address)))
- + DONE;
- +
- + */
- +
- + /* If the assembler supports the call pseudo insn and the call
- + address is a symbol, nothing special needs to be done. */
- + if ( TARGET_HAS_ASM_ADDR_PSEUDOS
- + && (GET_CODE(call_address) == SYMBOL_REF) ){
- + /* We must however mark the function as using the GOT if
- + flag_pic is set, since the call insn might turn into
- + an mcall using the GOT ptr register. */
- + if ( flag_pic ) {
- + crtl->uses_pic_offset_table = 1;
- + emit_call_insn(gen_call_value_internal(operands[0], call_address, operands[2]));
- + DONE;
- + }
- + } else {
- + if ( flag_pic &&
- + GET_CODE(call_address) == SYMBOL_REF ){
- + crtl->uses_pic_offset_table = 1;
- + emit_call_insn(gen_call_value_internal(operands[0], call_address, operands[2]));
- + DONE;
- + }
- +
- + if ( !SYMBOL_REF_RCALL_FUNCTION_P(operands[1]) ){
- + if ( optimize_size &&
- + GET_CODE(call_address) == SYMBOL_REF){
- + call_address = force_const_mem(SImode, call_address);
- + } else {
- + call_address = force_reg(SImode, call_address);
- + }
- + }
- + }
- + emit_call_insn(gen_call_value_internal(operands[0], call_address,
- + operands[2]));
- + DONE;
- +
- + })
- +
- +(define_insn "call_value_internal"
- + [(parallel [(set (match_operand 0 "register_operand" "=r,r,r,r")
- + (call (mem:SI (match_operand:SI 1 "avr32_call_operand" "r,U,T,W"))
- + (match_operand 2 "" "")))
- + (clobber (reg:SI LR_REGNUM))])]
- + ;; Operand 2 not used on the AVR32.
- + ""
- + {
- + /* Check for a flashvault call. */
- + if (avr32_flashvault_call (SYMBOL_REF_DECL (operands[1])))
- + {
- + /* Assembly is already emitted. */
- + return "";
- + }
- +
- +
- + switch (which_alternative) {
- + case 0:
- + return "icall\t%1";
- + case 1:
- + return "rcall\t%1";
- + case 2:
- + return "mcall\t%1";
- + case 3:
- + if ( TARGET_HAS_ASM_ADDR_PSEUDOS )
- + return "call\t%1";
- + else
- + return "mcall\tr6[%1@got]";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "type" "call")
- + (set_attr "length" "2,4,4,10")
- + (set_attr "cc" "call_set")])
- +
- +
- +;;=============================================================================
- +;; untyped_call
- +;;-----------------------------------------------------------------------------
- +;; Subroutine call instruction returning a value of any type.
- +;; The code is copied from m68k.md.
- +;; Fixme!
- +;;=============================================================================
- +(define_expand "untyped_call"
- + [(parallel [(call (match_operand 0 "avr32_call_operand" "")
- + (const_int 0))
- + (match_operand 1 "" "")
- + (match_operand 2 "" "")])]
- + ""
- + {
- + int i;
- +
- + emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
- +
- + for (i = 0; i < XVECLEN (operands[2], 0); i++) {
- + rtx set = XVECEXP (operands[2], 0, i);
- + emit_move_insn (SET_DEST (set), SET_SRC (set));
- + }
- +
- + /* The optimizer does not know that the call sets the function value
- + registers we stored in the result block. We avoid problems by
- + claiming that all hard registers are used and clobbered at this
- + point. */
- + emit_insn (gen_blockage ());
- +
- + DONE;
- + })
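- +
- +;; This expander backs GCC's __builtin_apply machinery; a minimal
- +;; illustrative use (fn is an assumed function pointer) is
- +;;     void *args = __builtin_apply_args ();
- +;;     __builtin_apply ((void (*)()) fn, args, 64);
- +;; operand 2 then describes the (set ...) copies moved back out of the
- +;; result block by the loop above.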
- +
- +
- +;;=============================================================================
- +;; return
- +;;=============================================================================
- +
- +(define_insn "return"
- + [(return)]
- + "USE_RETURN_INSN (FALSE)"
- + {
- + avr32_output_return_instruction(TRUE, FALSE, NULL, NULL);
- + return "";
- + }
- + [(set_attr "length" "4")
- + (set_attr "type" "call")]
- + )
- +
- +
- +(define_insn "return_cond"
- + [(set (pc)
- + (if_then_else (match_operand 0 "avr32_comparison_operand" "")
- + (return)
- + (pc)))]
- + "USE_RETURN_INSN (TRUE)"
- + "ret%0\tr12";
- + [(set_attr "type" "call")])
- +
- +(define_insn "return_cond_predicable"
- + [(return)]
- + "USE_RETURN_INSN (TRUE)"
- + "ret%?\tr12";
- + [(set_attr "type" "call")
- + (set_attr "predicable" "yes")])
- +
- +
- +(define_insn "return_imm"
- + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
- + (use (reg RETVAL_REGNUM))
- + (return)])]
- + "USE_RETURN_INSN (FALSE) &&
- + ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
- + {
- + avr32_output_return_instruction(TRUE, FALSE, NULL, operands[0]);
- + return "";
- + }
- + [(set_attr "length" "4")
- + (set_attr "type" "call")]
- + )
- +
- +(define_insn "return_imm_cond"
- + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
- + (use (reg RETVAL_REGNUM))
- + (set (pc)
- + (if_then_else (match_operand 1 "avr32_comparison_operand" "")
- + (return)
- + (pc)))])]
- + "USE_RETURN_INSN (TRUE) &&
- + ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
- + "ret%1\t%0";
- + [(set_attr "type" "call")]
- + )
- +
- +(define_insn "return_imm_predicable"
- + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
- + (use (reg RETVAL_REGNUM))
- + (return)])]
- + "USE_RETURN_INSN (TRUE) &&
- + ((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
- + "ret%?\t%0";
- + [(set_attr "type" "call")
- + (set_attr "predicable" "yes")])
- +
- +(define_insn "return_<mode>reg"
- + [(set (reg RETVAL_REGNUM) (match_operand:MOVM 0 "register_operand" "r"))
- + (use (reg RETVAL_REGNUM))
- + (return)]
- + "USE_RETURN_INSN (TRUE)"
- + "ret%?\t%0";
- + [(set_attr "type" "call")
- + (set_attr "predicable" "yes")])
- +
- +(define_insn "return_<mode>reg_cond"
- + [(set (reg RETVAL_REGNUM) (match_operand:MOVM 0 "register_operand" "r"))
- + (use (reg RETVAL_REGNUM))
- + (set (pc)
- + (if_then_else (match_operator 1 "avr32_comparison_operator"
- + [(cc0) (const_int 0)])
- + (return)
- + (pc)))]
- + "USE_RETURN_INSN (TRUE)"
- + "ret%1\t%0";
- + [(set_attr "type" "call")])
- +
- +;;=============================================================================
- +;; nonlocal_goto_receiver
- +;;-----------------------------------------------------------------------------
- +;; For targets with a return stack we must make sure to flush the return stack
- +;; since it will be corrupt after a nonlocal goto.
- +;;=============================================================================
- +(define_expand "nonlocal_goto_receiver"
- + [(const_int 0)]
- + "TARGET_RETURN_STACK"
- + "
- + {
- + emit_insn ( gen_frs() );
- + DONE;
- + }
- + "
- + )
- +
- +
- +;;=============================================================================
- +;; builtin_setjmp_receiver
- +;;-----------------------------------------------------------------------------
- +;; For pic code we need to reload the pic register.
- +;; For targets with a return stack we must make sure to flush the return stack
- +;; since it will probably be corrupted.
- +;;=============================================================================
- +(define_expand "builtin_setjmp_receiver"
- + [(label_ref (match_operand 0 "" ""))]
- + "flag_pic"
- + "
- + {
- + if ( TARGET_RETURN_STACK )
- + emit_insn ( gen_frs() );
- +
- + avr32_load_pic_register ();
- + DONE;
- + }
- + "
- +)
- +
- +
- +;;=============================================================================
- +;; indirect_jump
- +;;-----------------------------------------------------------------------------
- +;; Jump to an address in reg or memory.
- +;;=============================================================================
- +(define_expand "indirect_jump"
- + [(set (pc)
- + (match_operand:SI 0 "general_operand" ""))]
- + ""
- + {
- + /* One of the ops has to be in a register. */
- + if ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS )
- + && !avr32_legitimate_pic_operand_p(operands[0]) )
- + operands[0] = legitimize_pic_address (operands[0], SImode, 0);
- + else if ( flag_pic && avr32_address_operand(operands[0], GET_MODE(operands[0])) )
- + /* If we have an address operand then this function uses the pic register. */
- + crtl->uses_pic_offset_table = 1;
- + })
- +
- +
- +(define_insn "indirect_jump_internal"
- + [(set (pc)
- + (match_operand:SI 0 "avr32_non_rmw_general_operand" "r,m,W"))]
- + ""
- + {
- + switch( which_alternative ){
- + case 0:
- + return "mov\tpc, %0";
- + case 1:
- + if ( avr32_const_pool_ref_operand(operands[0], GET_MODE(operands[0])) )
- + return "lddpc\tpc, %0";
- + else
- + return "ld.w\tpc, %0";
- + case 2:
- + if ( flag_pic )
- + return "ld.w\tpc, r6[%0@got]";
- + else
- + return "lda.w\tpc, %0";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "length" "2,4,8")
- + (set_attr "type" "call,call,call")
- + (set_attr "cc" "none,none,clobber")])
- +
- +
- +
- +;;=============================================================================
- +;; casesi and tablejump
- +;;=============================================================================
- +(define_insn "tablejump_add"
- + [(set (pc)
- + (plus:SI (match_operand:SI 0 "register_operand" "r")
- + (mult:SI (match_operand:SI 1 "register_operand" "r")
- + (match_operand:SI 2 "immediate_operand" "Ku04" ))))
- + (use (label_ref (match_operand 3 "" "")))]
- + "flag_pic &&
- + ((INTVAL(operands[2]) == 0) || (INTVAL(operands[2]) == 2) ||
- + (INTVAL(operands[2]) == 4) || (INTVAL(operands[2]) == 8))"
- + "add\tpc, %0, %1 << %p2"
- + [(set_attr "length" "4")
- + (set_attr "cc" "clobber")])
- +
- +(define_insn "tablejump_insn"
- + [(set (pc) (match_operand:SI 0 "memory_operand" "m"))
- + (use (label_ref (match_operand 1 "" "")))]
- + "!flag_pic"
- + "ld.w\tpc, %0"
- + [(set_attr "length" "4")
- + (set_attr "type" "call")
- + (set_attr "cc" "none")])
- +
- +(define_expand "casesi"
- + [(match_operand:SI 0 "register_operand" "") ; index to jump on
- + (match_operand:SI 1 "const_int_operand" "") ; lower bound
- + (match_operand:SI 2 "const_int_operand" "") ; total range
- + (match_operand:SI 3 "" "") ; table label
- + (match_operand:SI 4 "" "")] ; Out of range label
- + ""
- + "
- + {
- + rtx reg;
- + rtx index = operands[0];
- + rtx low_bound = operands[1];
- + rtx range = operands[2];
- + rtx table_label = operands[3];
- + rtx oor_label = operands[4];
- +
- + index = force_reg ( SImode, index );
- + if (low_bound != const0_rtx)
- + {
- + if (!avr32_const_ok_for_constraint_p(INTVAL (low_bound), 'I', \"Is21\")){
- + reg = force_reg(SImode, GEN_INT (INTVAL (low_bound)));
- + emit_insn (gen_subsi3 (reg, index,
- + reg));
- + } else {
- + reg = gen_reg_rtx (SImode);
- + emit_insn (gen_addsi3 (reg, index,
- + GEN_INT (-INTVAL (low_bound))));
- + }
- + index = reg;
- + }
- +
- + if (!avr32_const_ok_for_constraint_p (INTVAL (range), 'K', \"Ks21\"))
- + range = force_reg (SImode, range);
- +
- + emit_cmp_and_jump_insns ( index, range, GTU, NULL_RTX, SImode, 1, oor_label );
- + reg = gen_reg_rtx (SImode);
- + emit_move_insn ( reg, gen_rtx_LABEL_REF (VOIDmode, table_label));
- +
- + if ( flag_pic )
- + emit_jump_insn ( gen_tablejump_add ( reg, index, GEN_INT(4), table_label));
- + else
- + emit_jump_insn (
- + gen_tablejump_insn ( gen_rtx_MEM ( SImode,
- + gen_rtx_PLUS ( SImode,
- + reg,
- + gen_rtx_MULT ( SImode,
- + index,
- + GEN_INT(4)))),
- + table_label));
- + DONE;
- + }"
- +)
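- +
- +;; Worked example (illustrative register names): a dense 4-way switch on
- +;; rx with lower bound 1 expands roughly to
- +;;     sub   ri, rx, 1          ; bias the index
- +;;     cp.w  ri, 3
- +;;     brhi  .Ldefault          ; unsigned out-of-range check (GTU)
- +;;     mov   rt, .Ltable
- +;;     ld.w  pc, rt[ri << 2]    ; non-PIC: tablejump_insn
- +;; while PIC code uses "add pc, rt, ri << 2" (tablejump_add) instead.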
- +
- +
- +
- +(define_insn "prefetch"
- + [(prefetch (match_operand:SI 0 "avr32_ks16_address_operand" "p")
- + (match_operand 1 "const_int_operand" "")
- + (match_operand 2 "const_int_operand" ""))]
- + ""
- + {
- + return "pref\t%0";
- + }
- +
- + [(set_attr "length" "4")
- + (set_attr "type" "load")
- + (set_attr "cc" "none")])
- +
- +
- +
- +;;=============================================================================
- +;; prologue
- +;;-----------------------------------------------------------------------------
- +;; This pattern, if defined, emits RTL for entry to a function. The function
- +;; entry is responsible for setting up the stack frame, initializing the frame
- +;; pointer register, saving callee-saved registers, etc.
- +;;=============================================================================
- +(define_expand "prologue"
- + [(clobber (const_int 0))]
- + ""
- + "
- + avr32_expand_prologue();
- + DONE;
- + "
- + )
- +
- +;;=============================================================================
- +;; eh_return
- +;;-----------------------------------------------------------------------------
- +;; This pattern, if defined, affects the way __builtin_eh_return, and
- +;; thence the call frame exception handling library routines, are
- +;; built. It is intended to handle non-trivial actions needed along
- +;; the abnormal return path.
- +;;
- +;; The address of the exception handler to which the function should
- +;; return is passed as an operand to this pattern. It will normally need
- +;; to be copied by the pattern to some special register or memory
- +;; location. If the pattern needs to determine the location of the
- +;; target call frame in order to do so, it may use
- +;; EH_RETURN_STACKADJ_RTX, if defined; it will have already been
- +;; assigned.
- +;;
- +;; If this pattern is not defined, the default action will be to
- +;; simply copy the return address to EH_RETURN_HANDLER_RTX. Either
- +;; that macro or this pattern needs to be defined if call frame
- +;; exception handling is to be used.
- +
- +;; We can't expand this before we know where the link register is stored.
- +(define_insn_and_split "eh_return"
- + [(unspec_volatile [(match_operand:SI 0 "register_operand" "r")]
- + VUNSPEC_EH_RETURN)
- + (clobber (match_scratch:SI 1 "=&r"))]
- + ""
- + "#"
- + "reload_completed"
- + [(const_int 0)]
- + "
- + {
- + avr32_set_return_address (operands[0], operands[1]);
- + DONE;
- + }"
- + )
- +
- +
- +;;=============================================================================
- +;; ffssi2
- +;;-----------------------------------------------------------------------------
- +(define_insn "ffssi2"
- + [ (set (match_operand:SI 0 "register_operand" "=r")
- + (ffs:SI (match_operand:SI 1 "register_operand" "r"))) ]
- + ""
- + "mov %0, %1
- + brev %0
- + clz %0, %0
- + sub %0, -1
- + cp %0, 33
- + moveq %0, 0"
- + [(set_attr "length" "18")
- + (set_attr "cc" "clobber")]
- + )
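- +
- +;; C model of the sequence above (a sketch; bitrev32/clz32 stand in for
- +;; the brev and clz instructions, with clz32 (0) taken to be 32):
- +;;     unsigned ffs_model (unsigned x)
- +;;     {
- +;;       unsigned r = clz32 (bitrev32 (x)) + 1; /* brev; clz; sub %0, -1 */
- +;;       return r == 33 ? 0 : r;                /* cp %0, 33; moveq */
- +;;     }
- +;; so ffs(0) correctly yields 0.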
- +
- +
- +
- +;;=============================================================================
- +;; swap_h
- +;;-----------------------------------------------------------------------------
- +(define_insn "*swap_h"
- + [ (set (match_operand:SI 0 "register_operand" "=r")
- + (ior:SI (ashift:SI (match_dup 0) (const_int 16))
- + (lshiftrt:SI (match_dup 0) (const_int 16))))]
- + ""
- + "swap.h %0"
- + [(set_attr "length" "2")]
- + )
- +
- +(define_insn_and_split "bswap_16"
- + [ (set (match_operand:HI 0 "avr32_bswap_operand" "=r,RKs13,r")
- + (ior:HI (and:HI (lshiftrt:HI (match_operand:HI 1 "avr32_bswap_operand" "r,r,RKs13")
- + (const_int 8))
- + (const_int 255))
- + (ashift:HI (and:HI (match_dup 1)
- + (const_int 255))
- + (const_int 8))))]
- + ""
- + {
- + switch ( which_alternative ){
- + case 0:
- + if ( REGNO(operands[0]) == REGNO(operands[1]))
- + return "swap.bh\t%0";
- + else
- + return "mov\t%0, %1\;swap.bh\t%0";
- + case 1:
- + return "stswp.h\t%0, %1";
- + case 2:
- + return "ldswp.sh\t%0, %1";
- + default:
- + abort();
- + }
- + }
- +
- + "(reload_completed &&
- + REG_P(operands[0]) && REG_P(operands[1])
- + && (REGNO(operands[0]) != REGNO(operands[1])))"
- + [(set (match_dup 0) (match_dup 1))
- + (set (match_dup 0)
- + (ior:HI (and:HI (lshiftrt:HI (match_dup 0)
- + (const_int 8))
- + (const_int 255))
- + (ashift:HI (and:HI (match_dup 0)
- + (const_int 255))
- + (const_int 8))))]
- + ""
- +
- + [(set_attr "length" "4,4,4")
- + (set_attr "type" "alu,store,load_rm")]
- + )
- +
- +(define_insn_and_split "bswap_32"
- + [ (set (match_operand:SI 0 "avr32_bswap_operand" "=r,RKs14,r")
- + (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_operand:SI 1 "avr32_bswap_operand" "r,r,RKs14")
- + (const_int -16777216))
- + (const_int 24))
- + (lshiftrt:SI (and:SI (match_dup 1)
- + (const_int 16711680))
- + (const_int 8)))
- + (ior:SI (ashift:SI (and:SI (match_dup 1)
- + (const_int 65280))
- + (const_int 8))
- + (ashift:SI (and:SI (match_dup 1)
- + (const_int 255))
- + (const_int 24)))))]
- + ""
- + {
- + switch ( which_alternative ){
- + case 0:
- + if ( REGNO(operands[0]) == REGNO(operands[1]))
- + return "swap.b\t%0";
- + else
- + return "#";
- + case 1:
- + return "stswp.w\t%0, %1";
- + case 2:
- + return "ldswp.w\t%0, %1";
- + default:
- + abort();
- + }
- + }
- + "(reload_completed &&
- + REG_P(operands[0]) && REG_P(operands[1])
- + && (REGNO(operands[0]) != REGNO(operands[1])))"
- + [(set (match_dup 0) (match_dup 1))
- + (set (match_dup 0)
- + (ior:SI (ior:SI (lshiftrt:SI (and:SI (match_dup 0)
- + (const_int -16777216))
- + (const_int 24))
- + (lshiftrt:SI (and:SI (match_dup 0)
- + (const_int 16711680))
- + (const_int 8)))
- + (ior:SI (ashift:SI (and:SI (match_dup 0)
- + (const_int 65280))
- + (const_int 8))
- + (ashift:SI (and:SI (match_dup 0)
- + (const_int 255))
- + (const_int 24)))))]
- + ""
- +
- + [(set_attr "length" "4,4,4")
- + (set_attr "type" "alu,store,load_rm")]
- + )
- +
- +
- +;;=============================================================================
- +;; blockage
- +;;-----------------------------------------------------------------------------
- +;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
- +;; all of memory. This blocks insns from being moved across this point.
- +
- +(define_insn "blockage"
- + [(unspec_volatile [(const_int 0)] VUNSPEC_BLOCKAGE)]
- + ""
- + ""
- + [(set_attr "length" "0")]
- +)
- +
- +;;=============================================================================
- +;; clzsi2
- +;;-----------------------------------------------------------------------------
- +(define_insn "clzsi2"
- + [ (set (match_operand:SI 0 "register_operand" "=r")
- + (clz:SI (match_operand:SI 1 "register_operand" "r"))) ]
- + ""
- + "clz %0, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "set_z")]
- + )
- +
- +;;=============================================================================
- +;; ctzsi2
- +;;-----------------------------------------------------------------------------
- +(define_insn "ctzsi2"
- + [ (set (match_operand:SI 0 "register_operand" "=r,r")
- + (ctz:SI (match_operand:SI 1 "register_operand" "0,r"))) ]
- + ""
- + "@
- + brev\t%0\;clz\t%0, %0
- + mov\t%0, %1\;brev\t%0\;clz\t%0, %0"
- + [(set_attr "length" "8")
- + (set_attr "cc" "set_z")]
- + )
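- +
- +;; ctz is computed as clz of the bit-reversed value: "brev" moves the
- +;; lowest set bit to the top, so for example ctz(0x8) =
- +;; clz(bitrev(0x8)) = clz(0x10000000) = 3.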
- +
- +;;=============================================================================
- +;; cache instructions
- +;;-----------------------------------------------------------------------------
- +(define_insn "cache"
- + [ (unspec_volatile [(match_operand:SI 0 "avr32_ks11_address_operand" "p")
- + (match_operand:SI 1 "immediate_operand" "Ku05")] VUNSPEC_CACHE)]
- + ""
- + "cache %0, %1"
- + [(set_attr "length" "4")]
- + )
- +
- +(define_insn "sync"
- + [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku08")] VUNSPEC_SYNC)]
- + ""
- + "sync %0"
- + [(set_attr "length" "4")]
- + )
- +
- +;;=============================================================================
- +;; TLB instructions
- +;;-----------------------------------------------------------------------------
- +(define_insn "tlbr"
- + [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBR)]
- + ""
- + "tlbr"
- + [(set_attr "length" "2")]
- + )
- +
- +(define_insn "tlbw"
- + [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBW)]
- + ""
- + "tlbw"
- + [(set_attr "length" "2")]
- + )
- +
- +(define_insn "tlbs"
- + [ (unspec_volatile [(const_int 0)] VUNSPEC_TLBS)]
- + ""
- + "tlbs"
- + [(set_attr "length" "2")]
- + )
- +
- +;;=============================================================================
- +;; Breakpoint instruction
- +;;-----------------------------------------------------------------------------
- +(define_insn "breakpoint"
- + [ (unspec_volatile [(const_int 0)] VUNSPEC_BREAKPOINT)]
- + ""
- + "breakpoint"
- + [(set_attr "length" "2")]
- + )
- +
- +
- +;;=============================================================================
- +;; mtsr/mfsr instruction
- +;;-----------------------------------------------------------------------------
- +(define_insn "mtsr"
- + [ (unspec_volatile [(match_operand 0 "immediate_operand" "i")
- + (match_operand:SI 1 "register_operand" "r")] VUNSPEC_MTSR)]
- + ""
- + "mtsr\t%0, %1"
- + [(set_attr "length" "4")]
- + )
- +
- +(define_insn "mfsr"
- + [ (set (match_operand:SI 0 "register_operand" "=r")
- + (unspec_volatile:SI [(match_operand 1 "immediate_operand" "i")] VUNSPEC_MFSR)) ]
- + ""
- + "mfsr\t%0, %1"
- + [(set_attr "length" "4")]
- + )
- +
- +;;=============================================================================
- +;; mtdr/mfdr instruction
- +;;-----------------------------------------------------------------------------
- +(define_insn "mtdr"
- + [ (unspec_volatile [(match_operand 0 "immediate_operand" "i")
- + (match_operand:SI 1 "register_operand" "r")] VUNSPEC_MTDR)]
- + ""
- + "mtdr\t%0, %1"
- + [(set_attr "length" "4")]
- + )
- +
- +(define_insn "mfdr"
- + [ (set (match_operand:SI 0 "register_operand" "=r")
- + (unspec_volatile:SI [(match_operand 1 "immediate_operand" "i")] VUNSPEC_MFDR)) ]
- + ""
- + "mfdr\t%0, %1"
- + [(set_attr "length" "4")]
- + )
- +
- +;;=============================================================================
- +;; musfr
- +;;-----------------------------------------------------------------------------
- +(define_insn "musfr"
- + [ (unspec_volatile [(match_operand:SI 0 "register_operand" "r")] VUNSPEC_MUSFR)]
- + ""
- + "musfr\t%0"
- + [(set_attr "length" "2")
- + (set_attr "cc" "clobber")]
- + )
- +
- +(define_insn "mustr"
- + [ (set (match_operand:SI 0 "register_operand" "=r")
- + (unspec_volatile:SI [(const_int 0)] VUNSPEC_MUSTR)) ]
- + ""
- + "mustr\t%0"
- + [(set_attr "length" "2")]
- + )
- +
- +(define_insn "ssrf"
- + [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku05")] VUNSPEC_SSRF)]
- + ""
- + "ssrf %0"
- + [(set_attr "length" "2")
- + (set_attr "cc" "clobber")]
- + )
- +
- +(define_insn "csrf"
- + [ (unspec_volatile [(match_operand:SI 0 "immediate_operand" "Ku05")] VUNSPEC_CSRF)]
- + ""
- + "csrf %0"
- + [(set_attr "length" "2")
- + (set_attr "cc" "clobber")]
- + )
- +
- +;;=============================================================================
- +;; Flush Return Stack instruction
- +;;-----------------------------------------------------------------------------
- +(define_insn "frs"
- + [ (unspec_volatile [(const_int 0)] VUNSPEC_FRS)]
- + ""
- + "frs"
- + [(set_attr "length" "2")
- + (set_attr "cc" "none")]
- + )
- +
- +
- +;;=============================================================================
- +;; Saturation Round Scale instruction
- +;;-----------------------------------------------------------------------------
- +(define_insn "sats"
- + [ (set (match_operand:SI 0 "register_operand" "+r")
- + (unspec:SI [(match_dup 0)
- + (match_operand 1 "immediate_operand" "Ku05")
- + (match_operand 2 "immediate_operand" "Ku05")]
- + UNSPEC_SATS)) ]
- + "TARGET_DSP"
- + "sats\t%0 >> %1, %2"
- + [(set_attr "type" "alu_sat")
- + (set_attr "length" "4")]
- + )
- +
- +(define_insn "satu"
- + [ (set (match_operand:SI 0 "register_operand" "+r")
- + (unspec:SI [(match_dup 0)
- + (match_operand 1 "immediate_operand" "Ku05")
- + (match_operand 2 "immediate_operand" "Ku05")]
- + UNSPEC_SATU)) ]
- + "TARGET_DSP"
- + "satu\t%0 >> %1, %2"
- + [(set_attr "type" "alu_sat")
- + (set_attr "length" "4")]
- + )
- +
- +(define_insn "satrnds"
- + [ (set (match_operand:SI 0 "register_operand" "+r")
- + (unspec:SI [(match_dup 0)
- + (match_operand 1 "immediate_operand" "Ku05")
- + (match_operand 2 "immediate_operand" "Ku05")]
- + UNSPEC_SATRNDS)) ]
- + "TARGET_DSP"
- + "satrnds\t%0 >> %1, %2"
- + [(set_attr "type" "alu_sat")
- + (set_attr "length" "4")]
- + )
- +
- +(define_insn "satrndu"
- + [ (set (match_operand:SI 0 "register_operand" "+r")
- + (unspec:SI [(match_dup 0)
- + (match_operand 1 "immediate_operand" "Ku05")
- + (match_operand 2 "immediate_operand" "Ku05")]
- + UNSPEC_SATRNDU)) ]
- + "TARGET_DSP"
- + "sats\t%0 >> %1, %2"
- + [(set_attr "type" "alu_sat")
- + (set_attr "length" "4")]
- + )
- +
- +(define_insn "sleep"
- + [(unspec_volatile [(const_int 0)] VUNSPEC_SLEEP)
- + (match_operand:SI 0 "const_int_operand" "")]
- + ""
- + "sleep %0"
- + [(set_attr "length" "1")
- + (set_attr "cc" "none")
- + ])
- +
- +(define_expand "delay_cycles"
- + [(unspec_volatile [(match_operand:SI 0 "const_int_operand" "i")]
- + VUNSPEC_DELAY_CYCLES)]
- + ""
- + "
- + unsigned int cycles = UINTVAL (operands[0]);
- + if (IN_RANGE (cycles, 0x10000, 0xFFFFFFFF))
- + {
- + unsigned int msb = (cycles & 0xFFFF0000) >> 16;
- + unsigned int cycles_used = msb * 0x10000;
- + emit_insn (gen_delay_cycles_2 (gen_int_mode (msb, SImode)));
- + cycles -= cycles_used;
- + }
- + if (IN_RANGE (cycles, 4, 0xFFFF))
- + {
- + unsigned int loop_count = cycles / 4;
- + unsigned int cycles_used = loop_count * 4;
- + emit_insn (gen_delay_cycles_1 (gen_int_mode (loop_count, SImode)));
- + cycles -= cycles_used;
- + }
- + while (cycles >= 3)
- + {
- + emit_insn (gen_nop3 ());
- + cycles -= 3;
- + }
- + if (cycles == 1 || cycles == 2)
- + {
- + while (cycles--)
- + emit_insn (gen_nop ());
- + }
- + DONE;
- + ")
- +
- +(define_insn "delay_cycles_1"
- +[(unspec_volatile [(const_int 0)] VUNSPEC_DELAY_CYCLES_1)
- + (match_operand:SI 0 "immediate_operand" "")
- + (clobber (match_scratch:SI 1 "=&r"))]
- + ""
- + "mov\t%1, %0
- + 1: sub\t%1, 1
- + brne\t1b
- + nop"
- +)
- +
- +(define_insn "delay_cycles_2"
- +[(unspec_volatile [(const_int 0)] VUNSPEC_DELAY_CYCLES_2)
- + (match_operand:SI 0 "immediate_operand" "")
- + (clobber (match_scratch:SI 1 "=&r"))
- + (clobber (match_scratch:SI 2 "=&r"))]
- + ""
- + "mov\t%1, %0
- + 1: mov\t%2, 16383
- + 2: sub\t%2, 1
- + brne\t2b
- + nop
- + sub\t%1, 1
- + brne\t1b
- + nop"
- +)
- +
- +;; CPU instructions
- +
- +;;=============================================================================
- +;; nop
- +;;-----------------------------------------------------------------------------
- +;; No-op instruction.
- +;;=============================================================================
- +(define_insn "nop"
- + [(unspec_volatile [(const_int 0)] VUNSPEC_NOP)]
- + ""
- + "nop"
- + [(set_attr "length" "1")
- + (set_attr "type" "alu")
- + (set_attr "cc" "none")])
- +
- +;; NOP3
- +(define_insn "nop3"
- + [(unspec_volatile [(const_int 0)] VUNSPEC_NOP3)]
- + ""
- + "rjmp\t2"
- + [(set_attr "length" "3")
- + (set_attr "type" "alu")
- + (set_attr "cc" "none")])
- +
- +;; Special patterns for dealing with the constant pool
- +
- +(define_insn "align_4"
- + [(unspec_volatile [(const_int 0)] VUNSPEC_ALIGN)]
- + ""
- + {
- + assemble_align (32);
- + return "";
- + }
- + [(set_attr "length" "2")]
- +)
- +
- +
- +(define_insn "consttable_start"
- + [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_START)]
- + ""
- + {
- + return ".cpool";
- + }
- + [(set_attr "length" "0")]
- + )
- +
- +(define_insn "consttable_end"
- + [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_END)]
- + ""
- + {
- + making_const_table = FALSE;
- + return "";
- + }
- + [(set_attr "length" "0")]
- +)
- +
- +
- +(define_insn "consttable_4"
- + [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_4)]
- + ""
- + {
- + making_const_table = TRUE;
- + switch (GET_MODE_CLASS (GET_MODE (operands[0])))
- + {
- + case MODE_FLOAT:
- + {
- + REAL_VALUE_TYPE r;
- + char real_string[1024];
- + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]);
- + real_to_decimal(real_string, &r, 1024, 0, 1);
- + asm_fprintf (asm_out_file, "\t.float\t%s\n", real_string);
- + break;
- + }
- + default:
- + assemble_integer (operands[0], 4, 0, 1);
- + break;
- + }
- + return "";
- + }
- + [(set_attr "length" "4")]
- +)
- +
- +(define_insn "consttable_8"
- + [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_8)]
- + ""
- + {
- + making_const_table = TRUE;
- + switch (GET_MODE_CLASS (GET_MODE (operands[0])))
- + {
- + case MODE_FLOAT:
- + {
- + REAL_VALUE_TYPE r;
- + char real_string[1024];
- + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]);
- + real_to_decimal(real_string, &r, 1024, 0, 1);
- + asm_fprintf (asm_out_file, "\t.double\t%s\n", real_string);
- + break;
- + }
- + default:
- + assemble_integer(operands[0], 8, 0, 1);
- + break;
- + }
- + return "";
- + }
- + [(set_attr "length" "8")]
- +)
- +
- +(define_insn "consttable_16"
- + [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_16)]
- + ""
- + {
- + making_const_table = TRUE;
- + assemble_integer(operands[0], 16, 0, 1);
- + return "";
- + }
- + [(set_attr "length" "16")]
- +)
- +
- +;;=============================================================================
- +;; coprocessor instructions
- +;;-----------------------------------------------------------------------------
- +(define_insn "cop"
- + [ (unspec_volatile [(match_operand 0 "immediate_operand" "Ku03")
- + (match_operand 1 "immediate_operand" "Ku04")
- + (match_operand 2 "immediate_operand" "Ku04")
- + (match_operand 3 "immediate_operand" "Ku04")
- + (match_operand 4 "immediate_operand" "Ku07")] VUNSPEC_COP)]
- + ""
- + "cop\tcp%0, cr%1, cr%2, cr%3, %4"
- + [(set_attr "length" "4")]
- + )
- +
- +(define_insn "mvcrsi"
- + [ (set (match_operand:SI 0 "avr32_cop_move_operand" "=r,<,Z")
- + (unspec_volatile:SI [(match_operand 1 "immediate_operand" "Ku03,Ku03,Ku03")
- + (match_operand 2 "immediate_operand" "Ku04,Ku04,Ku04")]
- + VUNSPEC_MVCR)) ]
- + ""
- + "@
- + mvcr.w\tcp%1, %0, cr%2
- + stcm.w\tcp%1, %0, cr%2
- + stc.w\tcp%1, %0, cr%2"
- + [(set_attr "length" "4")]
- + )
- +
- +(define_insn "mvcrdi"
- + [ (set (match_operand:DI 0 "avr32_cop_move_operand" "=r,<,Z")
- + (unspec_volatile:DI [(match_operand 1 "immediate_operand" "Ku03,Ku03,Ku03")
- + (match_operand 2 "immediate_operand" "Ku04,Ku04,Ku04")]
- + VUNSPEC_MVCR)) ]
- + ""
- + "@
- + mvcr.d\tcp%1, %0, cr%2
- + stcm.d\tcp%1, %0, cr%2-cr%i2
- + stc.d\tcp%1, %0, cr%2"
- + [(set_attr "length" "4")]
- + )
- +
- +(define_insn "mvrcsi"
- + [ (unspec_volatile:SI [(match_operand 0 "immediate_operand" "Ku03,Ku03,Ku03")
- + (match_operand 1 "immediate_operand" "Ku04,Ku04,Ku04")
- + (match_operand:SI 2 "avr32_cop_move_operand" "r,>,Z")]
- + VUNSPEC_MVRC)]
- + ""
- + {
- + switch (which_alternative){
- + case 0:
- + return "mvrc.w\tcp%0, cr%1, %2";
- + case 1:
- + return "ldcm.w\tcp%0, %2, cr%1";
- + case 2:
- + return "ldc.w\tcp%0, cr%1, %2";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "length" "4")]
- + )
- +
- +(define_insn "mvrcdi"
- + [ (unspec_volatile:DI [(match_operand 0 "immediate_operand" "Ku03,Ku03,Ku03")
- + (match_operand 1 "immediate_operand" "Ku04,Ku04,Ku04")
- + (match_operand:DI 2 "avr32_cop_move_operand" "r,>,Z")]
- + VUNSPEC_MVRC)]
- + ""
- + {
- + switch (which_alternative){
- + case 0:
- + return "mvrc.d\tcp%0, cr%1, %2";
- + case 1:
- + return "ldcm.d\tcp%0, %2, cr%1-cr%i1";
- + case 2:
- + return "ldc.d\tcp%0, cr%1, %2";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "length" "4")]
- + )
- +
- +;;=============================================================================
- +;; epilogue
- +;;-----------------------------------------------------------------------------
- +;; This pattern emits RTL for exit from a function. The function exit is
- +;; responsible for deallocating the stack frame, restoring callee-saved
- +;; registers, and emitting the return instruction.
- +;; ToDo: consider using TARGET_ASM_FUNCTION_EPILOGUE instead.
- +;;=============================================================================
- +(define_expand "epilogue"
- + [(unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
- + ""
- + "
- + if (USE_RETURN_INSN (FALSE)){
- + emit_jump_insn (gen_return ());
- + DONE;
- + }
- + emit_jump_insn (gen_rtx_UNSPEC_VOLATILE (VOIDmode,
- + gen_rtvec (1,
- + gen_rtx_RETURN (VOIDmode)),
- + VUNSPEC_EPILOGUE));
- + DONE;
- + "
- + )
- +
- +(define_insn "*epilogue_insns"
- + [(unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
- + ""
- + {
- + avr32_output_return_instruction (FALSE, FALSE, NULL, NULL);
- + return "";
- + }
- + ; Length is absolute worst case
- + [(set_attr "type" "branch")
- + (set_attr "length" "12")]
- + )
- +
- +(define_insn "*epilogue_insns_ret_imm"
- + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
- + (use (reg RETVAL_REGNUM))
- + (unspec_volatile [(return)] VUNSPEC_EPILOGUE)])]
- + "((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
- + {
- + avr32_output_return_instruction (FALSE, FALSE, NULL, operands[0]);
- + return "";
- + }
- + ; Length is absolute worst case
- + [(set_attr "type" "branch")
- + (set_attr "length" "12")]
- + )
- +
- +(define_insn "sibcall_epilogue"
- + [(unspec_volatile [(const_int 0)] VUNSPEC_EPILOGUE)]
- + ""
- + {
- + avr32_output_return_instruction (FALSE, FALSE, NULL, NULL);
- + return "";
- + }
- +;; Length is absolute worst case
- + [(set_attr "type" "branch")
- + (set_attr "length" "12")]
- + )
- +
- +(define_insn "*sibcall_epilogue_insns_ret_imm"
- + [(parallel [(set (reg RETVAL_REGNUM) (match_operand 0 "immediate_operand" "i"))
- + (use (reg RETVAL_REGNUM))
- + (unspec_volatile [(const_int 0)] VUNSPEC_EPILOGUE)])]
- + "((INTVAL(operands[0]) == -1) || (INTVAL(operands[0]) == 0) || (INTVAL(operands[0]) == 1))"
- + {
- + avr32_output_return_instruction (FALSE, FALSE, NULL, operands[0]);
- + return "";
- + }
- + ; Length is absolute worst case
- + [(set_attr "type" "branch")
- + (set_attr "length" "12")]
- + )
- +
- +(define_insn "ldxi"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (mem:SI (plus:SI
- + (match_operand:SI 1 "register_operand" "r")
- + (mult:SI (zero_extract:SI (match_operand:SI 2 "register_operand" "r")
- + (const_int 8)
- + (match_operand:SI 3 "immediate_operand" "Ku05"))
- + (const_int 4)))))]
- + "(INTVAL(operands[3]) == 24 || INTVAL(operands[3]) == 16 || INTVAL(operands[3]) == 8
- + || INTVAL(operands[3]) == 0)"
- + {
- + switch ( INTVAL(operands[3]) ){
- + case 0:
- + return "ld.w %0, %1[%2:b << 2]";
- + case 8:
- + return "ld.w %0, %1[%2:l << 2]";
- + case 16:
- + return "ld.w %0, %1[%2:u << 2]";
- + case 24:
- + return "ld.w %0, %1[%2:t << 2]";
- + default:
- + internal_error("illegal operand for ldxi");
- + }
- + }
- + [(set_attr "type" "load")
- + (set_attr "length" "4")
- + (set_attr "cc" "none")])
- +
- +
- +
- +
- +
- +
- +;;=============================================================================
- +;; Peephole optimizing
- +;;-----------------------------------------------------------------------------
- +;; Changing
- +;; sub r8, r7, 8
- +;; st.w r8[0x0], r12
- +;; to
- +;; sub r8, r7, 8
- +;; st.w r7[-0x8], r12
- +;;=============================================================================
- +; (set (reg:SI 9 r8)
- +; (plus:SI (reg/f:SI 6 r7)
- +; (const_int ...)))
- +; (set (mem:SI (reg:SI 9 r8))
- +; (reg:SI 12 r12))
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (plus:SI (match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")))
- + (set (mem:SI (match_dup 0))
- + (match_operand:SI 3 "register_operand" ""))]
- + "REGNO(operands[0]) != REGNO(operands[1]) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'K', \"Ks16\")"
- + [(set (match_dup 0)
- + (plus:SI (match_dup 1)
- + (match_dup 2)))
- + (set (mem:SI (plus:SI (match_dup 1)
- + (match_dup 2)))
- + (match_dup 3))]
- + "")
- +
- +;;=============================================================================
- +;; Peephole optimizing
- +;;-----------------------------------------------------------------------------
- +;; Changing
- +;; sub r6, r7, 4
- +;; ld.w r6, r6[0x0]
- +;; to
- +;; sub r6, r7, 4
- +;; ld.w r6, r7[-0x4]
- +;;=============================================================================
- +; (set (reg:SI 7 r6)
- +; (plus:SI (reg/f:SI 6 r7)
- +; (const_int -4 [0xfffffffc])))
- +; (set (reg:SI 7 r6)
- +; (mem:SI (reg:SI 7 r6)))
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (plus:SI (match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "immediate_operand" "")))
- + (set (match_operand:SI 3 "register_operand" "")
- + (mem:SI (match_dup 0)))]
- + "REGNO(operands[0]) != REGNO(operands[1]) && avr32_const_ok_for_constraint_p(INTVAL(operands[2]), 'K', \"Ks16\")"
- + [(set (match_dup 0)
- + (plus:SI (match_dup 1)
- + (match_dup 2)))
- + (set (match_dup 3)
- + (mem:SI (plus:SI (match_dup 1)
- + (match_dup 2))))]
- + "")
- +
- +;;=============================================================================
- +;; Peephole optimizing
- +;;-----------------------------------------------------------------------------
- +;; Changing
- +;; ld.sb r0, r7[-0x6]
- +;; cashs.b r0
- +;; to
- +;; ld.sb r0, r7[-0x6]
- +;;=============================================================================
- +(define_peephole2
- + [(set (match_operand:QI 0 "register_operand" "")
- + (match_operand:QI 1 "load_sb_memory_operand" ""))
- + (set (match_operand:SI 2 "register_operand" "")
- + (sign_extend:SI (match_dup 0)))]
- + "(REGNO(operands[0]) == REGNO(operands[2]) || peep2_reg_dead_p(2, operands[0]))"
- + [(set (match_dup 2)
- + (sign_extend:SI (match_dup 1)))]
- + "")
- +
- +;;=============================================================================
- +;; Peephole optimizing
- +;;-----------------------------------------------------------------------------
- +;; Changing
- +;; ld.ub r0, r7[-0x6]
- +;; cashu.b r0
- +;; to
- +;; ld.ub r0, r7[-0x6]
- +;;=============================================================================
- +(define_peephole2
- + [(set (match_operand:QI 0 "register_operand" "")
- + (match_operand:QI 1 "memory_operand" ""))
- + (set (match_operand:SI 2 "register_operand" "")
- + (zero_extend:SI (match_dup 0)))]
- + "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
- + [(set (match_dup 2)
- + (zero_extend:SI (match_dup 1)))]
- + "")
- +
- +;;=============================================================================
- +;; Peephole optimizing
- +;;-----------------------------------------------------------------------------
- +;; Changing
- +;; ld.sh r0, r7[-0x6]
- +;; casts.h r0
- +;; to
- +;; ld.sh r0, r7[-0x6]
- +;;=============================================================================
- +(define_peephole2
- + [(set (match_operand:HI 0 "register_operand" "")
- + (match_operand:HI 1 "memory_operand" ""))
- + (set (match_operand:SI 2 "register_operand" "")
- + (sign_extend:SI (match_dup 0)))]
- + "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
- + [(set (match_dup 2)
- + (sign_extend:SI (match_dup 1)))]
- + "")
- +
- +;;=============================================================================
- +;; Peephole optimizing
- +;;-----------------------------------------------------------------------------
- +;; Changing
- +;; ld.uh r0, r7[-0x6]
- +;; castu.h r0
- +;; to
- +;; ld.uh r0, r7[-0x6]
- +;;=============================================================================
- +(define_peephole2
- + [(set (match_operand:HI 0 "register_operand" "")
- + (match_operand:HI 1 "memory_operand" ""))
- + (set (match_operand:SI 2 "register_operand" "")
- + (zero_extend:SI (match_dup 0)))]
- + "(REGNO(operands[0]) == REGNO(operands[2])) || peep2_reg_dead_p(2, operands[0])"
- + [(set (match_dup 2)
- + (zero_extend:SI (match_dup 1)))]
- + "")
- +
- +;;=============================================================================
- +;; Peephole optimizing
- +;;-----------------------------------------------------------------------------
- +;; Changing
- +;; mul rd, rx, ry
- +;; add rd2, rd
- +;; or
- +;; add rd2, rd, rd2
- +;; to
- +;; mac rd2, rx, ry
- +;;=============================================================================
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (mult:SI (match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "register_operand" "")))
- + (set (match_operand:SI 3 "register_operand" "")
- + (plus:SI (match_dup 3)
- + (match_dup 0)))]
- + "peep2_reg_dead_p(2, operands[0])"
- + [(set (match_dup 3)
- + (plus:SI (mult:SI (match_dup 1)
- + (match_dup 2))
- + (match_dup 3)))]
- + "")
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (mult:SI (match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "register_operand" "")))
- + (set (match_operand:SI 3 "register_operand" "")
- + (plus:SI (match_dup 0)
- + (match_dup 3)))]
- + "peep2_reg_dead_p(2, operands[0])"
- + [(set (match_dup 3)
- + (plus:SI (mult:SI (match_dup 1)
- + (match_dup 2))
- + (match_dup 3)))]
- + "")
- +
- +
- +;;=============================================================================
- +;; Peephole optimizing
- +;;-----------------------------------------------------------------------------
- +;; Changing
- +;; bfextu rd, rs, k5, 1 or and(h/l) rd, one_bit_set_mask
- +;; to
- +;; bld rs, k5
- +;;
- +;; If rd is dead after the operation.
- +;;=============================================================================
- +(define_peephole2
- + [ (set (match_operand:SI 0 "register_operand" "")
- + (zero_extract:SI (match_operand:SI 1 "register_operand" "")
- + (const_int 1)
- + (match_operand:SI 2 "immediate_operand" "")))
- + (set (cc0)
- + (match_dup 0))]
- + "peep2_reg_dead_p(2, operands[0])"
- + [(set (cc0)
- + (and:SI (match_dup 1)
- + (match_dup 2)))]
- + "operands[2] = GEN_INT(1 << INTVAL(operands[2]));")
- +
- +(define_peephole2
- + [ (set (match_operand:SI 0 "register_operand" "")
- + (and:SI (match_operand:SI 1 "register_operand" "")
- + (match_operand:SI 2 "one_bit_set_operand" "")))
- + (set (cc0)
- + (match_dup 0))]
- + "peep2_reg_dead_p(2, operands[0])"
- + [(set (cc0)
- + (and:SI (match_dup 1)
- + (match_dup 2)))]
- + "")
- +
- +;;=============================================================================
- +;; Peephole optimizing
- +;;-----------------------------------------------------------------------------
- +;; Load with extracted index: ld.w Rd, Rb[Ri:{t/u/b/l} << 2]
- +;;
- +;;=============================================================================
- +
- +
- +(define_peephole
- + [(set (match_operand:SI 0 "register_operand" "")
- + (zero_extract:SI (match_operand:SI 1 "register_operand" "")
- + (const_int 8)
- + (match_operand:SI 2 "avr32_extract_shift_operand" "")))
- + (set (match_operand:SI 3 "register_operand" "")
- + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
- + (match_operand:SI 4 "register_operand" ""))))]
- +
- + "(dead_or_set_p(insn, operands[0]))"
- + {
- + switch ( INTVAL(operands[2]) ){
- + case 0:
- + return "ld.w %3, %4[%1:b << 2]";
- + case 8:
- + return "ld.w %3, %4[%1:l << 2]";
- + case 16:
- + return "ld.w %3, %4[%1:u << 2]";
- + case 24:
- + return "ld.w %3, %4[%1:t << 2]";
- + default:
- + internal_error("illegal operand for ldxi");
- + }
- + }
- + [(set_attr "type" "load")
- + (set_attr "length" "4")
- + (set_attr "cc" "clobber")]
- + )
- +
- +
- +
- +(define_peephole
- + [(set (match_operand:SI 0 "register_operand" "")
- + (and:SI (match_operand:SI 1 "register_operand" "") (const_int 255)))
- + (set (match_operand:SI 2 "register_operand" "")
- + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
- + (match_operand:SI 3 "register_operand" ""))))]
- +
- + "(dead_or_set_p(insn, operands[0]))"
- +
- + "ld.w %2, %3[%1:b << 2]"
- + [(set_attr "type" "load")
- + (set_attr "length" "4")
- + (set_attr "cc" "clobber")]
- + )
- +
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (zero_extract:SI (match_operand:SI 1 "register_operand" "")
- + (const_int 8)
- + (match_operand:SI 2 "avr32_extract_shift_operand" "")))
- + (set (match_operand:SI 3 "register_operand" "")
- + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
- + (match_operand:SI 4 "register_operand" ""))))]
- +
- + "(peep2_reg_dead_p(2, operands[0]))
- + || (REGNO(operands[0]) == REGNO(operands[3]))"
- + [(set (match_dup 3)
- + (mem:SI (plus:SI
- + (match_dup 4)
- + (mult:SI (zero_extract:SI (match_dup 1)
- + (const_int 8)
- + (match_dup 2))
- + (const_int 4)))))]
- + )
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (zero_extend:SI (match_operand:QI 1 "register_operand" "")))
- + (set (match_operand:SI 2 "register_operand" "")
- + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
- + (match_operand:SI 3 "register_operand" ""))))]
- +
- + "(peep2_reg_dead_p(2, operands[0]))
- + || (REGNO(operands[0]) == REGNO(operands[2]))"
- + [(set (match_dup 2)
- + (mem:SI (plus:SI
- + (match_dup 3)
- + (mult:SI (zero_extract:SI (match_dup 1)
- + (const_int 8)
- + (const_int 0))
- + (const_int 4)))))]
- + "operands[1] = gen_rtx_REG(SImode, REGNO(operands[1]));"
- + )
- +
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (and:SI (match_operand:SI 1 "register_operand" "")
- + (const_int 255)))
- + (set (match_operand:SI 2 "register_operand" "")
- + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
- + (match_operand:SI 3 "register_operand" ""))))]
- +
- + "(peep2_reg_dead_p(2, operands[0]))
- + || (REGNO(operands[0]) == REGNO(operands[2]))"
- + [(set (match_dup 2)
- + (mem:SI (plus:SI
- + (match_dup 3)
- + (mult:SI (zero_extract:SI (match_dup 1)
- + (const_int 8)
- + (const_int 0))
- + (const_int 4)))))]
- + ""
- + )
- +
- +
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (lshiftrt:SI (match_operand:SI 1 "register_operand" "")
- + (const_int 24)))
- + (set (match_operand:SI 2 "register_operand" "")
- + (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
- + (match_operand:SI 3 "register_operand" ""))))]
- +
- + "(peep2_reg_dead_p(2, operands[0]))
- + || (REGNO(operands[0]) == REGNO(operands[2]))"
- + [(set (match_dup 2)
- + (mem:SI (plus:SI
- + (match_dup 3)
- + (mult:SI (zero_extract:SI (match_dup 1)
- + (const_int 8)
- + (const_int 24))
- + (const_int 4)))))]
- + ""
- + )
- +
- +
- +;;************************************************
- +;; ANDN
- +;;
- +;;************************************************
- +
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (not:SI (match_operand:SI 1 "register_operand" "")))
- + (set (match_operand:SI 2 "register_operand" "")
- + (and:SI (match_dup 2)
- + (match_dup 0)))]
- + "peep2_reg_dead_p(2, operands[0])"
- +
- + [(set (match_dup 2)
- + (and:SI (match_dup 2)
- + (not:SI (match_dup 1))
- + ))]
- + ""
- +)
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (not:SI (match_operand:SI 1 "register_operand" "")))
- + (set (match_operand:SI 2 "register_operand" "")
- + (and:SI (match_dup 0)
- + (match_dup 2)
- + ))]
- + "peep2_reg_dead_p(2, operands[0])"
- +
- + [(set (match_dup 2)
- + (and:SI (match_dup 2)
- + (not:SI (match_dup 1))
- + ))]
- +
- + ""
- +)
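- +
- +;; Illustrative effect: "com r8" followed by "and r9, r8" collapses into
- +;; the machine's single and-not operation when r8 dies, i.e. the
- +;; (and ... (not ...)) form matched by the andn pattern.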
- +
- +
- +;;=================================================================
- +;; Addabs peephole
- +;;=================================================================
- +
- +(define_peephole
- + [(set (match_operand:SI 2 "register_operand" "=r")
- + (abs:SI (match_operand:SI 1 "register_operand" "r")))
- + (set (match_operand:SI 0 "register_operand" "=r")
- + (plus:SI (match_operand:SI 3 "register_operand" "r")
- + (match_dup 2)))]
- + "dead_or_set_p(insn, operands[2])"
- + "addabs %0, %3, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "set_z")])
- +
- +(define_peephole
- + [(set (match_operand:SI 2 "register_operand" "=r")
- + (abs:SI (match_operand:SI 1 "register_operand" "r")))
- + (set (match_operand:SI 0 "register_operand" "=r")
- + (plus:SI (match_dup 2)
- + (match_operand:SI 3 "register_operand" "r")))]
- + "dead_or_set_p(insn, operands[2])"
- + "addabs %0, %3, %1"
- + [(set_attr "length" "4")
- + (set_attr "cc" "set_z")])
- +
- +
- +;;=================================================================
- +;; Detect roundings
- +;;=================================================================
- +
- +(define_insn "*round"
- + [(set (match_operand:SI 0 "register_operand" "+r")
- + (ashiftrt:SI (plus:SI (match_dup 0)
- + (match_operand:SI 1 "immediate_operand" "i"))
- + (match_operand:SI 2 "immediate_operand" "i")))]
- + "avr32_rnd_operands(operands[1], operands[2])"
- +
- + "satrnds %0 >> %2, 31"
- +
- + [(set_attr "type" "alu_sat")
- + (set_attr "length" "4")]
- +
- + )
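- +
- +;; The pair matches when the added constant is half the final divisor
- +;; (avr32_rnd_operands is assumed to check operands[1] ==
- +;; 1 << (INTVAL (operands[2]) - 1)); e.g. (x + 128) >> 8 implements
- +;; round-to-nearest of x/256 and is emitted as "satrnds rx >> 8, 31".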
- +
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (plus:SI (match_dup 0)
- + (match_operand:SI 1 "immediate_operand" "")))
- + (set (match_dup 0)
- + (ashiftrt:SI (match_dup 0)
- + (match_operand:SI 2 "immediate_operand" "")))]
- + "avr32_rnd_operands(operands[1], operands[2])"
- +
- + [(set (match_dup 0)
- + (ashiftrt:SI (plus:SI (match_dup 0)
- + (match_dup 1))
- + (match_dup 2)))]
- + )
- +
- +(define_peephole
- + [(set (match_operand:SI 0 "register_operand" "r")
- + (plus:SI (match_dup 0)
- + (match_operand:SI 1 "immediate_operand" "i")))
- + (set (match_dup 0)
- + (ashiftrt:SI (match_dup 0)
- + (match_operand:SI 2 "immediate_operand" "i")))]
- + "avr32_rnd_operands(operands[1], operands[2])"
- +
- + "satrnds %0 >> %2, 31"
- +
- + [(set_attr "type" "alu_sat")
- + (set_attr "length" "4")
- + (set_attr "cc" "clobber")]
- +
- + )
- +
- +
- +;;=================================================================
- +;; mcall
- +;;=================================================================
- +(define_peephole
- + [(set (match_operand:SI 0 "register_operand" "")
- + (match_operand 1 "avr32_const_pool_ref_operand" ""))
- + (parallel [(call (mem:SI (match_dup 0))
- + (match_operand 2 "" ""))
- + (clobber (reg:SI LR_REGNUM))])]
- + "dead_or_set_p(insn, operands[0])"
- + "mcall %1"
- + [(set_attr "type" "call")
- + (set_attr "length" "4")
- + (set_attr "cc" "clobber")]
- +)
- +
- +(define_peephole
- + [(set (match_operand:SI 2 "register_operand" "")
- + (match_operand 1 "avr32_const_pool_ref_operand" ""))
- + (parallel [(set (match_operand 0 "register_operand" "")
- + (call (mem:SI (match_dup 2))
- + (match_operand 3 "" "")))
- + (clobber (reg:SI LR_REGNUM))])]
- + "dead_or_set_p(insn, operands[2])"
- + "mcall %1"
- + [(set_attr "type" "call")
- + (set_attr "length" "4")
- + (set_attr "cc" "call_set")]
- +)
- +
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (match_operand 1 "avr32_const_pool_ref_operand" ""))
- + (parallel [(call (mem:SI (match_dup 0))
- + (match_operand 2 "" ""))
- + (clobber (reg:SI LR_REGNUM))])]
- + "peep2_reg_dead_p(2, operands[0])"
- + [(parallel [(call (mem:SI (match_dup 1))
- + (match_dup 2))
- + (clobber (reg:SI LR_REGNUM))])]
- + ""
- +)
- +
- +(define_peephole2
- + [(set (match_operand:SI 0 "register_operand" "")
- + (match_operand 1 "avr32_const_pool_ref_operand" ""))
- + (parallel [(set (match_operand 2 "register_operand" "")
- + (call (mem:SI (match_dup 0))
- + (match_operand 3 "" "")))
- + (clobber (reg:SI LR_REGNUM))])]
- + "(peep2_reg_dead_p(2, operands[0]) || (REGNO(operands[2]) == REGNO(operands[0])))"
- + [(parallel [(set (match_dup 2)
- + (call (mem:SI (match_dup 1))
- + (match_dup 3)))
- + (clobber (reg:SI LR_REGNUM))])]
- + ""
- +)
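- +
- +;; Sketch of the transformation above (informative only): a load of a
- +;; function address from the constant pool followed by an indirect call
- +;; through that register is folded into a single "mcall %1" whenever
- +;; the address register is dead after the call, or is overwritten by
- +;; the call's return value.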
- +
- +;;=================================================================
- +;; Returning a value
- +;;=================================================================
- +
- +
- +(define_peephole
- + [(set (match_operand 0 "register_operand" "")
- + (match_operand 1 "register_operand" ""))
- + (return)]
- + "USE_RETURN_INSN (TRUE) && (REGNO(operands[0]) == RETVAL_REGNUM)
- + && (REGNO(operands[1]) != LR_REGNUM)
- + && (REGNO_REG_CLASS(REGNO(operands[1])) == GENERAL_REGS)"
- + "retal %1"
- + [(set_attr "type" "call")
- + (set_attr "length" "2")]
- + )
- +
- +
- +(define_peephole
- + [(set (match_operand 0 "register_operand" "r")
- + (match_operand 1 "immediate_operand" "i"))
- + (return)]
- + "(USE_RETURN_INSN (FALSE) && (REGNO(operands[0]) == RETVAL_REGNUM) &&
- + ((INTVAL(operands[1]) == -1) || (INTVAL(operands[1]) == 0) || (INTVAL(operands[1]) == 1)))"
- + {
- + avr32_output_return_instruction (TRUE, FALSE, NULL, operands[1]);
- + return "";
- + }
- + [(set_attr "type" "call")
- + (set_attr "length" "4")]
- + )
- +
- +(define_peephole
- + [(set (match_operand 0 "register_operand" "r")
- + (match_operand 1 "immediate_operand" "i"))
- + (unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
- + "(REGNO(operands[0]) == RETVAL_REGNUM) &&
- + ((INTVAL(operands[1]) == -1) || (INTVAL(operands[1]) == 0) || (INTVAL(operands[1]) == 1))"
- + {
- + avr32_output_return_instruction (FALSE, FALSE, NULL, operands[1]);
- + return "";
- + }
- + ; Length is absolute worst case
- + [(set_attr "type" "branch")
- + (set_attr "length" "12")]
- + )
- +
- +(define_peephole
- + [(set (match_operand 0 "register_operand" "=r")
- + (if_then_else (match_operator 1 "avr32_comparison_operator"
- + [(match_operand 4 "register_operand" "r")
- + (match_operand 5 "register_immediate_operand" "rKs21")])
- + (match_operand 2 "avr32_cond_register_immediate_operand" "rKs08")
- + (match_operand 3 "avr32_cond_register_immediate_operand" "rKs08")))
- + (return)]
- + "USE_RETURN_INSN (TRUE) && (REGNO(operands[0]) == RETVAL_REGNUM)"
- + {
- + operands[1] = avr32_output_cmp(operands[1], GET_MODE(operands[4]), operands[4], operands[5]);
- +
- + if ( GET_CODE(operands[2]) == REG
- + && GET_CODE(operands[3]) == REG
- + && REGNO(operands[2]) != LR_REGNUM
- + && REGNO(operands[3]) != LR_REGNUM ){
- + return "ret%1 %2\;ret%i1 %3";
- + } else if ( GET_CODE(operands[2]) == REG
- + && GET_CODE(operands[3]) == CONST_INT ){
- + if ( INTVAL(operands[3]) == -1
- + || INTVAL(operands[3]) == 0
- + || INTVAL(operands[3]) == 1 ){
- + return "ret%1 %2\;ret%i1 %d3";
- + } else {
- + return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12";
- + }
- + } else if ( GET_CODE(operands[2]) == CONST_INT
- + && GET_CODE(operands[3]) == REG ){
- + if ( INTVAL(operands[2]) == -1
- + || INTVAL(operands[2]) == 0
- + || INTVAL(operands[2]) == 1 ){
- + return "ret%1 %d2\;ret%i1 %3";
- + } else {
- + return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12";
- + }
- + } else {
- + if ( (INTVAL(operands[2]) == -1
- + || INTVAL(operands[2]) == 0
- + || INTVAL(operands[2]) == 1 )
- + && (INTVAL(operands[3]) == -1
- + || INTVAL(operands[3]) == 0
- + || INTVAL(operands[3]) == 1 )){
- + return "ret%1 %d2\;ret%i1 %d3";
- + } else {
- + return "mov%1 r12, %2\;mov%i1 r12, %3\;retal r12";
- + }
- + }
- + }
- +
- + [(set_attr "length" "10")
- + (set_attr "cc" "none")
- + (set_attr "type" "call")])
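- +
- +;; Summary of the cases handled by the conditional-return peephole
- +;; above (informative only):
- +;;   reg/reg arms               -> ret%1 %2 ; ret%i1 %3
- +;;   reg with imm in {-1,0,1}   -> conditional ret pair, %d immediate
- +;;   any other immediate arm    -> materialize in r12, then retal r12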
- +
- +
- +
- +;;=================================================================
- +;; mulnhh.w
- +;;=================================================================
- +
- +(define_peephole2
- + [(set (match_operand:HI 0 "register_operand" "")
- + (neg:HI (match_operand:HI 1 "register_operand" "")))
- + (set (match_operand:SI 2 "register_operand" "")
- + (mult:SI
- + (sign_extend:SI (match_dup 0))
- + (sign_extend:SI (match_operand:HI 3 "register_operand" ""))))]
- + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[2]) == REGNO(operands[0]))"
- + [ (set (match_dup 2)
- + (mult:SI
- + (sign_extend:SI (neg:HI (match_dup 1)))
- + (sign_extend:SI (match_dup 3))))]
- + ""
- + )
- +
- +(define_peephole2
- + [(set (match_operand:HI 0 "register_operand" "")
- + (neg:HI (match_operand:HI 1 "register_operand" "")))
- + (set (match_operand:SI 2 "register_operand" "")
- + (mult:SI
- + (sign_extend:SI (match_operand:HI 3 "register_operand" ""))
- + (sign_extend:SI (match_dup 0))))]
- + "(peep2_reg_dead_p(2, operands[0])) || (REGNO(operands[2]) == REGNO(operands[0]))"
- + [ (set (match_dup 2)
- + (mult:SI
- + (sign_extend:SI (neg:HI (match_dup 1)))
- + (sign_extend:SI (match_dup 3))))]
- + ""
- + )
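- +
- +;; Informative note: the two peepholes above fold an explicit halfword
- +;; negate into the widening multiply, in both operand orders, so that
- +;; the combined (sign_extend (neg ...)) pattern can later be matched by
- +;; a single mulnhh.w instruction.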
- +
- +
- +
- +;;=================================================================
- +;; Vector set and extract operations
- +;;=================================================================
- +(define_insn "vec_setv2hi_hi"
- + [(set (match_operand:V2HI 0 "register_operand" "=r")
- + (vec_merge:V2HI
- + (match_dup 0)
- + (vec_duplicate:V2HI
- + (match_operand:HI 1 "register_operand" "r"))
- + (const_int 1)))]
- + ""
- + "bfins\t%0, %1, 16, 16"
- + [(set_attr "type" "alu")
- + (set_attr "length" "4")
- + (set_attr "cc" "clobber")])
- +
- +(define_insn "vec_setv2hi_lo"
- + [(set (match_operand:V2HI 0 "register_operand" "+r")
- + (vec_merge:V2HI
- + (match_dup 0)
- + (vec_duplicate:V2HI
- + (match_operand:HI 1 "register_operand" "r"))
- + (const_int 2)))]
- + ""
- + "bfins\t%0, %1, 0, 16"
- + [(set_attr "type" "alu")
- + (set_attr "length" "4")
- + (set_attr "cc" "clobber")])
- +
- +(define_expand "vec_setv2hi"
- + [(set (match_operand:V2HI 0 "register_operand" "")
- + (vec_merge:V2HI
- + (match_dup 0)
- + (vec_duplicate:V2HI
- + (match_operand:HI 1 "register_operand" ""))
- + (match_operand 2 "immediate_operand" "")))]
- + ""
- + { operands[2] = GEN_INT(INTVAL(operands[2]) + 1); }
- + )
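- +
- +;; Informative note: the middle-end passes the element index (0 or 1)
- +;; as operand 2; adding 1 yields the vec_merge selector (1 or 2) that
- +;; the "vec_setv2hi_hi" and "vec_setv2hi_lo" insns above match, so
- +;; index 0 updates the upper halfword and index 1 the lower one.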
- +
- +(define_insn "vec_extractv2hi"
- + [(set (match_operand:HI 0 "register_operand" "=r")
- + (vec_select:HI
- + (match_operand:V2HI 1 "register_operand" "r")
- + (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
- + ""
- + {
- + if ( INTVAL(operands[2]) == 0 )
- + return "bfextu\t%0, %1, 16, 16";
- + else
- + return "bfextu\t%0, %1, 0, 16";
- + }
- + [(set_attr "type" "alu")
- + (set_attr "length" "4")
- + (set_attr "cc" "clobber")])
- +
- +(define_insn "vec_extractv4qi"
- + [(set (match_operand:QI 0 "register_operand" "=r")
- + (vec_select:QI
- + (match_operand:V4QI 1 "register_operand" "r")
- + (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
- + ""
- + {
- + switch ( INTVAL(operands[2]) ){
- + case 0:
- + return "bfextu\t%0, %1, 24, 8";
- + case 1:
- + return "bfextu\t%0, %1, 16, 8";
- + case 2:
- + return "bfextu\t%0, %1, 8, 8";
- + case 3:
- + return "bfextu\t%0, %1, 0, 8";
- + default:
- + abort();
- + }
- + }
- + [(set_attr "type" "alu")
- + (set_attr "length" "4")
- + (set_attr "cc" "clobber")])
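- +
- +;; Informative note: as the bfextu offsets above show, element 0 of a
- +;; V4QI vector lives in the most significant byte (bits 31:24) and
- +;; element 3 in the least significant byte, matching the lane order of
- +;; the V2HI patterns.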
- +
- +
- +(define_insn "concatv2hi"
- + [(set (match_operand:V2HI 0 "register_operand" "=r, r, r")
- + (vec_concat:V2HI
- + (match_operand:HI 1 "register_operand" "r, r, 0")
- + (match_operand:HI 2 "register_operand" "r, 0, r")))]
- + ""
- + "@
- + mov\t%0, %1\;bfins\t%0, %2, 0, 16
- + bfins\t%0, %2, 0, 16
- + bfins\t%0, %1, 16, 16"
- + [(set_attr "length" "6, 4, 4")
- + (set_attr "type" "alu")])
- +
- +
- +;; Load the atomic operation description
- +(include "sync.md")
- +
- +;; Load the SIMD description
- +(include "simd.md")
- +
- +;; Include the FPU for uc3
- +(include "uc3fpu.md")
- --- /dev/null
- +++ b/gcc/config/avr32/avr32-modes.def
- @@ -0,0 +1 @@
- +VECTOR_MODES (INT, 4); /* V4QI V2HI */
- --- /dev/null
- +++ b/gcc/config/avr32/avr32.opt
- @@ -0,0 +1,93 @@
- +; Options for the Atmel AVR32 port of the compiler.
- +
- +; Copyright 2007 Atmel Corporation.
- +;
- +; This file is part of GCC.
- +;
- +; GCC is free software; you can redistribute it and/or modify it under
- +; the terms of the GNU General Public License as published by the Free
- +; Software Foundation; either version 2, or (at your option) any later
- +; version.
- +;
- +; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
- +; WARRANTY; without even the implied warranty of MERCHANTABILITY or
- +; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- +; for more details.
- +;
- +; You should have received a copy of the GNU General Public License
- +; along with GCC; see the file COPYING. If not, write to the Free
- +; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
- +; 02110-1301, USA.
- +
- +muse-rodata-section
- +Target Report Mask(USE_RODATA_SECTION)
- +Use section .rodata for read-only data instead of .text.
- +
- +mhard-float
- +Target Report Mask(HARD_FLOAT)
- +Use FPU instructions instead of floating point emulation.
- +
- +msoft-float
- +Target Report InverseMask(HARD_FLOAT, SOFT_FLOAT)
- +Use floating point emulation for floating point operations.
- +
- +mforce-double-align
- +Target Report RejectNegative Mask(FORCE_DOUBLE_ALIGN)
- +Force double-word alignment for double-word memory accesses.
- +
- +mno-init-got
- +Target Report RejectNegative Mask(NO_INIT_GOT)
- +Do not initialize GOT register before using it when compiling PIC code.
- +
- +mrelax
- +Target Report Mask(RELAX)
- +Let the invoked assembler and linker perform relaxing (enabled by default when the optimization level is >1).
- +
- +mmd-reorg-opt
- +Target Report Undocumented Mask(MD_REORG_OPTIMIZATION)
- +Perform machine dependent optimizations in reorg stage.
- +
- +masm-addr-pseudos
- +Target Report Mask(HAS_ASM_ADDR_PSEUDOS)
- +Use assembler pseudo-instructions lda.w and call for handling direct addresses. (Enabled by default)
- +
- +mpart=
- +Target Report RejectNegative Joined Var(avr32_part_name)
- +Specify the AVR32 part name.
- +
- +mcpu=
- +Target Report RejectNegative Joined Undocumented Var(avr32_part_name)
- +Specify the AVR32 part name (deprecated).
- +
- +march=
- +Target Report RejectNegative Joined Var(avr32_arch_name)
- +Specify the AVR32 architecture name.
- +
- +mfast-float
- +Target Report Mask(FAST_FLOAT)
- +Enable fast floating-point library. Enabled by default if the -funsafe-math-optimizations switch is specified.
- +
- +mimm-in-const-pool
- +Target Report Var(avr32_imm_in_const_pool) Init(-1)
- +Put large immediates in the constant pool. Enabled by default for architectures with an instruction cache.
- +
- +mno-pic
- +Target Report RejectNegative Mask(NO_PIC)
- +Do not generate position-independent code. (deprecated, use -fno-pic instead)
- +
- +mcond-exec-before-reload
- +Target Report Undocumented Mask(COND_EXEC_BEFORE_RELOAD)
- +Enable experimental conditional execution preparation before the reload stage.
- +
- +mrmw-addressable-data
- +Target Report Mask(RMW_ADDRESSABLE_DATA)
- +Signal that all data is within range of the atomic read-modify-write memory
- +instructions, so that GCC can safely generate them whenever possible.
- +
- +mflashvault
- +Target Var(TARGET_FLASHVAULT)
- +Generate code for FlashVault.
- +
- +mlist-devices
- +Target RejectNegative Var(avr32_list_supported_parts)
- +Print the list of parts supported while printing --target-help.
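- +
- +; Usage sketch (illustrative; the part and arch names below are
- +; examples, not an exhaustive list):
- +;   avr32-gcc -march=ucr2 -mpart=uc3a0512es -mrelax -O2 foo.c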
- --- /dev/null
- +++ b/gcc/config/avr32/avr32-protos.h
- @@ -0,0 +1,196 @@
- +/*
- + Prototypes for exported functions defined in avr32.c
- + Copyright 2003,2004,2005,2006,2007,2008,2009 Atmel Corporation.
- +
- + This file is part of GCC.
- +
- + This program is free software; you can redistribute it and/or modify
- + it under the terms of the GNU General Public License as published by
- + the Free Software Foundation; either version 2 of the License, or
- + (at your option) any later version.
- +
- + This program is distributed in the hope that it will be useful,
- + but WITHOUT ANY WARRANTY; without even the implied warranty of
- + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- + GNU General Public License for more details.
- +
- + You should have received a copy of the GNU General Public License
- + along with this program; if not, write to the Free Software
- + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
- +
- +
- +#ifndef AVR32_PROTOS_H
- +#define AVR32_PROTOS_H
- +
- +extern const int swap_reg[];
- +
- +extern int avr32_valid_macmac_bypass (rtx, rtx);
- +extern int avr32_valid_mulmac_bypass (rtx, rtx);
- +
- +extern int avr32_decode_lcomm_symbol_offset (rtx, int *);
- +extern void avr32_encode_lcomm_symbol_offset (tree, char *, int);
- +
- +extern const char *avr32_strip_name_encoding (const char *);
- +
- +extern rtx avr32_get_note_reg_equiv (rtx insn);
- +
- +extern int avr32_use_return_insn (int iscond);
- +
- +extern void avr32_make_reglist16 (int reglist16_vect, char *reglist16_string);
- +
- +extern void avr32_make_reglist8 (int reglist8_vect, char *reglist8_string);
- +extern void avr32_make_fp_reglist_w (int reglist_mask, char *reglist_string);
- +extern void avr32_make_fp_reglist_d (int reglist_mask, char *reglist_string);
- +
- +extern void avr32_output_return_instruction (int single_ret_inst,
- + int iscond, rtx cond,
- + rtx r12_imm);
- +extern void avr32_expand_prologue (void);
- +extern void avr32_set_return_address (rtx source, rtx scratch);
- +
- +extern int avr32_hard_regno_mode_ok (int regno, enum machine_mode mode);
- +extern int avr32_extra_constraint_s (rtx value, const int strict);
- +extern int avr32_eh_return_data_regno (const int n);
- +extern int avr32_initial_elimination_offset (const int from, const int to);
- +extern rtx avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode,
- + tree type, int named);
- +extern void avr32_init_cumulative_args (CUMULATIVE_ARGS * cum, tree fntype,
- + rtx libname, tree fndecl);
- +extern void avr32_function_arg_advance (CUMULATIVE_ARGS * cum,
- + enum machine_mode mode,
- + tree type, int named);
- +#ifdef ARGS_SIZE_RTX
- +/* expr.h defines ARGS_SIZE_RTX and `enum direction'. */
- +extern enum direction avr32_function_arg_padding (enum machine_mode mode,
- + tree type);
- +#endif /* ARGS_SIZE_RTX */
- +extern rtx avr32_function_value (tree valtype, tree func, bool outgoing);
- +extern rtx avr32_libcall_value (enum machine_mode mode);
- +extern int avr32_sched_use_dfa_pipeline_interface (void);
- +extern bool avr32_return_in_memory (tree type, tree fntype);
- +extern void avr32_regs_to_save (char *operand);
- +extern void avr32_target_asm_function_prologue (FILE * file,
- + HOST_WIDE_INT size);
- +extern void avr32_target_asm_function_epilogue (FILE * file,
- + HOST_WIDE_INT size);
- +extern void avr32_trampoline_template (FILE * file);
- +extern void avr32_initialize_trampoline (rtx addr, rtx fnaddr,
- + rtx static_chain);
- +extern int avr32_legitimate_address (enum machine_mode mode, rtx x,
- + int strict);
- +extern int avr32_legitimate_constant_p (rtx x);
- +
- +extern int avr32_legitimate_pic_operand_p (rtx x);
- +
- +extern rtx avr32_find_symbol (rtx x);
- +extern void avr32_select_section (rtx exp, int reloc, int align);
- +extern void avr32_encode_section_info (tree decl, rtx rtl, int first);
- +extern void avr32_asm_file_end (FILE * stream);
- +extern void avr32_asm_output_ascii (FILE * stream, char *ptr, int len);
- +extern void avr32_asm_output_common (FILE * stream, const char *name,
- + int size, int rounded);
- +extern void avr32_asm_output_label (FILE * stream, const char *name);
- +extern void avr32_asm_declare_object_name (FILE * stream, char *name,
- + tree decl);
- +extern void avr32_asm_globalize_label (FILE * stream, const char *name);
- +extern void avr32_asm_weaken_label (FILE * stream, const char *name);
- +extern void avr32_asm_output_external (FILE * stream, tree decl,
- + const char *name);
- +extern void avr32_asm_output_external_libcall (FILE * stream, rtx symref);
- +extern void avr32_asm_output_labelref (FILE * stream, const char *name);
- +extern void avr32_notice_update_cc (rtx exp, rtx insn);
- +extern void avr32_print_operand (FILE * stream, rtx x, int code);
- +extern void avr32_print_operand_address (FILE * stream, rtx x);
- +
- +extern int avr32_symbol (rtx x);
- +
- +extern void avr32_select_rtx_section (enum machine_mode mode, rtx x,
- + unsigned HOST_WIDE_INT align);
- +
- +extern int avr32_load_multiple_operation (rtx op, enum machine_mode mode);
- +extern int avr32_store_multiple_operation (rtx op, enum machine_mode mode);
- +
- +extern int avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c,
- + const char *str);
- +
- +extern bool avr32_cannot_force_const_mem (rtx x);
- +
- +extern void avr32_init_builtins (void);
- +
- +extern rtx avr32_expand_builtin (tree exp, rtx target, rtx subtarget,
- + enum machine_mode mode, int ignore);
- +
- +extern bool avr32_must_pass_in_stack (enum machine_mode mode, tree type);
- +
- +extern bool avr32_strict_argument_naming (CUMULATIVE_ARGS * ca);
- +
- +extern bool avr32_pass_by_reference (CUMULATIVE_ARGS * cum,
- + enum machine_mode mode,
- + tree type, bool named);
- +
- +extern rtx avr32_gen_load_multiple (rtx * regs, int count, rtx from,
- + int write_back, int in_struct_p,
- + int scalar_p);
- +extern rtx avr32_gen_store_multiple (rtx * regs, int count, rtx to,
- + int in_struct_p, int scalar_p);
- +extern int avr32_gen_movmemsi (rtx * operands);
- +
- +extern int avr32_rnd_operands (rtx add, rtx shift);
- +extern int avr32_adjust_insn_length (rtx insn, int length);
- +
- +extern int symbol_mentioned_p (rtx x);
- +extern int label_mentioned_p (rtx x);
- +extern rtx legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg);
- +extern int avr32_address_register_rtx_p (rtx x, int strict_p);
- +extern int avr32_legitimate_index_p (enum machine_mode mode, rtx index,
- + int strict_p);
- +
- +extern int avr32_const_double_immediate (rtx value);
- +extern void avr32_init_expanders (void);
- +extern rtx avr32_return_addr (int count, rtx frame);
- +extern bool avr32_got_mentioned_p (rtx addr);
- +
- +extern void avr32_final_prescan_insn (rtx insn, rtx * opvec, int noperands);
- +
- +extern int avr32_expand_movcc (enum machine_mode mode, rtx operands[]);
- +extern int avr32_expand_addcc (enum machine_mode mode, rtx operands[]);
- +#ifdef RTX_CODE
- +extern int avr32_expand_scc (RTX_CODE cond, rtx * operands);
- +#endif
- +
- +extern int avr32_store_bypass (rtx insn_out, rtx insn_in);
- +extern int avr32_mul_waw_bypass (rtx insn_out, rtx insn_in);
- +extern int avr32_valid_load_double_bypass (rtx insn_out, rtx insn_in);
- +extern int avr32_valid_load_quad_bypass (rtx insn_out, rtx insn_in);
- +extern rtx avr32_output_cmp (rtx cond, enum machine_mode mode,
- + rtx op0, rtx op1);
- +
- +rtx get_next_insn_cond (rtx cur_insn);
- +int set_next_insn_cond (rtx cur_insn, rtx cond);
- +rtx next_insn_emits_cmp (rtx cur_insn);
- +void avr32_override_options (void);
- +void avr32_load_pic_register (void);
- +#ifdef GCC_BASIC_BLOCK_H
- +rtx avr32_ifcvt_modify_insn (ce_if_block_t *ce_info, rtx pattern, rtx insn,
- + int *num_true_changes);
- +rtx avr32_ifcvt_modify_test (ce_if_block_t *ce_info, rtx test);
- +void avr32_ifcvt_modify_cancel (ce_if_block_t *ce_info, int *num_true_changes);
- +#endif
- +void avr32_optimization_options (int level, int size);
- +int avr32_const_ok_for_move (HOST_WIDE_INT c);
- +
- +void avr32_split_const_expr (enum machine_mode mode,
- + enum machine_mode new_mode,
- + rtx expr,
- + rtx *split_expr);
- +void avr32_get_intval (enum machine_mode mode,
- + rtx const_expr,
- + HOST_WIDE_INT *val);
- +
- +int avr32_cond_imm_clobber_splittable (rtx insn,
- + rtx operands[]);
- +
- +bool avr32_flashvault_call(tree decl);
- +extern void avr32_emit_swdivsf (rtx, rtx, rtx);
- +
- +#endif /* AVR32_PROTOS_H */
- --- /dev/null
- +++ b/gcc/config/avr32/crti.asm
- @@ -0,0 +1,64 @@
- +/*
- + Init/fini stuff for AVR32.
- + Copyright 2003-2006 Atmel Corporation.
- +
- + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
- +
- + This file is part of GCC.
- +
- + This program is free software; you can redistribute it and/or modify
- + it under the terms of the GNU General Public License as published by
- + the Free Software Foundation; either version 2 of the License, or
- + (at your option) any later version.
- +
- + This program is distributed in the hope that it will be useful,
- + but WITHOUT ANY WARRANTY; without even the implied warranty of
- + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- + GNU General Public License for more details.
- +
- + You should have received a copy of the GNU General Public License
- + along with this program; if not, write to the Free Software
- + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
- +
- +
- +/* The code in sections .init and .fini is supposed to be a single
- + regular function. The function in .init is called directly from
- + start in crt1.asm. The function in .fini is atexit()ed in crt1.asm
- + too.
- +
- + crti.asm contributes the prologue of a function to these sections,
- + and crtn.asm supplies the epilogue. STARTFILE_SPEC should list
- + crti.o before any other object files that might add code to .init
- + or .fini sections, and ENDFILE_SPEC should list crtn.o after any
- + such object files. */
- +
- + .file "crti.asm"
- +
- + .section ".init"
- +/* Just load the GOT */
- + .align 2
- + .global _init
- +_init:
- + stm --sp, r6, lr
- + lddpc r6, 1f
- +0:
- + rsub r6, pc
- + rjmp 2f
- + .align 2
- +1: .long 0b - _GLOBAL_OFFSET_TABLE_
- +2:
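- +/* Illustrative note: the word at 1b holds (0b - _GLOBAL_OFFSET_TABLE_),
- + so "rsub r6, pc" (r6 = pc - r6), with pc read at label 0, leaves
- + r6 = &_GLOBAL_OFFSET_TABLE_. */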
- +
- + .section ".fini"
- +/* Just load the GOT */
- + .align 2
- + .global _fini
- +_fini:
- + stm --sp, r6, lr
- + lddpc r6, 1f
- +0:
- + rsub r6, pc
- + rjmp 2f
- + .align 2
- +1: .long 0b - _GLOBAL_OFFSET_TABLE_
- +2:
- +
- --- /dev/null
- +++ b/gcc/config/avr32/crtn.asm
- @@ -0,0 +1,44 @@
- +/* Copyright (C) 2001 Free Software Foundation, Inc.
- + Written by Nick Clifton
- +
- + This file is free software; you can redistribute it and/or modify it
- + under the terms of the GNU General Public License as published by the
- + Free Software Foundation; either version 2, or (at your option) any
- + later version.
- +
- + In addition to the permissions in the GNU General Public License, the
- + Free Software Foundation gives you unlimited permission to link the
- + compiled version of this file with other programs, and to distribute
- + those programs without any restriction coming from the use of this
- + file. (The General Public License restrictions do apply in other
- + respects; for example, they cover modification of the file, and
- + distribution when not linked into another program.)
- +
- + This file is distributed in the hope that it will be useful, but
- + WITHOUT ANY WARRANTY; without even the implied warranty of
- + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- + General Public License for more details.
- +
- + You should have received a copy of the GNU General Public License
- + along with this program; see the file COPYING. If not, write to
- + the Free Software Foundation, 59 Temple Place - Suite 330,
- + Boston, MA 02111-1307, USA.
- +
- + As a special exception, if you link this library with files
- + compiled with GCC to produce an executable, this does not cause
- + the resulting executable to be covered by the GNU General Public License.
- + This exception does not however invalidate any other reasons why
- + the executable file might be covered by the GNU General Public License.
- +*/
- +
- +
- +
- +
- + .file "crtn.asm"
- +
- + .section ".init"
- + ldm sp++, r6, pc
- +
- + .section ".fini"
- + ldm sp++, r6, pc
- +
- --- /dev/null
- +++ b/gcc/config/avr32/lib1funcs.S
- @@ -0,0 +1,2903 @@
- +/* Macro for moving immediate value to register. */
- +.macro mov_imm reg, imm
- +.if (((\imm & 0xfffff) == \imm) || ((\imm | 0xfff00000) == \imm))
- + mov \reg, \imm
- +#if __AVR32_UC__ >= 2
- +.elseif ((\imm & 0xffff) == 0)
- + movh \reg, hi(\imm)
- +
- +#endif
- +.else
- + mov \reg, lo(\imm)
- + orh \reg, hi(\imm)
- +.endif
- +.endm
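- +
- +/* Usage sketch (illustrative): "mov_imm r9, 0x12345678" expands to
- + mov r9, lo(0x12345678)
- + orh r9, hi(0x12345678)
- + since the constant fits neither the sign-extended 21-bit mov range
- + nor, its low halfword being non-zero, a single movh. */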
- +
- +
- +
- +/* Adjust the unpacked double number if it is a subnormal number.
- + The exponent and mantissa pair are stored
- + in [mant_hi,mant_lo] and [exp]. A register with the correct sign bit in
- + the MSB is passed in [sign]. Needs two scratch
- + registers [scratch1] and [scratch2]. An adjusted and packed double float
- + is present in [mant_hi,mant_lo] after the macro has executed. */
- +.macro adjust_subnormal_df exp, mant_lo, mant_hi, sign, scratch1, scratch2
- + /* We have an exponent which is <=0, indicating a subnormal number.
- + As it should be stored as if the exponent was 1 (although the
- + exponent field is all zeros to indicate a subnormal number),
- + we have to shift down the mantissa to its correct position. */
- + neg \exp
- + sub \exp,-1 /* amount to shift down */
- + cp.w \exp,54
- + brlo 50f /* if more than 53 shift steps, the
- + entire mantissa will disappear
- + without any rounding occurring */
- + mov \mant_hi, 0
- + mov \mant_lo, 0
- + rjmp 52f
- +50:
- + sub \exp,-10 /* do the shift to position the
- + mantissa at the same time
- + note! this does not include the
- + final 1 step shift to add the sign */
- +
- + /* when shifting, save all shifted out bits in [scratch2]. we may need to
- + look at them to make correct rounding. */
- +
- + rsub \scratch1,\exp,32 /* get inverted shift count */
- + cp.w \exp,32 /* handle shifts >= 32 separately */
- + brhs 51f
- +
- + /* small (<32) shift amount, both words are part of the shift */
- + lsl \scratch2,\mant_lo,\scratch1 /* save bits to shift out from lsw*/
- + lsl \scratch1,\mant_hi,\scratch1 /* get bits from msw destined for lsw*/
- + lsr \mant_lo,\mant_lo,\exp /* shift down lsw */
- + lsr \mant_hi,\mant_hi,\exp /* shift down msw */
- + or \mant_hi,\scratch1 /* add bits from msw with prepared lsw */
- + rjmp 50f
- +
- + /* large (>=32) shift amount, only lsw will have bits left after shift.
- + note that shift operations will use ((shift count) mod 32) so
- + we do not need to subtract 32 from shift count. */
- +51:
- + lsl \scratch2,\mant_hi,\scratch1 /* save bits to shift out from msw */
- + or \scratch2,\mant_lo /* also save all bits from lsw */
- + mov \mant_lo,\mant_hi /* msw -> lsw (i.e. "shift 32 first") */
- + mov \mant_hi,0 /* clear msw */
- + lsr \mant_lo,\mant_lo,\exp /* make rest of shift inside lsw */
- +
- +50:
- + /* result is almost ready to return, except that least significant bit
- + and the part we already shifted out may cause the result to be
- + rounded */
- + bld \mant_lo,0 /* get bit to be shifted out */
- + brcc 51f /* if bit was 0, no rounding */
- +
- + /* msb of part to remove is 1, so rounding depends on rest of bits */
- + tst \scratch2,\scratch2 /* get shifted out tail */
- + brne 50f /* if rest > 0, do round */
- + bld \mant_lo,1 /* we have to look at lsb in result */
- + brcc 51f /* if lsb is 0, don't round */
- +
- +50:
- + /* subnormal result requires rounding
- + rounding may cause subnormal to become smallest normal number
- + luckily, smallest normal number has exactly the representation
- + we got by rippling a one bit up from mantissa into exponent field. */
- + sub \mant_lo,-1
- + subcc \mant_hi,-1
- +
- +51:
- + /* shift and return packed double with correct sign */
- + rol \sign
- + ror \mant_hi
- + ror \mant_lo
- +52:
- +.endm
- +
- +
- +/* Adjust subnormal single float number with exponent [exp]
- + and mantissa [mant] and round. */
- +.macro adjust_subnormal_sf sf, exp, mant, sign, scratch
- + /* subnormal number */
- + rsub \exp,\exp, 1 /* shift amount */
- + cp.w \exp, 25
- + movhs \mant, 0
- + brhs 90f /* Return zero */
- + rsub \scratch, \exp, 32
- + lsl \scratch, \mant,\scratch/* Check if there are any bits set
- + in the bits discarded in the mantissa */
- + srne \scratch /* If so set the lsb of the shifted mantissa */
- + lsr \mant,\mant,\exp /* Shift the mantissa */
- + or \mant, \scratch /* Round lsb if any bits were shifted out */
- + /* Rounding: for an explanation, see round_sf. */
- + mov \scratch, 0x7f /* Set rounding constant */
- + bld \mant, 8
- + subeq \scratch, -1 /* For odd numbers use rounding constant 0x80 */
- + add \mant, \scratch /* Add rounding constant to mantissa */
- + /* We can't overflow because mantissa is at least shifted one position
- + to the right so the implicit bit is zero. We can however get the implicit
- + bit set after rounding which means that we have the lowest normal number
- + but this is ok since this bit has the same position as the LSB of the
- + exponent */
- + lsr \sf, \mant, 7
- + /* Rotate in sign */
- + lsl \sign, 1
- + ror \sf
- +90:
- +.endm
- +
- +
- +/* Round the unpacked df number with exponent [exp] and
- + mantissa [mant_hi, mant_lo]. Uses scratch register
- + [scratch] */
- +.macro round_df exp, mant_lo, mant_hi, scratch
- + mov \scratch, 0x3ff /* Rounding constant */
- + bld \mant_lo,11 /* Check if lsb in the final result is
- + set */
- + subeq \scratch, -1 /* Adjust rounding constant to 0x400
- + if rounding 0.5 upwards */
- + add \mant_lo, \scratch /* Round */
- + acr \mant_hi /* If we overflow we know that
- + all the bits not scaled out are
- + zero, so we can leave them, but
- + we must increase the exponent by
- + two since we had an implicit bit
- + which is lost, plus the extra
- + overflow bit */
- + subcs \exp, -2 /* Update exponent */
- +.endm
- +
- +/* Round single float number stored in [mant] and [exp] */
- +.macro round_sf exp, mant, scratch
- + /* Round:
- + For 0.5 we round to nearest even integer
- + for all other cases we round to nearest integer.
- + This means that if the digit left of the "point" (.)
- + is 1 we can add 0x80 to the mantissa since the
- + corner case 0x180 will round up to 0x200. If the
- + digit left of the "point" is 0 we will have to
- + add 0x7f since this will give 0xff and hence a
- + truncation/rounding downwards for the corner
- + case when the 9 lowest bits are 0x080 */
- + mov \scratch, 0x7f /* Set rounding constant */
- + /* Check if the mantissa is even or odd */
- + bld \mant, 8
- + subeq \scratch, -1 /* Rounding constant should be 0x80 */
- + add \mant, \scratch
- + subcs \exp, -2 /* Adjust exponent if we overflowed */
- +.endm
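- +
- +/* Worked tie cases for round_sf (restating the rule above): an odd
- + mantissa with low bits 0x180 gets the constant 0x80, and
- + 0x180 + 0x80 = 0x200 rounds up to an even result; an even mantissa
- + with low bits 0x080 gets 0x7f, and 0x080 + 0x7f = 0x0ff truncates,
- + so the tie again lands on an even result. */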
- +
- +
- +
- +/* Pack a single float number stored in [mant] and [exp]
- + into a single float number in [sf] */
- +.macro pack_sf sf, exp, mant
- + bld \mant,31 /* implicit bit to z */
- + subne \exp,1 /* if subnormal (implicit bit 0)
- + adjust exponent to storage format */
- +
- + lsr \sf, \mant, 7
- + bfins \sf, \exp, 24, 8
- +.endm
- +
- +/* Pack exponent [exp] and mantissa [mant_hi, mant_lo]
- + into [df_hi, df_lo]. [df_hi] is shifted
- + one bit up so the sign bit can be shifted into it */
- +
- +.macro pack_df exp, mant_lo, mant_hi, df_lo, df_hi
- + bld \mant_hi,31 /* implicit bit to z */
- + subne \exp,1 /* if subnormal (implicit bit 0)
- + adjust exponent to storage format */
- +
- + lsr \mant_lo,11 /* shift back lsw */
- + or \df_lo,\mant_lo,\mant_hi<<21 /* combine with low bits from msw */
- + lsl \mant_hi,1 /* get rid of implicit bit */
- + lsr \mant_hi,11 /* shift back msw except for one step*/
- + or \df_hi,\mant_hi,\exp<<21 /* combine msw with exponent */
- +.endm
- +
- +/* Normalize single float number stored in [mant] and [exp]
- + using scratch register [scratch] */
- +.macro normalize_sf exp, mant, scratch
- + /* Adjust exponent and mantissa */
- + clz \scratch, \mant
- + sub \exp, \scratch
- + lsl \mant, \mant, \scratch
- +.endm
- +
- +/* Normalize the exponent and mantissa pair stored
- + in [mant_hi,mant_lo] and [exp]. Needs two scratch
- + registers [scratch1] and [scratch2]. */
- +.macro normalize_df exp, mant_lo, mant_hi, scratch1, scratch2
- + clz \scratch1,\mant_hi /* Check if we have zeros in high bits */
- + breq 80f /* No need for scaling if no zeros in high bits */
- + brcs 81f /* Check for all zeros */
- +
- + /* shift amount is smaller than 32, and involves both msw and lsw*/
- + rsub \scratch2,\scratch1,32 /* shift mantissa */
- + lsl \mant_hi,\mant_hi,\scratch1
- + lsr \scratch2,\mant_lo,\scratch2
- + or \mant_hi,\scratch2
- + lsl \mant_lo,\mant_lo,\scratch1
- + sub \exp,\scratch1 /* adjust exponent */
- + rjmp 80f /* Finished */
- +81:
- + /* shift amount is greater than 32 */
- + clz \scratch1,\mant_lo /* shift mantissa */
- + movcs \scratch1, 0
- + subcc \scratch1,-32
- + lsl \mant_hi,\mant_lo,\scratch1
- + mov \mant_lo,0
- + sub \exp,\scratch1 /* adjust exponent */
- +80:
- +.endm
- +
- +
- +/* Fast but approximate multiply of two 64-bit numbers to give a 64 bit result.
- + The multiplication of [al]x[bl] is discarded.
- + Operands in [ah], [al], [bh], [bl].
- + Scratch registers in [sh], [sl].
- + Returns results in registers [rh], [rl].*/
- +.macro mul_approx_df ah, al, bh, bl, rh, rl, sh, sl
- + mulu.d \sl, \ah, \bl
- + macu.d \sl, \al, \bh
- + mulu.d \rl, \ah, \bh
- + add \rl, \sh
- + acr \rh
- +.endm
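- +
- +/* Layout sketch (informative): with 32-bit halves the full product is
- + ah:al * bh:bl = (ah*bh << 64) + ((ah*bl + al*bh) << 32) + al*bl.
- + Only the high word of the middle sum is added into [rh,rl]; its low
- + word and all of al*bl are dropped, so carries out of the discarded
- + low parts are lost, hence "fast but approximate". */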
- +
- +
- +
- +#if defined(L_avr32_f64_mul) || defined(L_avr32_f64_mul_fast)
- + .align 2
- +#if defined(L_avr32_f64_mul)
- + .global __avr32_f64_mul
- + .type __avr32_f64_mul,@function
- +__avr32_f64_mul:
- +#else
- + .global __avr32_f64_mul_fast
- + .type __avr32_f64_mul_fast,@function
- +__avr32_f64_mul_fast:
- +#endif
- + or r12, r10, r11 << 1
- + breq __avr32_f64_mul_op1_zero
- +
- +#if defined(L_avr32_f64_mul)
- + pushm r4-r7, lr
- +#else
- + stm --sp, r5,r6,r7,lr
- +#endif
- +
- +#define AVR32_F64_MUL_OP1_INT_BITS 1
- +#define AVR32_F64_MUL_OP2_INT_BITS 10
- +#define AVR32_F64_MUL_RES_INT_BITS 11
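- +
- +/* Informative note: op1 is unpacked below into 1.63 fixed-point format
- + and op2 into 10.54 format, so their 128-bit product comes out in
- + (1+10).(63+54) = 11.117 format, matching the comments further down. */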
- +
- + /* op1 in {r11,r10}*/
- + /* op2 in {r9,r8}*/
- + eor lr, r11, r9 /* MSB(lr) = Sign(op1) ^ Sign(op2) */
- +
- + /* Unpack op1 to 1.63 format*/
- + /* exp: r7 */
- + /* sf: r11, r10 */
- + bfextu r7, r11, 20, 11 /* Extract exponent */
- +
- + mov r5, 1
- +
- + /* Check if normalization is needed */
- + breq __avr32_f64_mul_op1_subnormal /*If number is subnormal, normalize it */
- +
- + lsl r11, (12-AVR32_F64_MUL_OP1_INT_BITS-1) /* Extract mantissa, leave room for implicit bit */
- + or r11, r11, r10>>(32-(12-AVR32_F64_MUL_OP1_INT_BITS-1))
- + lsl r10, (12-AVR32_F64_MUL_OP1_INT_BITS-1)
- + bfins r11, r5, 32 - (1 + AVR32_F64_MUL_OP1_INT_BITS), 1 + AVR32_F64_MUL_OP1_INT_BITS /* Insert implicit bit */
- +
- +
- +22:
- + /* Unpack op2 to 10.54 format */
- + /* exp: r6 */
- + /* sf: r9, r8 */
- + bfextu r6, r9, 20, 11 /* Extract exponent */
- +
- + /* Check if normalization is needed */
- + breq __avr32_f64_mul_op2_subnormal /*If number is subnormal, normalize it */
- +
- + lsl r8, 1 /* Extract mantissa, leave room for implicit bit */
- + rol r9
- + bfins r9, r5, 32 - (1 + AVR32_F64_MUL_OP2_INT_BITS), 1 + AVR32_F64_MUL_OP2_INT_BITS /* Insert implicit bit */
- +
- +23:
- +
- + /* Check if any operands are NaN or INF */
- + cp r7, 0x7ff
- + breq __avr32_f64_mul_op_nan_or_inf /* Check op1 for NaN or Inf */
- + cp r6, 0x7ff
- + breq __avr32_f64_mul_op_nan_or_inf /* Check op2 for NaN or Inf */
- +
- +
- + /* Calculate new exponent in r12*/
- + add r12, r7, r6
- + sub r12, (1023-1)
- +
- +#if defined(L_avr32_f64_mul)
- + /* Do the multiplication.
- + Place result in [r11, r10, r7, r6]. The result is in 11.117 format. */
- + mulu.d r4, r11, r8
- + macu.d r4, r10, r9
- + mulu.d r6, r10, r8
- + mulu.d r10, r11, r9
- + add r7, r4
- + adc r10, r10, r5
- + acr r11
- +#else
- + /* Do the multiplication using approximate calculation. discard the al x bl
- + calculation.
- + Place result in [r11, r10, r7]. The result is in 11.85 format. */
- +
- + /* Do the multiplication using approximate calculation.
- + Place result in r11, r10. Use r7, r6 as scratch registers */
- + mulu.d r6, r11, r8
- + macu.d r6, r10, r9
- + mulu.d r10, r11, r9
- + add r10, r7
- + acr r11
- +#endif
- + /* Adjust exponent and mantissa */
- + /* [r12]:exp, [r11, r10]:mant [r7, r6]:sticky bits */
- + /* Mantissa may be of the format 00000000000.0xxx or 00000000000.1xxx. */
- + /* In the first case, shift one pos to left.*/
- + bld r11, 32-AVR32_F64_MUL_RES_INT_BITS-1
- + breq 0f
- + lsl r7, 1
- + rol r10
- + rol r11
- + sub r12, 1
- +0:
- + cp r12, 0
- + brle __avr32_f64_mul_res_subnormal /*Result was subnormal.*/
- +
- + /* Check for Inf. */
- + cp.w r12, 0x7ff
- + brge __avr32_f64_mul_res_inf
- +
- + /* Insert exponent. */
- + bfins r11, r12, 20, 11
- +
- + /* Result was not subnormal. Perform rounding. */
- + /* For the fast version we discard the sticky bits and always round
- + the halfway case up. */
- +24:
- +#if defined(L_avr32_f64_mul)
- + or r6, r6, r10 << 31 /* Or in parity bit into stickybits */
- + or r7, r7, r6 >> 1 /* Or in the sticky bits while keeping
- + the msb of r7 as the halfway bit. */
- + eorh r7, 0x8000 /* Toggle halfway bit. */
- + /* We should now round up by adding one for the following cases:
- +
- + halfway sticky|parity round-up
- + 0 x no
- + 1 0 no
- + 1 1 yes
- +
- + Since we have inverted the halfway bit we can use the satu instruction
- + by saturating to 1 bit to implement this.
- + */
- + satu r7 >> 0, 1
- +#else
- + lsr r7, 31
- +#endif
- + add r10, r7
- + acr r11
- +
- + /* Insert sign bit*/
- + bld lr, 31
- + bst r11, 31
- +
- + /* Return result in [r11,r10] */
- +#if defined(L_avr32_f64_mul)
- + popm r4-r7, pc
- +#else
- + ldm sp++, r5, r6, r7,pc
- +#endif
- +
- +
- +__avr32_f64_mul_op1_subnormal:
- + andh r11, 0x000f /* Remove sign bit and exponent */
- + clz r12, r10 /* Count leading zeros in lsw */
- + clz r6, r11 /* Count leading zeros in msw */
- + subcs r12, -32 + AVR32_F64_MUL_OP1_INT_BITS
- + movcs r6, r12
- + subcc r6, AVR32_F64_MUL_OP1_INT_BITS
- + cp.w r6, 32
- + brge 0f
- +
- + /* shifting involves both msw and lsw*/
- + rsub r12, r6, 32 /* shift mantissa */
- + lsl r11, r11, r6
- + lsr r12, r10, r12
- + or r11, r12
- + lsl r10, r10, r6
- + sub r6, 12-AVR32_F64_MUL_OP1_INT_BITS
- + sub r7, r6 /* adjust exponent */
- + rjmp 22b /* Finished */
- +0:
- + /* msw is zero so only need to consider lsw */
- + lsl r11, r10, r6
- + breq __avr32_f64_mul_res_zero
- + mov r10, 0
- + sub r6, 12-AVR32_F64_MUL_OP1_INT_BITS
- + sub r7, r6 /* adjust exponent */
- + rjmp 22b
- +
- +
- +__avr32_f64_mul_op2_subnormal:
- + andh r9, 0x000f /* Remove sign bit and exponent */
- + clz r12, r8 /* Count leading zeros in lsw */
- + clz r5, r9 /* Count leading zeros in msw */
- + subcs r12, -32 + AVR32_F64_MUL_OP2_INT_BITS
- + movcs r5, r12
- + subcc r5, AVR32_F64_MUL_OP2_INT_BITS
- + cp.w r5, 32
- + brge 0f
- +
- + /* shifting involves both msw and lsw*/
- + rsub r12, r5, 32 /* shift mantissa */
- + lsl r9, r9, r5
- + lsr r12, r8, r12
- + or r9, r12
- + lsl r8, r8, r5
- + sub r5, 12 - AVR32_F64_MUL_OP2_INT_BITS
- + sub r6, r5 /* adjust exponent */
- + rjmp 23b /* Finished */
- +0:
- + /* msw is zero so only need to consider lsw */
- + lsl r9, r8, r5
- + breq __avr32_f64_mul_res_zero
- + mov r8, 0
- + sub r5, 12 - AVR32_F64_MUL_OP2_INT_BITS
- + sub r6, r5 /* adjust exponent */
- + rjmp 23b
- +
- +
- +__avr32_f64_mul_op_nan_or_inf:
- + /* Same code for OP1 and OP2*/
- + /* Since we are here, at least one of the OPs was NaN or INF */
- + andh r9, 0x000f /* Remove sign bit and exponent */
- + andh r11, 0x000f /* Remove sign bit and exponent */
- + /* Merge the regs in each operand to check for zero*/
- + or r11, r10 /* op1 */
- + or r9, r8 /* op2 */
- + /* Check if op1 is NaN or INF */
- + cp r7, 0x7ff
- + brne __avr32_f64_mul_op1_not_naninf
- + /* op1 was NaN or INF.*/
- + cp r11, 0
- + brne __avr32_f64_mul_res_nan /* op1 was NaN. Result will be NaN*/
- + /*op1 was INF. check if op2 is NaN or INF*/
- + cp r6, 0x7ff
- + brne __avr32_f64_mul_res_inf /*op1 was INF, op2 was neither NaN nor INF*/
- + /* op1 is INF, op2 is either NaN or INF*/
- + cp r9, 0
- + breq __avr32_f64_mul_res_inf /*op2 was also INF*/
- + rjmp __avr32_f64_mul_res_nan /*op2 was NaN*/
- +
- +__avr32_f64_mul_op1_not_naninf:
- + /* op1 was not NaN nor INF. Then op2 must be NaN or INF*/
- + cp r9, 0
- + breq __avr32_f64_mul_res_inf /*op2 was INF, return INF*/
- + rjmp __avr32_f64_mul_res_nan /*else return NaN*/
- +
- +__avr32_f64_mul_res_subnormal:/* Multiply result was subnormal. */
- +#if defined(L_avr32_f64_mul)
- + /* Check how much we must scale down the mantissa. */
- + neg r12
- + sub r12, -1 /* We no longer have an implicit bit. */
- + satu r12 >> 0, 6 /* Saturate shift amount to max 63. */
- + cp.w r12, 32
- + brge 0f
- + /* Shift amount <32 */
- + rsub r8, r12, 32
- + or r6, r7
- + lsr r7, r7, r12
- + lsl r9, r10, r8
- + or r7, r9
- + lsr r10, r10, r12
- + lsl r9, r11, r8
- + or r10, r9
- + lsr r11, r11, r12
- + rjmp 24b
- +0:
- + /* Shift amount >=32 */
- + rsub r8, r12, 32
- + moveq r9, 0
- + breq 0f
- + lsl r9, r11, r8
- +0:
- + or r6, r7
- + or r6, r6, r10 << 1
- + lsr r10, r10, r12
- + or r7, r9, r10
- + lsr r10, r11, r12
- + mov r11, 0
- + rjmp 24b
- +#else
- + /* Flush to zero for the fast version. */
- + mov r11, lr /*Get correct sign*/
- + andh r11, 0x8000, COH
- + mov r10, 0
- + ldm sp++, r5, r6, r7,pc
- +#endif
- +
- +__avr32_f64_mul_res_zero:/* Multiply result is zero. */
- + mov r11, lr /*Get correct sign*/
- + andh r11, 0x8000, COH
- + mov r10, 0
- +#if defined(L_avr32_f64_mul)
- + popm r4-r7, pc
- +#else
- + ldm sp++, r5, r6, r7,pc
- +#endif
- +
- +__avr32_f64_mul_res_nan: /* Return NaN. */
- + mov r11, -1
- + mov r10, -1
- +#if defined(L_avr32_f64_mul)
- + popm r4-r7, pc
- +#else
- + ldm sp++, r5, r6, r7,pc
- +#endif
- +
- +__avr32_f64_mul_res_inf: /* Return INF. */
- + mov r11, 0xfff00000
- + bld lr, 31
- + bst r11, 31
- + mov r10, 0
- +#if defined(L_avr32_f64_mul)
- + popm r4-r7, pc
- +#else
- + ldm sp++, r5, r6, r7,pc
- +#endif
- +
- +__avr32_f64_mul_op1_zero:
- + /* Get sign */
- + eor r11, r11, r9
- + andh r11, 0x8000, COH
- + /* Check if op2 is Inf or NaN. */
- + bfextu r12, r9, 20, 11
- + cp.w r12, 0x7ff
- + retne r12 /* Return 0.0 */
- + /* Return NaN */
- + mov r10, -1
- + mov r11, -1
- + ret r12
- +
- +
- +
- +#endif
- +
- +
- +#if defined(L_avr32_f64_addsub) || defined(L_avr32_f64_addsub_fast)
- + .align 2
- +
- +__avr32_f64_sub_from_add:
- + /* Switch sign on op2 */
- + eorh r9, 0x8000
- +
- +#if defined(L_avr32_f64_addsub_fast)
- + .global __avr32_f64_sub_fast
- + .type __avr32_f64_sub_fast,@function
- +__avr32_f64_sub_fast:
- +#else
- + .global __avr32_f64_sub
- + .type __avr32_f64_sub,@function
- +__avr32_f64_sub:
- +#endif
- +
- + /* op1 in {r11,r10}*/
- + /* op2 in {r9,r8}*/
- +
- +#if defined(L_avr32_f64_addsub_fast)
- + /* If op2 is zero just return op1 */
- + or r12, r8, r9 << 1
- + reteq r12
- +#endif
- +
- + /* Check signs */
- + eor r12, r11, r9
- + /* Different signs, use addition. */
- + brmi __avr32_f64_add_from_sub
- +
- + stm --sp, r5, r6, r7, lr
- +
- + /* Get sign of op1 into r12 */
- + mov r12, r11
- + andh r12, 0x8000, COH
- +
- + /* Remove sign from operands */
- + cbr r11, 31
- + cbr r9, 31
- +
- + /* Put the largest number in [r11, r10]
- + and the smallest number in [r9, r8] */
- + cp r10, r8
- + cpc r11, r9
- + brhs 1f /* Skip swap if operands already correctly ordered*/
- + /* Operands were not correctly ordered, swap them*/
- + mov r7, r11
- + mov r11, r9
- + mov r9, r7
- + mov r7, r10
- + mov r10, r8
- + mov r8, r7
- + eorh r12, 0x8000 /* Invert sign in r12*/
- +1:
- + /* Unpack largest operand - opH */
- + /* exp: r7 */
- + /* sf: r11, r10 */
- + lsr r7, r11, 20 /* Extract exponent */
- + lsl r11, 11 /* Extract mantissa, leave room for implicit bit */
- + or r11, r11, r10>>21
- + lsl r10, 11
- + sbr r11, 31 /* Insert implicit bit */
- +
- +
- + /* Unpack smallest operand - opL */
- + /* exp: r6 */
- + /* sf: r9, r8 */
- + lsr r6, r9, 20 /* Extract exponent */
- + breq __avr32_f64_sub_opL_subnormal /* If either zero or subnormal */
- + lsl r9, 11 /* Extract mantissa, leave room for implicit bit */
- + or r9, r9, r8>>21
- + lsl r8, 11
- + sbr r9, 31 /* Insert implicit bit */
- +
- +
- +__avr32_f64_sub_opL_subnormal_done:
- + /* opH is NaN or Inf. */
- + cp.w r7, 0x7ff
- + breq __avr32_f64_sub_opH_nan_or_inf
- +
- + /* Get shift amount to scale mantissa of op2. */
- + rsub r6, r7
- + breq __avr32_f64_sub_shift_done /* No need to shift, exponents are equal*/
- +
- + /* Scale mantissa [r9, r8] with amount [r6].
- + Uses scratch registers [r5] and [lr].
- + In IEEE mode: must not forget the sticky bits we intend to shift out. */
- +
- + rsub r5,r6,32 /* get (32 - shift count)
- + (if shift count > 32 we get a
- + negative value, but that will
- + work as well in the code below.) */
- +
- + cp.w r6,32 /* handle shifts >= 32 separately */
- + brhs __avr32_f64_sub_longshift
- +
- + /* small (<32) shift amount, both words are part of the shift
- + first remember whether part that is lost contains any 1 bits ... */
- + lsl lr,r8,r5 /* shift away bits that are part of the
- + final mantissa; the only bits that go
- + to lr are those that will be lost */
- +
- + /* ... and now to the actual shift */
- + lsl r5,r9,r5 /* get bits from msw destined for lsw*/
- + lsr r8,r8,r6 /* shift down lsw of mantissa */
- + lsr r9,r9,r6 /* shift down msw of mantissa */
- + or r8,r5 /* combine these bits with prepared lsw*/
- +#if defined(L_avr32_f64_addsub)
- + cp.w lr,0 /* if any '1' bit in part we lost ...*/
- + srne lr
- + or r8, lr /* ... we need to set sticky bit*/
- +#endif
- +
- +__avr32_f64_sub_shift_done:
- + /* Now subtract the mantissas. */
- + sub r10, r8
- + sbc r11, r11, r9
- +
- + /* Normalize the exponent and mantissa pair stored in
- + [r11,r10] and exponent in [r7]. Needs two scratch registers [r6] and [lr]. */
- + clz r6,r11 /* Check if we have zeros in high bits */
- + breq __avr32_f64_sub_longnormalize_done /* No need for scaling if no zeros in high bits */
- + brcs __avr32_f64_sub_longnormalize
- +
- +
- + /* shift amount is smaller than 32, and involves both msw and lsw*/
- + rsub lr,r6,32 /* shift mantissa */
- + lsl r11,r11,r6
- + lsr lr,r10,lr
- + or r11,lr
- + lsl r10,r10,r6
- +
- + sub r7,r6 /* adjust exponent */
- + brle __avr32_f64_sub_subnormal_result
- +__avr32_f64_sub_longnormalize_done:
- +
- +#if defined(L_avr32_f64_addsub)
- + /* Insert the bits we will remove from the mantissa r9[31:21] */
- + lsl r9, r10, (32 - 11)
- +#else
- + /* Keep the last bit shifted out. */
- + bfextu r9, r10, 10, 1
- +#endif
- +
- + /* Pack final result*/
- + /* Input: [r7]:exp, [r11, r10]:mant, [r12]:sign in MSB */
- + /* Result in [r11,r10] */
- + /* Insert mantissa */
- + lsr r10, 11
- + or r10, r10, r11<<21
- + lsr r11, 11
- + /* Insert exponent and sign bit*/
- + bfins r11, r7, 20, 11
- + or r11, r12
- +
- + /* Round */
- +__avr32_f64_sub_round:
- +#if defined(L_avr32_f64_addsub)
- + mov_imm r7, 0x80000000
- + bld r10, 0
- + subne r7, -1
- +
- + cp.w r9, r7
- + srhs r9
- +#endif
- + add r10, r9
- + acr r11
- +
- + /* Return result in [r11,r10] */
- + ldm sp++, r5, r6, r7,pc
- +
- +
- +
- +__avr32_f64_sub_opL_subnormal:
- + /* Extract the mantissa */
- + lsl r9, 11 /* Extract mantissa, leave room for implicit bit */
- + or r9, r9, r8>>21
- + lsl r8, 11
- +
- + /* Set exponent to 1 if we do not have a zero. */
- + or lr, r9, r8
- + movne r6,1
- +
- + /* Check if opH is also subnormal. If so, clear implicit bit in r11*/
- + rsub lr, r7, 0
- + moveq r7,1
- + bst r11, 31
- +
- + /* Check if opH is zero, if so set exponent to 0. */
- + or lr, r11, r10
- + moveq r7,0
- +
- + rjmp __avr32_f64_sub_opL_subnormal_done
- +
- +__avr32_f64_sub_opH_nan_or_inf:
- + /* Check if opH is NaN, if so return NaN */
- + cbr r11, 31
- + or lr, r11, r10
- + brne __avr32_f64_sub_return_nan
- +
- + /* opH is Inf. */
- + /* Check if opL is Inf. or NaN */
- + cp.w r6, 0x7ff
- + breq __avr32_f64_sub_return_nan
- + /* Return infinity with correct sign. */
- + or r11, r12, r7 << 20
- + ldm sp++, r5, r6, r7, pc/* opL not Inf or NaN, return opH */
- +__avr32_f64_sub_return_nan:
- + mov r10, -1 /* Generate NaN in r11, r10 */
- + mov r11, -1
- + ldm sp++, r5, r6, r7, pc/* opL Inf or NaN, return NaN */
- +
- +
- +__avr32_f64_sub_subnormal_result:
- +#if defined(L_avr32_f64_addsub)
- + /* Check how much we must scale down the mantissa. */
- + neg r7
- + sub r7, -1 /* We no longer have an implicit bit. */
- + satu r7 >> 0, 6 /* Saturate shift amount to max 63. */
- + cp.w r7, 32
- + brge 0f
- + /* Shift amount <32 */
- + rsub r8, r7, 32
- + lsl r9, r10, r8
- + srne r6
- + lsr r10, r10, r7
- + or r10, r6 /* Sticky bit from the
- + part that was shifted out. */
- + lsl r9, r11, r8
- + or r10, r10, r9
- + lsr r11, r11, r7
- + /* Set exponent */
- + mov r7, 0
- + rjmp __avr32_f64_sub_longnormalize_done
- +0:
- + /* Shift amount >=32 */
- + rsub r8, r7, 64
- + lsl r9, r11, r8
- + or r9, r10
- + srne r6
- + lsr r10, r11, r7
- + or r10, r6 /* Sticky bit from the
- + part that was shifted out. */
- + mov r11, 0
- + /* Set exponent */
- + mov r7, 0
- + rjmp __avr32_f64_sub_longnormalize_done
- +#else
- + /* Just flush subnormals to zero. */
- + mov r10, 0
- + mov r11, 0
- +#endif
- + ldm sp++, r5, r6, r7, pc
- +
- +__avr32_f64_sub_longshift:
- + /* large (>=32) shift amount, only lsw will have bits left after shift.
- + note that shift operations will use ((shift count=r6) mod 32) so
- + we do not need to subtract 32 from shift count. */
- + /* Saturate the shift amount to 63. If the amount
- + is any larger op2 is insignificant. */
- + satu r6 >> 0, 6
- +
- +#if defined(L_avr32_f64_addsub)
- + /* first remember whether part that is lost contains any 1 bits ... */
- + moveq lr, r8 /* If shift amount is 32, no bits from msw are lost. */
- + breq 0f
- + lsl lr,r9,r5 /* save all lost bits from msw */
- + or lr,r8 /* also save lost bits (all) from lsw
- + now lr != 0 if we lose any bits */
- +#endif
- +0:
- + /* ... and now to the actual shift */
- + lsr r8,r9,r6 /* Move msw to lsw and shift. */
- + mov r9,0 /* clear msw */
- +#if defined(L_avr32_f64_addsub)
- + cp.w lr,0 /* if any '1' bit in part we lost ...*/
- + srne lr
- + or r8, lr /* ... we need to set sticky bit*/
- +#endif
- + rjmp __avr32_f64_sub_shift_done
- +
- +__avr32_f64_sub_longnormalize:
- + /* shift amount is greater than 32 */
- + clz r6,r10 /* shift mantissa */
- + /* If the resulting mantissa is zero the result is
- + zero so force exponent to zero. */
- + movcs r7, 0
- + movcs r6, 0
- + movcs r12, 0 /* Also clear sign bit. A zero result
- + from subtraction is always +0.0 */
- + subcc r6,-32
- + lsl r11,r10,r6
- + mov r10,0
- + sub r7,r6 /* adjust exponent */
- + brle __avr32_f64_sub_subnormal_result
- + rjmp __avr32_f64_sub_longnormalize_done
- +
- +
- +
- + .align 2
- +__avr32_f64_add_from_sub:
- + /* Switch sign on op2 */
- + eorh r9, 0x8000
- +
- +#if defined(L_avr32_f64_addsub_fast)
- + .global __avr32_f64_add_fast
- + .type __avr32_f64_add_fast,@function
- +__avr32_f64_add_fast:
- +#else
- + .global __avr32_f64_add
- + .type __avr32_f64_add,@function
- +__avr32_f64_add:
- +#endif
- +
- + /* op1 in {r11,r10}*/
- + /* op2 in {r9,r8}*/
- +
- +#if defined(L_avr32_f64_addsub_fast)
- + /* If op2 is zero just return op1 */
- + or r12, r8, r9 << 1
- + reteq r12
- +#endif
- +
- + /* Check signs */
- + eor r12, r11, r9
- + /* Different signs, use subtraction. */
- + brmi __avr32_f64_sub_from_add
- +
- + stm --sp, r5, r6, r7, lr
- +
- + /* Get sign of op1 into r12 */
- + mov r12, r11
- + andh r12, 0x8000, COH
- +
- + /* Remove sign from operands */
- + cbr r11, 31
- + cbr r9, 31
- +
- + /* Put the number with the largest exponent in [r11, r10]
- + and the number with the smallest exponent in [r9, r8] */
- + cp r11, r9
- + brhs 1f /* Skip swap if operands already correctly ordered */
- + /* Operands were not correctly ordered, swap them */
- + mov r7, r11
- + mov r11, r9
- + mov r9, r7
- + mov r7, r10
- + mov r10, r8
- + mov r8, r7
- +1:
- + mov lr, 0 /* Set sticky bits to zero */
- + /* Unpack largest operand - opH */
- + /* exp: r7 */
- + /* sf: r11, r10 */
- + bfextu r7, r11, 20, 11 /* Extract exponent */
- + bfextu r11, r11, 0, 20 /* Extract mantissa */
- + sbr r11, 20 /* Insert implicit bit */
- +
- + /* Unpack smallest operand - opL */
- + /* exp: r6 */
- + /* sf: r9, r8 */
- + bfextu r6, r9, 20, 11 /* Extract exponent */
- + breq __avr32_f64_add_op2_subnormal
- + bfextu r9, r9, 0, 20 /* Extract mantissa */
- + sbr r9, 20 /* Insert implicit bit */
- +
- +2:
- + /* opH is NaN or Inf. */
- + cp.w r7, 0x7ff
- + breq __avr32_f64_add_opH_nan_or_inf
- +
- + /* Get shift amount to scale mantissa of op2. */
- + rsub r6, r7
- + breq __avr32_f64_add_shift_done /* No need to shift, exponents are equal*/
- +
- + /* Scale mantissa [r9, r8] with amount [r6].
- + Uses scratch registers [r5] and [lr].
- + In IEEE mode: must not forget the sticky bits we intend to shift out. */
- + rsub r5,r6,32 /* get (32 - shift count)
- + (if shift count > 32 we get a
- + negative value, but that will
- + work as well in the code below.) */
- +
- + cp.w r6,32 /* handle shifts >= 32 separately */
- + brhs __avr32_f64_add_longshift
- +
- + /* small (<32) shift amount, both words are part of the shift
- + first remember whether part that is lost contains any 1 bits ... */
- + lsl lr,r8,r5 /* shift away bits that are part of the
- + final mantissa; the only bits that go
- + to lr are those that will be lost */
- +
- + /* ... and now to the actual shift */
- + lsl r5,r9,r5 /* get bits from msw destined for lsw*/
- + lsr r8,r8,r6 /* shift down lsw of mantissa */
- + lsr r9,r9,r6 /* shift down msw of mantissa */
- + or r8,r5 /* combine these bits with prepared lsw*/
- +
- +__avr32_f64_add_shift_done:
- + /* Now add the mantissas. */
- + add r10, r8
- + adc r11, r11, r9
- +
- + /* Check if we overflowed. */
- + bld r11, 21
- + breq __avr32_f64_add_res_of
- +
- +__avr32_f64_add_res_of_done:
- +
- + /* Pack final result*/
- + /* Input: [r7]:exp, [r11, r10]:mant, [r12]:sign in MSB */
- + /* Result in [r11,r10] */
- + /* Insert exponent and sign bit*/
- + bfins r11, r7, 20, 11
- + or r11, r12
- +
- + /* Round */
- +__avr32_f64_add_round:
- +#if defined(L_avr32_f64_addsub)
- + bfextu r12, r10, 0, 1 /* Extract parity bit.*/
- + or lr, r12 /* or it together with the sticky bits. */
- + eorh lr, 0x8000 /* Toggle round bit. */
- + /* We should now round up by adding one for the following cases:
- +
- + halfway sticky|parity round-up
- + 0 x no
- + 1 0 no
- + 1 1 yes
- +
- + Since we have inverted the halfway bit we can use the satu instruction
- + by saturating to 1 bit to implement this.
- + */
- + satu lr >> 0, 1
- +#else
- + lsr lr, 31
- +#endif
- + add r10, lr
- + acr r11
- +
- + /* Return result in [r11,r10] */
- + ldm sp++, r5, r6, r7,pc
- +
- +
- +__avr32_f64_add_opH_nan_or_inf:
- + /* Check if opH is NaN, if so return NaN */
- + cbr r11, 20
- + or lr, r11, r10
- + brne __avr32_f64_add_return_nan
- +
- + /* opH is Inf. */
- + /* Check if opL is Inf. or NaN */
- + cp.w r6, 0x7ff
- + breq __avr32_f64_add_opL_nan_or_inf
- + ldm sp++, r5, r6, r7, pc/* opL not Inf or NaN, return opH */
- +__avr32_f64_add_opL_nan_or_inf:
- + cbr r9, 20
- + or lr, r9, r8
- + brne __avr32_f64_add_return_nan
- + mov r10, 0 /* Generate Inf in r11, r10 */
- + mov_imm r11, 0x7ff00000
- + or r11, r12 /* Put sign bit back */
- + ldm sp++, r5, r6, r7, pc/* opL Inf, return Inf */
- +__avr32_f64_add_return_nan:
- + mov r10, -1 /* Generate NaN in r11, r10 */
- + mov r11, -1
- + ldm sp++, r5, r6, r7, pc/* opL Inf or NaN, return NaN */
- +
- +
- +__avr32_f64_add_longshift:
- + /* large (>=32) shift amount, only lsw will have bits left after shift.
- + note that shift operations will use ((shift count=r6) mod 32) so
- + we do not need to subtract 32 from shift count. */
- + /* Saturate the shift amount to 63. If the amount
- + is any larger op2 is insignificant. */
- + satu r6 >> 0, 6
- + /* If shift amount is 32 there are no bits from the msw that are lost. */
- + moveq lr, r8
- + breq 0f
- + /* first remember whether part that is lost contains any 1 bits ... */
- + lsl lr,r9,r5 /* save all lost bits from msw */
- +#if defined(L_avr32_f64_addsub)
- + cp.w r8, 0
- + srne r8
- + or lr,r8 /* also save lost bits (all) from lsw
- + now lr != 0 if we lose any bits */
- +#endif
- +0:
- + /* ... and now to the actual shift */
- + lsr r8,r9,r6 /* msw -> lsw and make rest of shift inside lsw*/
- + mov r9,0 /* clear msw */
- + rjmp __avr32_f64_add_shift_done
- +
- +__avr32_f64_add_res_of:
- + /* We overflowed. Scale down mantissa by shifting right one position. */
- + or lr, lr, lr << 1 /* Remember stickybits*/
- + lsr r11, 1
- + ror r10
- + ror lr
- + sub r7, -1 /* Increment exponent */
- +
- + /* Clear mantissa to set result to Inf if the exponent is 0x7ff. */
- + cp.w r7, 0x7ff
- + moveq r10, 0
- + moveq r11, 0
- + moveq lr, 0
- + rjmp __avr32_f64_add_res_of_done
- +
- +__avr32_f64_add_op2_subnormal:
- + /* Set exponent to 1 */
- + mov r6, 1
- +
- + /* Check if opH is also subnormal. */
- + cp.w r7, 0
- + brne 2b
- +
- + cbr r11, 20
- + /* Both operands are subnormal. Just add the mantissas
- + and the exponent will automatically be set to 1 if
- + we overflow into a normal number. */
- + add r10, r8
- + adc r11, r11, r9
- +
- + /* Add sign bit */
- + or r11, r12
- +
- + /* Return result in [r11,r10] */
- + ldm sp++, r5, r6, r7,pc
- +
- +
- +
- +#endif
- +
- +#ifdef L_avr32_f64_to_u32
- + /* This goes into L_fixdfsi */
- +#endif
- +
- +
- +#ifdef L_avr32_f64_to_s32
- + .global __avr32_f64_to_u32
- + .type __avr32_f64_to_u32,@function
- +__avr32_f64_to_u32:
- + cp.w r11, 0
- + retmi 0 /* Negative returns 0 */
- +
- + /* Fallthrough to df to signed si conversion */
- + .global __avr32_f64_to_s32
- + .type __avr32_f64_to_s32,@function
- +__avr32_f64_to_s32:
- + lsl r12,r11,1
- + lsr r12,21 /* extract exponent*/
- + sub r12,1023 /* convert to unbiased exponent.*/
- + retlo 0 /* too small exponent implies zero. */
- +
- +1:
- + rsub r12,r12,31 /* shift count = 31 - exponent */
- + mov r9,r11 /* save sign for later...*/
- + lsl r11,11 /* remove exponent and sign*/
- + sbr r11,31 /* add implicit bit*/
- + or r11,r11,r10>>21 /* get rest of bits from lsw of double */
- + lsr r11,r11,r12 /* shift down mantissa to final place */
- + lsl r9,1 /* sign -> carry */
- + retcc r11 /* if positive, we are done */
- + neg r11 /* if negative float, negate result */
- + ret r11
- +
- +#endif /* L_avr32_f64_to_s32 */
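A bit-level C model of the conversion path above may help; this is an illustrative sketch only (the function name is mine), and like the assembly it returns 0 for |x| < 1 and leaves integer overflow unhandled:

    #include <stdint.h>

    static int32_t f64_to_s32(uint64_t bits)   /* raw IEEE-754 encoding */
    {
        int exp = (int)((bits >> 52) & 0x7ff) - 1023;  /* unbiased exponent */
        uint32_t mant;

        if (exp < 0)
            return 0;                           /* |x| < 1.0 -> 0 */

        /* Top 31 stored mantissa bits with the implicit bit made explicit,
           mirroring 'lsl r11,11; sbr r11,31; or r11,r11,r10>>21'. */
        mant = 0x80000000u | (uint32_t)((bits >> 21) & 0x7fffffffu);
        mant >>= 31 - exp;                      /* align the integer part */

        return (bits >> 63) ? -(int32_t)mant : (int32_t)mant;
    }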
- +
- +#ifdef L_avr32_f64_to_u64
- + /* Actual function is in L_fixdfdi */
- +#endif
- +
- +#ifdef L_avr32_f64_to_s64
- + .global __avr32_f64_to_u64
- + .type __avr32_f64_to_u64,@function
- +__avr32_f64_to_u64:
- + cp.w r11,0
- + /* Negative numbers return zero */
- + movmi r10, 0
- + movmi r11, 0
- + retmi r11
- +
- +
- +
- + /* Fallthrough */
- + .global __avr32_f64_to_s64
- + .type __avr32_f64_to_s64,@function
- +__avr32_f64_to_s64:
- + lsl r9,r11,1
- + lsr r9,21 /* get exponent*/
- + sub r9,1023 /* convert to correct range*/
- + /* Return zero if exponent too small */
- + movlo r10, 0
- + movlo r11, 0
- + retlo r11
- +
- + mov r8,r11 /* save sign for later...*/
- +1:
- + lsl r11,11 /* remove exponent */
- + sbr r11,31 /* add implicit bit*/
- + or r11,r11,r10>>21 /* get rest of bits from lsw of double*/
- + lsl r10,11 /* align lsw correctly as well */
- + rsub r9,r9,63 /* shift count = 63 - exponent */
- + breq 1f
- +
- + cp.w r9,32 /* is shift count more than one reg? */
- + brhs 0f
- +
- + mov r12,r11 /* save msw */
- + lsr r10,r10,r9 /* small shift count, shift down lsw */
- + lsr r11,r11,r9 /* small shift count, shift down msw */
- + rsub r9,r9,32 /* get 32-size of shifted out tail */
- + lsl r12,r12,r9 /* align part to move from msw to lsw */
- + or r10,r12 /* combine to get new lsw */
- + rjmp 1f
- +
- +0:
- + lsr r10,r11,r9 /* large shift count, only lsw gets bits;
- + note that shift count is modulo 32*/
- + mov r11,0 /* msw will be 0 */
- +
- +1:
- + lsl r8,1 /* sign -> carry */
- + retcc r11 /* if positive, we are done */
- +
- + neg r11 /* if negative float, negate result */
- + neg r10
- + scr r11
- + ret r11
- +
- +#endif
- +
- +#ifdef L_avr32_u32_to_f64
- + /* Code located in L_floatsidf */
- +#endif
- +
- +#ifdef L_avr32_s32_to_f64
- + .global __avr32_u32_to_f64
- + .type __avr32_u32_to_f64,@function
- +__avr32_u32_to_f64:
- + sub r11, r12, 0 /* Move to r11 and force Z flag to be updated */
- + mov r12, 0 /* always positive */
- + rjmp 0f /* Jump to common code for floatsidf */
- +
- + .global __avr32_s32_to_f64
- + .type __avr32_s32_to_f64,@function
- +__avr32_s32_to_f64:
- + mov r11, r12 /* Keep original value in r12 for sign */
- + abs r11 /* Absolute value of r12 */
- +0:
- + mov r10,0 /* let remaining bits be zero */
- + reteq r11 /* zero long will return zero float */
- +
- + pushm lr
- + mov r9,31+1023 /* set exponent */
- +
- + normalize_df r9 /*exp*/, r10, r11 /* mantissa */, r8, lr /* scratch */
- +
- + /* Check if a subnormal result was created */
- + cp.w r9, 0
- + brgt 0f
- +
- + adjust_subnormal_df r9 /* exp */, r10, r11 /* Mantissa */, r12 /*sign*/, r8, lr /* scratch */
- + popm pc
- +0:
- +
- + /* Round result */
- + round_df r9 /*exp*/, r10, r11 /* Mantissa */, r8 /*scratch*/
- + cp.w r9,0x7ff
- + brlt 0f
- + /*Return infinity */
- + mov r10, 0
- + mov_imm r11, 0xffe00000
- + rjmp __floatsidf_return_op1
- +
- +0:
- +
- + /* Pack */
- + pack_df r9 /*exp*/, r10, r11 /* mantissa */, r10, r11 /* Output df number*/
- +__floatsidf_return_op1:
- + lsl r12,1 /* shift in sign bit */
- + ror r11
- +
- + popm pc
- +#endif
- +
- +
- +#ifdef L_avr32_f32_cmp_eq
- + .global __avr32_f32_cmp_eq
- + .type __avr32_f32_cmp_eq,@function
- +__avr32_f32_cmp_eq:
- + cp.w r12, r11
- + breq 0f
- + /* If not equal check for +/-0 */
- + /* Or together the two values and shift out the sign bit.
- + If the result is zero, then the two values are both zero. */
- + or r12, r11
- + lsl r12, 1
- + reteq 1
- + ret 0
- +0:
- + /* Numbers were equal. Check for NaN or Inf */
- + mov_imm r11, 0xff000000
- + lsl r12, 1
- + cp.w r12, r11
- + retls 1 /* 0 if NaN, 1 otherwise */
- + ret 0
- +#endif
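In C terms the comparison above does roughly the following (illustrative sketch; the helper name is mine):

    #include <stdint.h>

    /* Single-float equality on raw encodings: +0 == -0, NaN != NaN. */
    static int f32_eq(uint32_t a, uint32_t b)
    {
        if (a != b)
            /* Different encodings are still equal iff both are +/-0:
               OR them and shift out the sign bit. */
            return ((a | b) << 1) == 0;

        /* Identical encodings: equal unless the value is a NaN
           (all-ones exponent with a non-zero mantissa). */
        return (a << 1) <= 0xff000000u;
    }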
- +
- +#if defined(L_avr32_f32_cmp_ge) || defined(L_avr32_f32_cmp_lt)
- +#ifdef L_avr32_f32_cmp_ge
- + .global __avr32_f32_cmp_ge
- + .type __avr32_f32_cmp_ge,@function
- +__avr32_f32_cmp_ge:
- +#endif
- +#ifdef L_avr32_f32_cmp_lt
- + .global __avr32_f32_cmp_lt
- + .type __avr32_f32_cmp_lt,@function
- +__avr32_f32_cmp_lt:
- +#endif
- + lsl r10, r12, 1 /* Remove sign bits */
- + lsl r9, r11, 1
- + subfeq r10, 0
- +#ifdef L_avr32_f32_cmp_ge
- + reteq 1 /* Both numbers are zero. Return true. */
- +#endif
- +#ifdef L_avr32_f32_cmp_lt
- + reteq 0 /* Both numbers are zero. Return false. */
- +#endif
- + mov_imm r8, 0xff000000
- + cp.w r10, r8
- + rethi 0 /* Op0 is NaN */
- + cp.w r9, r8
- + rethi 0 /* Op1 is NaN */
- +
- + eor r8, r11, r12
- + bld r12, 31
- +#ifdef L_avr32_f32_cmp_ge
- + srcc r8 /* Set result to true if op0 is positive*/
- +#endif
- +#ifdef L_avr32_f32_cmp_lt
- + srcs r8 /* Set result to true if op0 is negative*/
- +#endif
- + retmi r8 /* Return if signs are different */
- + brcs 0f /* Both signs negative? */
- +
- + /* Both signs positive */
- + cp.w r12, r11
- +#ifdef L_avr32_f32_cmp_ge
- + reths 1
- + retlo 0
- +#endif
- +#ifdef L_avr32_f32_cmp_lt
- + reths 0
- + retlo 1
- +#endif
- +0:
- + /* Both signs negative */
- + cp.w r11, r12
- +#ifdef L_avr32_f32_cmp_ge
- + reths 1
- + retlo 0
- +#endif
- +#ifdef L_avr32_f32_cmp_lt
- + reths 0
- + retlo 1
- +#endif
- +#endif
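The same sign/magnitude reasoning, written out in C for the >= variant (a sketch, name mine; the < variant simply flips the results):

    #include <stdint.h>

    static int f32_ge(uint32_t a, uint32_t b)   /* raw encodings */
    {
        uint32_t ma = a << 1, mb = b << 1;      /* magnitudes, sign gone */

        if (ma > 0xff000000u || mb > 0xff000000u)
            return 0;                           /* NaN: unordered, false */
        if (ma == 0 && mb == 0)
            return 1;                           /* +0 == -0 */
        if ((a ^ b) >> 31)                      /* different signs */
            return !(a >> 31);                  /* true iff a >= 0 */
        if (a >> 31)                            /* both negative */
            return b >= a;                      /* order reverses */
        return a >= b;                          /* both positive */
    }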
- +
- +
- +#ifdef L_avr32_f64_cmp_eq
- + .global __avr32_f64_cmp_eq
- + .type __avr32_f64_cmp_eq,@function
- +__avr32_f64_cmp_eq:
- + cp.w r10,r8
- + cpc r11,r9
- + breq 0f
- +
- + /* Args were not equal*/
- + /* Both args could be zero with different sign bits */
- + lsl r11,1 /* get rid of sign bits */
- + lsl r9,1
- + or r11,r10 /* Check if all bits are zero */
- + or r11,r9
- + or r11,r8
- + reteq 1 /* If all bits are zero the arguments are
- + equal, so return 1; otherwise return 0 */
- + ret 0
- +0:
- + /* check for NaN */
- + lsl r11,1
- + mov_imm r12, 0xffe00000
- + cp.w r10,0
- + cpc r11,r12 /* check if nan or inf */
- + retls 1 /* If Arg is NaN return 0 else 1*/
- + ret 0 /* Return */
- +
- +#endif
- +
- +
- +#if defined(L_avr32_f64_cmp_ge) || defined(L_avr32_f64_cmp_lt)
- +
- +#ifdef L_avr32_f64_cmp_ge
- + .global __avr32_f64_cmp_ge
- + .type __avr32_f64_cmp_ge,@function
- +__avr32_f64_cmp_ge:
- +#endif
- +#ifdef L_avr32_f64_cmp_lt
- + .global __avr32_f64_cmp_lt
- + .type __avr32_f64_cmp_lt,@function
- +__avr32_f64_cmp_lt:
- +#endif
- +
- + /* compare magnitude of op1 and op2 */
- + st.w --sp, lr
- + st.w --sp, r7
- + lsl r11,1 /* Remove sign bit of op1 */
- + srcs r12 /* Sign op1 to lsb of r12*/
- + lsl r9,1 /* Remove sign bit of op2 */
- + srcs r7
- + rol r12 /* Sign of op2 to lsb of r12, sign of op1 to bit 1 of r12*/
- +
- +
- + /* Check for Nan */
- + mov_imm lr, 0xffe00000
- + cp.w r10,0
- + cpc r11,lr
- + brhi 0f /* We have NaN */
- + cp.w r8,0
- + cpc r9,lr
- + brhi 0f /* We have NaN */
- +
- + cp.w r11, 0
- + subfeq r10, 0
- + breq 3f /* op1 zero */
- + ld.w r7, sp++
- + ld.w lr, sp++
- +
- + cp.w r12,3 /* both operands negative ?*/
- + breq 1f
- +
- + cp.w r12,1 /* both operands positive? */
- + brlo 2f
- +
- + /* Different signs. If sign of op1 is negative the difference
- + between op1 and op2 will always be negative, and if op1 is
- + positive the difference will always be positive */
- +#ifdef L_avr32_f64_cmp_ge
- + reteq 1
- + retne 0
- +#endif
- +#ifdef L_avr32_f64_cmp_lt
- + reteq 0
- + retne 1
- +#endif
- +
- +2:
- + /* Both operands positive. Just compute the difference */
- + cp.w r10,r8
- + cpc r11,r9
- +#ifdef L_avr32_f64_cmp_ge
- + reths 1
- + retlo 0
- +#endif
- +#ifdef L_avr32_f64_cmp_lt
- + reths 0
- + retlo 1
- +#endif
- +
- +1:
- + /* Both operands negative. Compute the difference with operands switched */
- + cp r8,r10
- + cpc r9,r11
- +#ifdef L_avr32_f64_cmp_ge
- + reths 1
- + retlo 0
- +#endif
- +#ifdef L_avr32_f64_cmp_lt
- + reths 0
- + retlo 1
- +#endif
- +
- +0:
- + ld.w r7, sp++
- + popm pc, r12=0
- +
- +3:
- + cp.w r7, 1 /* Check sign bit from r9 */
- +#ifdef L_avr32_f64_cmp_ge
- + sreq r12 /* If op2 is negative then op1 >= op2. */
- +#endif
- +#ifdef L_avr32_f64_cmp_lt
- + srne r12 /* If op2 is positive then op1 < op2. */
- +#endif
- + cp.w r9, 0
- + subfeq r8, 0
- + ld.w r7, sp++
- + ld.w lr, sp++
- +#ifdef L_avr32_f64_cmp_ge
- + reteq 1 /* Both operands are zero. Return true. */
- +#endif
- +#ifdef L_avr32_f64_cmp_lt
- + reteq 0 /* Both operands are zero. Return false. */
- +#endif
- + ret r12
- +#endif
- +
- +
- +#if defined(L_avr32_f64_div) || defined(L_avr32_f64_div_fast)
- + .align 2
- +
- +#if defined(L_avr32_f64_div_fast)
- + .global __avr32_f64_div_fast
- + .type __avr32_f64_div_fast,@function
- +__avr32_f64_div_fast:
- +#else
- + .global __avr32_f64_div
- + .type __avr32_f64_div,@function
- +__avr32_f64_div:
- +#endif
- + stm --sp, r0, r1, r2, r3, r4, r5, r6, r7,lr
- + /* op1 in {r11,r10}*/
- + /* op2 in {r9,r8}*/
- + eor lr, r11, r9 /* MSB(lr) = Sign(op1) ^ Sign(op2) */
- +
- +
- + /* Unpack op1 to 2.62 format*/
- + /* exp: r7 */
- + /* sf: r11, r10 */
- + lsr r7, r11, 20 /* Extract exponent */
- +
- + lsl r11, 9 /* Extract mantissa, leave room for implicit bit */
- + or r11, r11, r10>>23
- + lsl r10, 9
- + sbr r11, 29 /* Insert implicit bit */
- + andh r11, 0x3fff /*Mask last part of exponent since we use 2.62 format*/
- +
- + cbr r7, 11 /* Clear sign bit */
- + /* Check if normalization is needed */
- + breq 11f /*If number is subnormal, normalize it */
- +22:
- + cp r7, 0x7ff
- + brge 2f /* Check op1 for NaN or Inf */
- +
- + /* Unpack op2 to 2.62 format*/
- + /* exp: r6 */
- + /* sf: r9, r8 */
- + lsr r6, r9, 20 /* Extract exponent */
- +
- + lsl r9, 9 /* Extract mantissa, leave room for implicit bit */
- + or r9, r9, r8>>23
- + lsl r8, 9
- + sbr r9, 29 /* Insert implicit bit */
- + andh r9, 0x3fff /*Mask last part of exponent since we use 2.62 format*/
- +
- + cbr r6, 11 /* Clear sign bit */
- + /* Check if normalization is needed */
- + breq 13f /*If number is subnormal, normalize it */
- +23:
- + cp r6, 0x7ff
- + brge 3f /* Check op2 for NaN or Inf */
- +
- + /* Calculate new exponent */
- + sub r7, r6
- + sub r7,-1023
- +
- + /* Divide */
- + /* Approximating 1/d with the following recurrence: */
- + /* R[j+1] = R[j]*(2-R[j]*d) */
- + /* Using 2.62 format */
- + /* TWO: r12 */
- + /* d = op2 = divisor (2.62 format): r9,r8 */
- + /* Multiply result : r5, r4 */
- + /* Initial guess : r3, r2 */
- + /* New approximations : r3, r2 */
- + /* op1 = Dividend (2.62 format) : r11, r10 */
- +
- + mov_imm r12, 0x80000000
- +
- + /* Load initial guess, using look-up table */
- + /* Initial guess is of format 01.XY, where XY is constructed as follows: */
- + /* Let d be of following format: 00.1xy....., then XY=~xy */
- + /* For d=00.100 = 0.5 -> initial guess=01.11 = 1.75 */
- + /* For d=00.101 = 0.625 -> initial guess=01.10 = 1.5 */
- + /* For d=00.110 = 0.75 -> initial guess=01.01 = 1.25 */
- + /* For d=00.111 = 0.875 -> initial guess=01.00 = 1.0 */
- + /* r2 is also part of the reg pair forming the initial guess, but it */
- + /* is kept uninitialized to save one cycle since it has such low significance. */
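A C sketch of what one 32-bit round computes, together with the table-driven initial guess, might look like this (working in 2.30 fixed point on the high word; helper names are mine, and the real code keeps everything in 2.62 across register pairs):

    #include <stdint.h>

    /* d is in [0.5, 1) in 2.30 format; guess 01.~x~y per the table. */
    static uint32_t recip_guess(uint32_t d)
    {
        return 0x40000000u | ((~d << 1) & 0x30000000u);
    }

    /* One Newton-Raphson step R[j+1] = R[j]*(2 - R[j]*d); each step
       roughly doubles the number of correct bits. */
    static uint32_t recip_step(uint32_t r, uint32_t d)
    {
        const uint32_t TWO = 0x80000000u;            /* 2.0 in 2.30 */
        /* high word of r*d, rescaled to 2.30 (the 'r5<<2' above) */
        uint32_t rd = (uint32_t)(((uint64_t)r * d) >> 32) << 2;
        uint32_t e  = TWO - rd;                      /* 2 - R[j]*d  */
        return (uint32_t)(((uint64_t)r * e) >> 32) << 2;
    }

Three such 32-bit rounds give roughly a full word of correct bits; the two 64-bit rounds that follow extend this to the full double-precision mantissa.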
- +
- + lsr r3, r12, 1
- + bfextu r4, r9, 27, 2
- + com r4
- + bfins r3, r4, 28, 2
- +
- + /* First approximation */
- + /* Approximating to 32 bits */
- + /* r5 = R[j]*d */
- + mulu.d r4, r3, r9
- + /* r5 = 2-R[j]*d */
- + sub r5, r12, r5<<2
- + /* r3 = R[j]*(2-R[j]*d) */
- + mulu.d r4, r3, r5
- + lsl r3, r5, 2
- +
- + /* Second approximation */
- + /* Approximating to 32 bits */
- + /* r5 = R[j]*d */
- + mulu.d r4, r3, r9
- + /* r5 = 2-R[j]*d */
- + sub r5, r12, r5<<2
- + /* r3 = R[j]*(2-R[j]*d) */
- + mulu.d r4, r3, r5
- + lsl r3, r5, 2
- +
- + /* Third approximation */
- + /* Approximating to 32 bits */
- + /* r5 = R[j]*d */
- + mulu.d r4, r3, r9
- + /* r5 = 2-R[j]*d */
- + sub r5, r12, r5<<2
- + /* r3 = R[j]*(2-R[j]*d) */
- + mulu.d r4, r3, r5
- + lsl r3, r5, 2
- +
- + /* Fourth approximation */
- + /* Approximating to 64 bits */
- + /* r5,r4 = R[j]*d */
- + mul_approx_df r3 /*ah*/, r2 /*al*/, r9 /*bh*/, r8 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
- + lsl r5, 2
- + or r5, r5, r4>>30
- + lsl r4, 2
- + /* r5,r4 = 2-R[j]*d */
- + neg r4
- + sbc r5, r12, r5
- + /* r3,r2 = R[j]*(2-R[j]*d) */
- + mul_approx_df r3 /*ah*/, r2 /*al*/, r5 /*bh*/, r4 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
- + lsl r3, r5, 2
- + or r3, r3, r4>>30
- + lsl r2, r4, 2
- +
- +
- + /* Fifth approximation */
- + /* Approximating to 64 bits */
- + /* r5,r4 = R[j]*d */
- + mul_approx_df r3 /*ah*/, r2 /*al*/, r9 /*bh*/, r8 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
- + lsl r5, 2
- + or r5, r5, r4>>30
- + lsl r4, 2
- + /* r5,r4 = 2-R[j]*d */
- + neg r4
- + sbc r5, r12, r5
- + /* r3,r2 = R[j]*(2-R[j]*d) */
- + mul_approx_df r3 /*ah*/, r2 /*al*/, r5 /*bh*/, r4 /*bl*/, r5 /*rh*/, r4 /*rl*/, r1 /*sh*/, r0 /*sl*/
- + lsl r3, r5, 2
- + or r3, r3, r4>>30
- + lsl r2, r4, 2
- +
- +
- + /* Multiply with dividend to get quotient */
- + mul_approx_df r3 /*ah*/, r2 /*al*/, r11 /*bh*/, r10 /*bl*/, r3 /*rh*/, r2 /*rl*/, r1 /*sh*/, r0 /*sl*/
- +
- +
- + /* To increase speed, this result is not corrected before final rounding.*/
- + /* This may differ from IEEE-compliant code by 1 ULP.*/
- +
- +
- + /* Adjust exponent and mantissa */
- + /* r7:exp, [r3, r2]:mant, [r5, r4]:scratch*/
- + /* Mantissa may be of the format 0.xxxx or 1.xxxx. */
- + /* In the first case, shift one pos to left.*/
- + bld r3, 31-3
- + breq 0f
- + lsl r2, 1
- + rol r3
- + sub r7, 1
- +#if defined(L_avr32_f64_div)
- + /* We must scale down the dividend to 5.59 format. */
- + lsr r10, 3
- + or r10, r10, r11 << 29
- + lsr r11, 3
- + rjmp 1f
- +#endif
- +0:
- +#if defined(L_avr32_f64_div)
- + /* We must scale down the dividend to 6.58 format. */
- + lsr r10, 4
- + or r10, r10, r11 << 28
- + lsr r11, 4
- +1:
- +#endif
- + cp r7, 0
- + brle __avr32_f64_div_res_subnormal /* Result was subnormal. */
- +
- +
- +#if defined(L_avr32_f64_div)
- + /* In order to round correctly we calculate the remainder:
- + Remainder = dividend[r11:r10] - divisor[r9:r8]*quotient[r3:r2]
- + for the case when the quotient is halfway between the round-up
- + value and the round-down value. If the remainder is negative
- + it means that the quotient was too big and should not be
- + rounded up; if the remainder is positive the quotient was too small
- + and we need to round up. If the remainder is zero the
- + quotient is exact, but since we need to remove the guard bit we should
- + round to even. */
- +
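The decision described above reduces to a three-way comparison; a C sketch with 64-bit stand-ins for the wider register pairs (names mine):

    #include <stdint.h>

    /* With the quotient's guard bit forced to 1, quotient*divisor sits
       exactly at the halfway point, so the sign of the remainder picks
       the rounding direction; an exact tie rounds to even. Returns the
       final rounding increment. */
    static int round_increment(uint64_t dividend, uint64_t halfway_product,
                               int mantissa_odd)
    {
        if (dividend > halfway_product)
            return 1;              /* positive remainder: round up   */
        if (dividend < halfway_product)
            return 0;              /* negative remainder: round down */
        return mantissa_odd;       /* tie: round to even             */
    }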
- + /* Truncate and add guard bit. */
- + andl r2, 0xff00
- + orl r2, 0x0080
- +
- +
- + /* Now do the multiplication. The quotient has the format 4.60
- + while the divisor has the format 2.62 which gives a result
- + of 6.58 */
- + mulu.d r0, r3, r8
- + macu.d r0, r2, r9
- + mulu.d r4, r2, r8
- + mulu.d r8, r3, r9
- + add r5, r0
- + adc r8, r8, r1
- + acr r9
- +
- +
- + /* Check if remainder is positive, negative or equal. */
- + bfextu r12, r2, 8, 1 /* Get parity bit into bit 0 of r12 */
- + cp r4, 0
- + cpc r5
- +__avr32_f64_div_round_subnormal:
- + cpc r8, r10
- + cpc r9, r11
- + srlo r6 /* Remainder positive: we need to round up.*/
- + moveq r6, r12 /* Remainder zero: round up if mantissa odd. */
- +#else
- + bfextu r6, r2, 7, 1 /* Get guard bit */
- +#endif
- + /* Final packing, scale down mantissa. */
- + lsr r10, r2, 8
- + or r10, r10, r3<<24
- + lsr r11, r3, 8
- + /* Insert exponent and sign bit*/
- + bfins r11, r7, 20, 11
- + bld lr, 31
- + bst r11, 31
- +
- + /* Final rounding */
- + add r10, r6
- + acr r11
- +
- + /* Return result in [r11,r10] */
- + ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
- +
- +
- +2:
- + /* Op1 is NaN or inf */
- + andh r11, 0x000f /* Extract mantissa */
- + or r11, r10
- + brne 16f /* Return NaN if op1 is NaN */
- + /* Op1 is inf check op2 */
- + lsr r6, r9, 20 /* Extract exponent */
- + cbr r6, 11 /* Clear sign bit */
- + cp r6, 0x7ff
- + brne 17f /* Inf/number gives inf, return inf */
- + rjmp 16f /* The rest gives NaN*/
- +
- +3:
- + /* Op1 is a valid number. Op 2 is NaN or inf */
- + andh r9, 0x000f /* Extract mantissa */
- + or r9, r8
- + brne 16f /* Return NaN if op2 is NaN */
- + rjmp 15f /* Op2 was inf, return zero*/
- +
- +11: /* Op1 was denormal. Fix it. */
- + lsl r11, 3
- + or r11, r11, r10 >> 29
- + lsl r10, 3
- + /* Check if op1 is zero. */
- + or r4, r10, r11
- + breq __avr32_f64_div_op1_zero
- + normalize_df r7 /*exp*/, r10, r11 /*Mantissa*/, r4, r5 /*scratch*/
- + lsr r10, 2
- + or r10, r10, r11 << 30
- + lsr r11, 2
- + rjmp 22b
- +
- +
- +13: /* Op2 was denormal. Fix it */
- + lsl r9, 3
- + or r9, r9, r8 >> 29
- + lsl r8, 3
- + /* Check if op2 is zero. */
- + or r4, r9, r8
- + breq 17f /* Divisor is zero -> return Inf */
- + normalize_df r6 /*exp*/, r8, r9 /*Mantissa*/, r4, r5 /*scratch*/
- + lsr r8, 2
- + or r8, r8, r9 << 30
- + lsr r9, 2
- + rjmp 23b
- +
- +
- +__avr32_f64_div_res_subnormal:/* Divide result was subnormal. */
- +#if defined(L_avr32_f64_div)
- + /* Check how much we must scale down the mantissa. */
- + neg r7
- + sub r7, -1 /* We no longer have an implicit bit. */
- + satu r7 >> 0, 6 /* Saturate shift amount to max 63. */
- + cp.w r7, 32
- + brge 0f
- + /* Shift amount <32 */
- + /* Scale down quotient */
- + rsub r6, r7, 32
- + lsr r2, r2, r7
- + lsl r12, r3, r6
- + or r2, r12
- + lsr r3, r3, r7
- + /* Scale down the dividend to match the scaling of the quotient. */
- + lsl r1, r10, r6
- + lsr r10, r10, r7
- + lsl r12, r11, r6
- + or r10, r12
- + lsr r11, r11, r7
- + mov r0, 0
- + rjmp 1f
- +0:
- + /* Shift amount >=32 */
- + rsub r6, r7, 32
- + moveq r0, 0
- + moveq r12, 0
- + breq 0f
- + lsl r0, r10, r6
- + lsl r12, r11, r6
- +0:
- + lsr r2, r3, r7
- + mov r3, 0
- + /* Scale down the dividend to match the scaling of the quotient. */
- + lsr r1, r10, r7
- + or r1, r12
- + lsr r10, r11, r7
- + mov r11, 0
- +1:
- + /* Start performing the same rounding as done for normal numbers
- + but this time we have scaled the quotient and dividend and hence
- + need a little different comparison. */
- + /* Truncate and add guard bit. */
- + andl r2, 0xff00
- + orl r2, 0x0080
- +
- + /* Now do the multiplication. */
- + mulu.d r6, r3, r8
- + macu.d r6, r2, r9
- + mulu.d r4, r2, r8
- + mulu.d r8, r3, r9
- + add r5, r6
- + adc r8, r8, r7
- + acr r9
- +
- + /* Set exponent to 0 */
- + mov r7, 0
- +
- + /* Check if remainder is positive, negative or equal. */
- + bfextu r12, r2, 8, 1 /* Get parity bit into bit 0 of r12 */
- + cp r4, r0
- + cpc r5, r1
- + /* Now the rest of the rounding is the same as for normals. */
- + rjmp __avr32_f64_div_round_subnormal
- +
- +#endif
- +15:
- + /* Flush to zero for the fast version. */
- + mov r11, lr /*Get correct sign*/
- + andh r11, 0x8000, COH
- + mov r10, 0
- + ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
- +
- +16: /* Return NaN. */
- + mov r11, -1
- + mov r10, 0
- + ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
- +
- +17:
- + /* Check if op1 is zero. */
- + or r4, r10, r11
- + breq __avr32_f64_div_op1_zero
- + /* Return INF. */
- + mov r11, lr /*Get correct sign*/
- + andh r11, 0x8000, COH
- + orh r11, 0x7ff0
- + mov r10, 0
- + ldm sp++, r0, r1, r2, r3, r4, r5, r6, r7,pc
- +
- +__avr32_f64_div_op1_zero:
- + or r5, r8, r9 << 1
- + breq 16b /* 0.0/0.0 -> NaN */
- + bfextu r4, r9, 20, 11
- + cp r4, 0x7ff
- + brne 15b /* Return zero */
- + /* Check if divisor is Inf or NaN */
- + or r5, r8, r9 << 12
- + breq 15b /* Divisor is inf -> return zero */
- + rjmp 16b /* Return NaN */
- +
- +
- +
- +
- +#endif
- +
- +#if defined(L_avr32_f32_addsub) || defined(L_avr32_f32_addsub_fast)
- +
- + .align 2
- +__avr32_f32_sub_from_add:
- + /* Switch sign on op2 */
- + eorh r11, 0x8000
- +
- +#if defined(L_avr32_f32_addsub_fast)
- + .global __avr32_f32_sub_fast
- + .type __avr32_f32_sub_fast,@function
- +__avr32_f32_sub_fast:
- +#else
- + .global __avr32_f32_sub
- + .type __avr32_f32_sub,@function
- +__avr32_f32_sub:
- +#endif
- +
- + /* Check signs */
- + eor r8, r11, r12
- + /* Different signs, use subtraction. */
- + brmi __avr32_f32_add_from_sub
- +
- + /* Get sign of op1 */
- + mov r8, r12
- + andh r12, 0x8000, COH
- +
- + /* Remove sign from operands */
- + cbr r11, 31
- +#if defined(L_avr32_f32_addsub_fast)
- + reteq r8 /* If op2 is zero return op1 */
- +#endif
- + cbr r8, 31
- +
- + /* Put the number with the largest exponent in r10
- + and the number with the smallest exponent in r9 */
- + max r10, r8, r11
- + min r9, r8, r11
- + cp r10, r8 /*If largest operand (in R10) is not equal to op1*/
- + subne r12, 1 /* Subtract 1 from sign, which will invert MSB of r12*/
- + andh r12, 0x8000, COH /*Mask all but MSB*/
- +
- + /* Unpack exponent and mantissa of op1 */
- + lsl r8, r10, 8
- + sbr r8, 31 /* Set implicit bit. */
- + lsr r10, 23
- +
- + /* op1 is NaN or Inf. */
- + cp.w r10, 0xff
- + breq __avr32_f32_sub_op1_nan_or_inf
- +
- + /* Unpack exponent and mantissa of op2 */
- + lsl r11, r9, 8
- + sbr r11, 31 /* Set implicit bit. */
- + lsr r9, 23
- +
- +#if defined(L_avr32_f32_addsub)
- + /* Keep sticky bit for correct IEEE rounding */
- + st.w --sp, r12
- +
- + /* op2 is either zero or subnormal. */
- + breq __avr32_f32_sub_op2_subnormal
- +0:
- + /* Get shift amount to scale mantissa of op2. */
- + sub r12, r10, r9
- +
- + breq __avr32_f32_sub_shift_done
- +
- + /* Saturate the shift amount to 31. If the amount
- + is any larger, op2 is insignificant. */
- + satu r12 >> 0, 5
- +
- + /* Put the remaining bits into r9.*/
- + rsub r9, r12, 32
- + lsl r9, r11, r9
- +
- + /* If the remaining bits are non-zero then we must subtract one
- + more from opL. */
- + subne r8, 1
- + srne r9 /* LSB of r9 represents sticky bits. */
- +
- + /* Shift mantissa of op2 to same decimal point as the mantissa
- + of op1. */
- + lsr r11, r11, r12
- +
- +
- +__avr32_f32_sub_shift_done:
- + /* Now subtract the mantissas. */
- + sub r8, r11
- +
- + ld.w r12, sp++
- +
- + /* Normalize resulting mantissa. */
- + clz r11, r8
- +
- + retcs 0
- + lsl r8, r8, r11
- + sub r10, r11
- + brle __avr32_f32_sub_subnormal_result
- +
- + /* Insert the bits we will remove from the mantissa into r9[31:24] */
- + or r9, r9, r8 << 24
- +#else
- + /* Ignore sticky bit to simplify and speed up rounding */
- + /* op2 is either zero or subnormal. */
- + breq __avr32_f32_sub_op2_subnormal
- +0:
- + /* Get shift amount to scale mantissa of op2. */
- + rsub r9, r10
- +
- + /* Saturate the shift amount to 31. If the amount
- + is any larger, op2 is insignificant. */
- + satu r9 >> 0, 5
- +
- + /* Shift mantissa of op2 to same decimal point as the mantissa
- + of op1. */
- + lsr r11, r11, r9
- +
- + /* Now subtract the mantissas. */
- + sub r8, r11
- +
- + /* Normalize resulting mantissa. */
- + clz r9, r8
- + retcs 0
- + lsl r8, r8, r9
- + sub r10, r9
- + brle __avr32_f32_sub_subnormal_result
- +#endif
- +
- + /* Pack result. */
- + or r12, r12, r8 >> 8
- + bfins r12, r10, 23, 8
- +
- + /* Round */
- +__avr32_f32_sub_round:
- +#if defined(L_avr32_f32_addsub)
- + mov_imm r10, 0x80000000
- + bld r12, 0
- + subne r10, -1
- + cp.w r9, r10
- + subhs r12, -1
- +#else
- + bld r8, 7
- + acr r12
- +#endif
- +
- + ret r12
- +
- +
- +__avr32_f32_sub_op2_subnormal:
- + /* Fix implicit bit and adjust exponent of subnormals. */
- + cbr r11, 31
- + /* Set exponent to 1 if we do not have a zero. */
- + movne r9,1
- +
- + /* Check if op1 is also subnormal. */
- + cp.w r10, 0
- + brne 0b
- +
- + cbr r8, 31
- + /* If op1 is not zero set exponent to 1. */
- + movne r10,1
- +
- + rjmp 0b
- +
- +__avr32_f32_sub_op1_nan_or_inf:
- + /* Check if op1 is NaN, if so return NaN */
- + lsl r11, r8, 1
- + retne -1
- +
- + /* op1 is Inf. */
- + bfins r12, r10, 23, 8 /* Generate Inf in r12 */
- +
- + /* Check if op2 is Inf. or NaN */
- + lsr r11, r9, 23
- + cp.w r11, 0xff
- + retne r12 /* op2 not Inf or NaN, return op1 */
- +
- + ret -1 /* op2 Inf or NaN, return NaN */
- +
- +__avr32_f32_sub_subnormal_result:
- + /* Check if the number is so small that
- + it will be represented as zero. */
- + rsub r10, r10, 9
- + rsub r11, r10, 32
- + retcs 0
- +
- + /* Shift the mantissa into the correct position.*/
- + lsr r10, r8, r10
- + /* Add sign bit. */
- + or r12, r10
- +
- + /* Put the shifted out bits in the most significant part
- + of r8. */
- + lsl r8, r8, r11
- +
- +#if defined(L_avr32_f32_addsub)
- + /* Add all the remainder bits used for rounding into r9 */
- + or r9, r8
- +#else
- + lsr r8, 24
- +#endif
- + rjmp __avr32_f32_sub_round
- +
- +
- + .align 2
- +
- +__avr32_f32_add_from_sub:
- + /* Switch sign on op2 */
- + eorh r11, 0x8000
- +
- +#if defined(L_avr32_f32_addsub_fast)
- + .global __avr32_f32_add_fast
- + .type __avr32_f32_add_fast,@function
- +__avr32_f32_add_fast:
- +#else
- + .global __avr32_f32_add
- + .type __avr32_f32_add,@function
- +__avr32_f32_add:
- +#endif
- +
- + /* Check signs */
- + eor r8, r11, r12
- + /* Different signs, use subtraction. */
- + brmi __avr32_f32_sub_from_add
- +
- + /* Get sign of op1 */
- + mov r8, r12
- + andh r12, 0x8000, COH
- +
- + /* Remove sign from operands */
- + cbr r11, 31
- +#if defined(L_avr32_f32_addsub_fast)
- + reteq r8 /* If op2 is zero return op1 */
- +#endif
- + cbr r8, 31
- +
- + /* Put the number with the largest exponent in r10
- + and the number with the smallest exponent in r9 */
- + max r10, r8, r11
- + min r9, r8, r11
- +
- + /* Unpack exponent and mantissa of op1 */
- + lsl r8, r10, 8
- + sbr r8, 31 /* Set implicit bit. */
- + lsr r10, 23
- +
- + /* op1 is NaN or Inf. */
- + cp.w r10, 0xff
- + breq __avr32_f32_add_op1_nan_or_inf
- +
- + /* Unpack exponent and mantissa of op2 */
- + lsl r11, r9, 8
- + sbr r11, 31 /* Set implicit bit. */
- + lsr r9, 23
- +
- +#if defined(L_avr32_f32_addsub)
- + /* op2 is either zero or subnormal. */
- + breq __avr32_f32_add_op2_subnormal
- +0:
- + /* Keep sticky bit for correct IEEE rounding */
- + st.w --sp, r12
- +
- + /* Get shift amount to scale mantissa of op2. */
- + rsub r9, r10
- +
- + /* Saturate the shift amount to 31. If the amount
- + is any larger, op2 is insignificant. */
- + satu r9 >> 0, 5
- +
- + /* Shift mantissa of op2 to same decimal point as the mantissa
- + of op1. */
- + lsr r12, r11, r9
- +
- + /* Put the remaining bits into r11[23:..].*/
- + rsub r9, r9, (32-8)
- + lsl r11, r11, r9
- + /* Insert the bits we will remove from the mantissa into r11[31:24] */
- + bfins r11, r12, 24, 8
- +
- + /* Now add the mantissas. */
- + add r8, r12
- +
- + ld.w r12, sp++
- +#else
- + /* Ignore sticky bit to simplify and speed up rounding */
- + /* op2 is either zero or subnormal. */
- + breq __avr32_f32_add_op2_subnormal
- +0:
- + /* Get shift amount to scale mantissa of op2. */
- + rsub r9, r10
- +
- + /* Saturate the shift amount to 31. If the amount
- + is any larger, op2 is insignificant. */
- + satu r9 >> 0, 5
- +
- + /* Shift mantissa of op2 to same decimal point as the mantissa
- + of op1. */
- + lsr r11, r11, r9
- +
- + /* Now add the mantissas. */
- + add r8, r11
- +
- +#endif
- + /* Check if we overflowed. */
- + brcs __avr32_f32_add_res_of
- +1:
- + /* Pack result. */
- + or r12, r12, r8 >> 8
- + bfins r12, r10, 23, 8
- +
- + /* Round */
- +#if defined(L_avr32_f32_addsub)
- + mov_imm r10, 0x80000000
- + bld r12, 0
- + subne r10, -1
- + cp.w r11, r10
- + subhs r12, -1
- +#else
- + bld r8, 7
- + acr r12
- +#endif
- +
- + ret r12
- +
- +__avr32_f32_add_op2_subnormal:
- + /* Fix implicit bit and adjust exponent of subnormals. */
- + cbr r11, 31
- + /* Set exponent to 1 if we do not have a zero. */
- + movne r9,1
- +
- + /* Check if op1 is also subnormal. */
- + cp.w r10, 0
- + brne 0b
- + /* Both operands subnormal, just add the mantissas and
- + pack. If the addition of the subnormal numbers results
- + in a normal number then the exponent will automatically
- + be set to 1 by the addition. */
- + cbr r8, 31
- + add r11, r8
- + or r12, r12, r11 >> 8
- + ret r12
- +
- +__avr32_f32_add_op1_nan_or_inf:
- + /* Check if op1 is NaN, if so return NaN */
- + lsl r11, r8, 1
- + retne -1
- +
- + /* op1 is Inf. */
- + bfins r12, r10, 23, 8 /* Generate Inf in r12 */
- +
- + /* Check if op2 is Inf. or NaN */
- + lsr r11, r9, 23
- + cp.w r11, 0xff
- + retne r12 /* op2 not Inf or NaN, return op1 */
- +
- + lsl r9, 9
- + reteq r12 /* op2 Inf return op1 */
- + ret -1 /* op2 is NaN, return NaN */
- +
- +__avr32_f32_add_res_of:
- + /* We overflowed. Increase exponent and shift mantissa.*/
- + lsr r8, 1
- + sub r10, -1
- +
- + /* Clear mantissa to set result to Inf if the exponent is 255. */
- + cp.w r10, 255
- + moveq r8, 0
- + moveq r11, 0
- + rjmp 1b
- +
- +
- +#endif
- +
- +
- +#if defined(L_avr32_f32_div) || defined(L_avr32_f32_div_fast)
- + .align 2
- +
- +#if defined(L_avr32_f32_div_fast)
- + .global __avr32_f32_div_fast
- + .type __avr32_f32_div_fast,@function
- +__avr32_f32_div_fast:
- +#else
- + .global __avr32_f32_div
- + .type __avr32_f32_div,@function
- +__avr32_f32_div:
- +#endif
- +
- + eor r8, r11, r12 /* MSB(r8) = Sign(op1) ^ Sign(op2) */
- +
- + /* Unpack */
- + lsl r12,1
- + lsl r11,1
- + breq 4f /* Check op2 for zero */
- +
- + tst r12, r12
- + moveq r9, 0
- + breq 12f
- +
- + /* Unpack op1*/
- + /* exp: r9 */
- + /* sf: r12 */
- + lsr r9, r12, 24
- + breq 11f /*If number is subnormal*/
- + cp r9, 0xff
- + brhs 2f /* Check op1 for NaN or Inf */
- + lsl r12, 7
- + sbr r12, 31 /*Implicit bit*/
- +12:
- +
- + /* Unpack op2*/
- + /* exp: r10 */
- + /* sf: r11 */
- + lsr r10, r11, 24
- + breq 13f /*If number is subnormal*/
- + cp r10, 0xff
- + brhs 3f /* Check op2 for NaN or Inf */
- + lsl r11,7
- + sbr r11, 31 /*Implicit bit*/
- +
- + cp.w r9, 0
- + subfeq r12, 0
- + reteq 0 /* op1 is zero and op2 is neither zero */
- + /* nor NaN, so return zero */
- +
- +14:
- +
- + /* For UC3, store with predecrement is faster than stm */
- + st.w --sp, r5
- + st.d --sp, r6
- +
- + /* Calculate new exponent */
- + sub r9, r10
- + sub r9,-127
- +
- + /* Divide */
- + /* Approximating 1/d with the following recurrence: */
- + /* R[j+1] = R[j]*(2-R[j]*d) */
- + /* Using 2.30 format */
- + /* TWO: r10 */
- + /* d: r5 */
- + /* Multiply result : r6, r7 */
- + /* Initial guess : r11 */
- + /* New approximations : r11 */
- + /* Dividend : r12 */
- +
- + /* Load TWO */
- + mov_imm r10, 0x80000000
- +
- + lsr r12, 2 /* Get significand of Op1 in 2.30 format */
- + lsr r5, r11, 2 /* Get significand of Op2 (=d) in 2.30 format */
- +
- + /* Load initial guess, using look-up table */
- + /* Initial guess is of format 01.XY, where XY is constructed as follows: */
- + /* Let d be of following format: 00.1xy....., then XY=~xy */
- + /* For d=00.100 = 0.5 -> initial guess=01.11 = 1.75 */
- + /* For d=00.101 = 0.625 -> initial guess=01.10 = 1.5 */
- + /* For d=00.110 = 0.75 -> initial guess=01.01 = 1.25 */
- + /* For d=00.111 = 0.875 -> initial guess=01.00 = 1.0 */
- +
- + lsr r11, r10, 1
- + bfextu r6, r5, 27, 2
- + com r6
- + bfins r11, r6, 28, 2
- +
- + /* First approximation */
- + /* r7 = R[j]*d */
- + mulu.d r6, r11, r5
- + /* r7 = 2-R[j]*d */
- + sub r7, r10, r7<<2
- + /* r11 = R[j]*(2-R[j]*d) */
- + mulu.d r6, r11, r7
- + lsl r11, r7, 2
- +
- + /* Second approximation */
- + /* r7 = R[j]*d */
- + mulu.d r6, r11, r5
- + /* r7 = 2-R[j]*d */
- + sub r7, r10, r7<<2
- + /* r11 = R[j]*(2-R[j]*d) */
- + mulu.d r6, r11, r7
- + lsl r11, r7, 2
- +
- + /* Third approximation */
- + /* r7 = R[j]*d */
- + mulu.d r6, r11, r5
- + /* r7 = 2-R[j]*d */
- + sub r7, r10, r7<<2
- + /* r11 = R[j]*(2-R[j]*d) */
- + mulu.d r6, r11, r7
- + lsl r11, r7, 2
- +
- + /* Fourth approximation */
- + /* r7 = R[j]*d */
- + mulu.d r6, r11, r5
- + /* r7 = 2-R[j]*d */
- + sub r7, r10, r7<<2
- + /* r11 = R[j]*(2-R[j]*d) */
- + mulu.d r6, r11, r7
- + lsl r11, r7, 2
- +
- +
- + /* Multiply with dividend to get quotient, r7 = sf(op1)/sf(op2) */
- + mulu.d r6, r11, r12
- +
- + /* Shift by 3 to get result in 1.31 format, as required by the exponent. */
- + /* Note that 1.31 format is already used by the exponent in r9, since */
- + /* a bias of 127 was added to the result exponent, even though the implicit */
- + /* bit was inserted. This gives the exponent an additional bias of 1, which */
- + /* supports 1.31 format. */
- + //lsl r10, r7, 3
- +
- + /* Adjust exponent and mantissa in case the result is of format
- + 0000.1xxx to 0001.xxx*/
- +#if defined(L_avr32_f32_div)
- + lsr r12, 4 /* Scale dividend to 6.26 format to match the
- + result of the multiplication of the divisor and
- + quotient to get the remainder. */
- +#endif
- + bld r7, 31-3
- + breq 0f
- + lsl r7, 1
- + sub r9, 1
- +#if defined(L_avr32_f32_div)
- + lsl r12, 1 /* Scale dividend to 5.27 format to match the
- + result of the multiplication of the divisor and
- + quotient to get the remainder. */
- +#endif
- +0:
- + cp r9, 0
- + brle __avr32_f32_div_res_subnormal /* Result was subnormal. */
- +
- +
- +#if defined(L_avr32_f32_div)
- + /* In order to round correctly we calculate the remainder:
- + Remainder = dividend[r12] - divisor[r5]*quotient[r7]
- + for the case when the quotient is halfway between the round-up
- + value and the round-down value. If the remainder is negative
- + it means that the quotient was too big and should not be
- + rounded up; if the remainder is positive the quotient was too small
- + and we need to round up. If the remainder is zero the
- + quotient is exact, but since we need to remove the guard bit we should
- + round to even. */
- + andl r7, 0xffe0
- + orl r7, 0x0010
- +
- + /* Now do the multiplication. The quotient has the format 4.28
- + while the divisor has the format 2.30 which gives a result
- + of 6.26 */
- + mulu.d r10, r5, r7
- +
- + /* Check if remainder is positive, negative or equal. */
- + bfextu r5, r7, 5, 1 /* Get parity bit into bit 0 of r5 */
- + cp r10, 0
- +__avr32_f32_div_round_subnormal:
- + cpc r11, r12
- + srlo r11 /* Remainder positive: we need to round up.*/
- + moveq r11, r5 /* Remainder zero: round up if mantissa odd. */
- +#else
- + bfextu r11, r7, 4, 1 /* Get guard bit */
- +#endif
- +
- + /* Pack final result*/
- + lsr r12, r7, 5
- + bfins r12, r9, 23, 8
- + /* For UC3, load with postincrement is faster than ldm */
- + ld.d r6, sp++
- + ld.w r5, sp++
- + bld r8, 31
- + bst r12, 31
- + /* Rounding add. */
- + add r12, r11
- + ret r12
- +
- +__divsf_return_op1:
- + lsl r8, 1
- + ror r12
- + ret r12
- +
- +
- +2:
- + /* Op1 is NaN or inf */
- + retne -1 /* Return NaN if op1 is NaN */
- + /* Op1 is inf check op2 */
- + mov_imm r9, 0xff000000
- + cp r11, r9
- + brlo __divsf_return_op1 /* inf/number gives inf */
- + ret -1 /* The rest gives NaN*/
- +3:
- + /* Op2 is NaN or inf */
- + reteq 0 /* Return zero if number/inf*/
- + ret -1 /* Return NaN*/
- +4:
- + /* Op1 is zero ? */
- + tst r12,r12
- + reteq -1 /* 0.0/0.0 is NaN */
- + /* Op1 is Nan? */
- + lsr r9, r12, 24
- + breq 11f /*If number is subnormal*/
- + cp r9, 0xff
- + brhs 2b /* Check op1 for NaN or Inf */
- + /* Nonzero/0.0 is Inf. Sign bit will be shifted in before returning*/
- + mov_imm r12, 0xff000000
- + rjmp __divsf_return_op1
- +
- +11: /* Op1 was denormal. Fix it. */
- + lsl r12,7
- + clz r9,r12
- + lsl r12,r12,r9
- + rsub r9,r9,1
- + rjmp 12b
- +
- +13: /* Op2 was denormal. Fix it. */
- + lsl r11,7
- + clz r10,r11
- + lsl r11,r11,r10
- + rsub r10,r10,1
- + rjmp 14b
- +
- +
- +__avr32_f32_div_res_subnormal: /* Divide result was subnormal */
- +#if defined(L_avr32_f32_div)
- + /* Check how much we must scale down the mantissa. */
- + neg r9
- + sub r9, -1 /* We no longer have an implicit bit. */
- + satu r9 >> 0, 5 /* Saturate shift amount to max 31. */
- + /* Scale down quotient */
- + rsub r10, r9, 32
- + lsr r7, r7, r9
- + /* Scale down the dividend to match the scaling of the quotient. */
- + lsl r6, r12, r10 /* Make the dividend 64-bit and put the lsw in r6 */
- + lsr r12, r12, r9
- +
- + /* Start performing the same rounding as done for normal numbers
- + but this time we have scaled the quotient and dividend and hence
- + need a little different comparison. */
- + andl r7, 0xffe0
- + orl r7, 0x0010
- +
- + /* Now do the multiplication. The quotient has the format 4.28
- + while the divisor has the format 2.30 which gives a result
- + of 6.26 */
- + mulu.d r10, r5, r7
- +
- + /* Set exponent to 0 */
- + mov r9, 0
- +
- + /* Check if remainder is positive, negative or equal. */
- + bfextu r5, r7, 5, 1 /* Get parity bit into bit 0 of r5 */
- + cp r10, r6
- + rjmp __avr32_f32_div_round_subnormal
- +
- +#else
- + ld.d r6, sp++
- + ld.w r5, sp++
- + /*Flush to zero*/
- + ret 0
- +#endif
- +#endif
- +
- +#ifdef L_avr32_f32_mul
- + .global __avr32_f32_mul
- + .type __avr32_f32_mul,@function
- +
- +
- +__avr32_f32_mul:
- + mov r8, r12
- + eor r12, r11 /* MSB(r12) = Sign(op1) ^ Sign(op2) */
- + andh r12, 0x8000, COH
- +
- + /* arrange operands so that op1 >= op2 */
- + cbr r8, 31
- + breq __avr32_f32_mul_op1_zero
- + cbr r11, 31
- +
- + /* Put the number with the largest exponent in r10
- + and the number with the smallest exponent in r9 */
- + max r10, r8, r11
- + min r9, r8, r11
- +
- + /* Unpack exponent and mantissa of op1 */
- + lsl r8, r10, 8
- + sbr r8, 31 /* Set implicit bit. */
- + lsr r10, 23
- +
- + /* op1 is NaN or Inf. */
- + cp.w r10, 0xff
- + breq __avr32_f32_mul_op1_nan_or_inf
- +
- + /* Unpack exponent and mantissa of op2 */
- + lsl r11, r9, 8
- + sbr r11, 31 /* Set implicit bit. */
- + lsr r9, 23
- +
- + /* op2 is either zero or subnormal. */
- + breq __avr32_f32_mul_op2_subnormal
- +0:
- + /* Calculate new exponent */
- + add r9,r10
- +
- + /* Do the multiplication */
- + mulu.d r10,r8,r11
- +
- + /* We might need to scale up by two if the MSB of the result is
- + zero. */
- + lsl r8, r11, 1
- + movcc r11, r8
- + subcc r9, 1
- +
- + /* Put the shifted out bits of the mantissa into r10 */
- + lsr r10, 8
- + bfins r10, r11, 24, 8
- +
- + sub r9,(127-1) /* remove extra exponent bias */
- + brle __avr32_f32_mul_res_subnormal
- +
- + /* Check for Inf. */
- + cp.w r9, 0xff
- + brge 1f
- +
- + /* Pack result. */
- + or r12, r12, r11 >> 8
- + bfins r12, r9, 23, 8
- +
- + /* Round */
- +__avr32_f32_mul_round:
- + mov_imm r8, 0x80000000
- + bld r12, 0
- + subne r8, -1
- +
- + cp.w r10, r8
- + subhs r12, -1
- +
- + ret r12
- +
- +1:
- + /* Return Inf */
- + orh r12, 0x7f80
- + ret r12
- +
- +__avr32_f32_mul_op2_subnormal:
- + cbr r11, 31
- + clz r9, r11
- + retcs 0 /* op2 is zero. Return 0 */
- + sub r9, 8
- + lsl r11, r11, r9
- + rsub r9, r9, 1
- +
- + /* Check if op1 is also subnormal. */
- + tst r10, r10
- + brne 0b
- +
- + /* op1 is also subnormal */
- + cbr r8, 31
- + clz r10, r11
- + retcs 0 /* op1 is zero. Return 0 */
- + lsl r8, r8, r10
- + rsub r10, r10, 1
- +
- + rjmp 0b
- +
- +
- +__avr32_f32_mul_op1_nan_or_inf:
- + /* Check if op1 is NaN, if so return NaN */
- + lsl r11, r8, 1
- + retne -1
- +
- + /* op1 is Inf. */
- + tst r9, r9
- + reteq -1 /* Inf * 0 -> NaN */
- +
- + bfins r12, r10, 23, 8 /* Generate Inf in r12 */
- +
- + /* Check if op2 is Inf. or NaN */
- + lsr r11, r9, 23
- + cp.w r11, 0xff
- + retne r12 /* op2 not Inf or NaN, return Inf */
- +
- + lsl r9, 9
- + reteq r12 /* op2 Inf return Inf */
- + ret -1 /* op2 is NaN, return NaN */
- +
- +__avr32_f32_mul_res_subnormal:
- + /* Check if the number is so small that
- + it will be represented as zero. */
- + rsub r9, r9, 9
- + rsub r8, r9, 32
- + retcs 0
- +
- + /* Shift the mantissa into the correct position.*/
- + lsr r9, r11, r9
- + /* Add sign bit. */
- + or r12, r9
- + /* Put the shifted out bits in the most significant part
- + of r8. */
- + lsl r11, r11, r8
- +
- + /* Add all the remainder bits used for rounding into r11 */
- + andh r10, 0x00FF
- + or r10, r11
- + rjmp __avr32_f32_mul_round
- +
- +__avr32_f32_mul_op1_zero:
- + bfextu r10, r11, 23, 8
- + cp.w r10, 0xff
- + retne r12
- + reteq -1
- +
- +#endif
- +
- +
- +#ifdef L_avr32_s32_to_f32
- + .global __avr32_s32_to_f32
- + .type __avr32_s32_to_f32,@function
- +__avr32_s32_to_f32:
- + cp r12, 0
- + reteq r12 /* If zero then return zero float */
- + mov r11, r12 /* Keep the sign */
- + abs r12 /* Compute the absolute value */
- + mov r10, 31 + 127 /* Set the correct exponent */
- +
- + /* Normalize */
- + normalize_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
- +
- + /* Check for subnormal result */
- + cp.w r10, 0
- + brle __avr32_s32_to_f32_subnormal
- +
- + round_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
- + pack_sf r12 /*sf*/, r10 /*exp*/, r12 /*mant*/
- + lsl r11, 1
- + ror r12
- + ret r12
- +
- +__avr32_s32_to_f32_subnormal:
- + /* Adjust a subnormal result */
- + adjust_subnormal_sf r12/*sf*/, r10 /*exp*/, r12 /*mant*/, r11/*sign*/, r9 /*scratch*/
- + ret r12
- +
- +#endif
- +
- +#ifdef L_avr32_u32_to_f32
- + .global __avr32_u32_to_f32
- + .type __avr32_u32_to_f32,@function
- +__avr32_u32_to_f32:
- + cp r12, 0
- + reteq r12 /* If zero then return zero float */
- + mov r10, 31 + 127 /* Set the correct exponent */
- +
- + /* Normalize */
- + normalize_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
- +
- + /* Check for subnormal result */
- + cp.w r10, 0
- + brle __avr32_u32_to_f32_subnormal
- +
- + round_sf r10 /*exp*/, r12 /*mant*/, r9 /*scratch*/
- + pack_sf r12 /*sf*/, r10 /*exp*/, r12 /*mant*/
- + lsr r12,1 /* Sign bit is 0 for unsigned int */
- + ret r12
- +
- +__avr32_u32_to_f32_subnormal:
- + /* Adjust a subnormal result */
- + mov r8, 0
- + adjust_subnormal_sf r12/*sf*/,r10 /*exp*/, r12 /*mant*/,r8/*sign*/, r9 /*scratch*/
- + ret r12
- +
- +
- +#endif
- +
- +
- +#ifdef L_avr32_f32_to_s32
- + .global __avr32_f32_to_s32
- + .type __avr32_f32_to_s32,@function
- +__avr32_f32_to_s32:
- + bfextu r11, r12, 23, 8
- + sub r11,127 /* Fix bias */
- + retlo 0 /* Negative exponent yields zero integer */
- +
- + /* Shift mantissa into correct position */
- + rsub r11,r11,31 /* Shift amount */
- + lsl r10,r12,8 /* Get mantissa */
- + sbr r10,31 /* Add implicit bit */
- + lsr r10,r10,r11 /* Perform shift */
- + lsl r12,1 /* Check sign */
- + retcc r10 /* if positive, we are done */
- + neg r10 /* if negative float, negate result */
- + ret r10
- +
- +#endif
- +
- +#ifdef L_avr32_f32_to_u32
- + .global __avr32_f32_to_u32
- + .type __avr32_f32_to_u32,@function
- +__avr32_f32_to_u32:
- + cp r12,0
- + retmi 0 /* Negative numbers give 0 */
- + bfextu r11, r12, 23, 8 /* Extract exponent */
- + sub r11,127 /* Fix bias */
- + retlo 0 /* Negative exponent yields zero integer */
- +
- + /* Shift mantissa into correct position */
- + rsub r11,r11,31 /* Shift amount */
- + lsl r12,8 /* Get mantissa */
- + sbr r12,31 /* Add implicit bit */
- + lsr r12,r12,r11 /* Perform shift */
- + ret r12
- +
- +#endif
- +
- +#ifdef L_avr32_f32_to_f64
- + .global __avr32_f32_to_f64
- + .type __avr32_f32_to_f64,@function
- +
- +__avr32_f32_to_f64:
- + lsl r11,r12,1 /* Remove sign bit, keep original value in r12*/
- + moveq r10, 0
- + reteq r11 /* Return zero if input is zero */
- +
- + bfextu r9,r11,24,8 /* Get exponent */
- + cp.w r9,0xff /* check for NaN or inf */
- + breq 0f
- +
- + lsl r11,7 /* Convert sf mantissa to df format */
- + mov r10,0
- +
- + /* Check if implicit bit should be set */
- + cp.w r9, 0
- + subeq r9,-1 /* Adjust exponent if it was 0 */
- + srne r8
- + or r11, r11, r8 << 31 /* Set implicit bit if needed */
- + sub r9,(127-0x3ff) /* Convert exponent to df format exponent */
- +
- + /*We know that low register of mantissa is 0, and will be unaffected by normalization.*/
- + /*We can therefore use the faster normalize_sf function instead of normalize_df.*/
- + normalize_sf r9 /*exp*/, r11 /*mantissa*/, r8 /*scratch*/
- + pack_df r9 /*exp*/, r10, r11 /*mantissa*/, r10, r11 /*df*/
- +
- +__extendsfdf_return_op1:
- + /* Rotate in sign bit */
- + lsl r12, 1
- + ror r11
- + ret r11
- +
- +0:
- + /* Inf or NaN*/
- + mov_imm r10, 0xffe00000
- + lsl r11,8 /* check mantissa */
- + movne r11, -1 /* Return NaN */
- + moveq r11, r10 /* Return inf */
- + mov r10, 0
- + rjmp __extendsfdf_return_op1
- +#endif
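For the normal-number path above, the whole widening can be modelled in a few lines of C (sketch only, name mine; subnormal inputs and NaN/Inf take the separate paths shown in the assembly):

    #include <stdint.h>

    static uint64_t f32_to_f64(uint32_t f)   /* 0 < exponent < 255 assumed */
    {
        uint64_t sign = (uint64_t)(f >> 31) << 63;
        uint64_t exp  = (f >> 23) & 0xff;
        uint64_t mant = (uint64_t)(f & 0x007fffffu) << 29;  /* 23 -> 52 bits */

        /* Rebias the exponent: 127 (single) -> 1023 (double). */
        return sign | ((exp - 127 + 1023) << 52) | mant;
    }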
- +
- +
- +#ifdef L_avr32_f64_to_f32
- + .global __avr32_f64_to_f32
- + .type __avr32_f64_to_f32,@function
- +
- +__avr32_f64_to_f32:
- + /* Unpack */
- + lsl r9,r11,1 /* Unpack exponent */
- + lsr r9,21
- +
- + reteq 0 /* If exponent is 0 the number is so small
- + that the conversion to single float gives
- + zero */
- +
- + lsl r8,r11,10 /* Adjust mantissa */
- + or r12,r8,r10>>22
- +
- + lsl r10,10 /* Check if there are any remaining bits
- + in the low part of the mantissa.*/
- + neg r10
- + rol r12 /* If there were remaining bits then set lsb
- + of mantissa to 1 */
- +
- + cp r9,0x7ff
- + breq 2f /* Check for NaN or inf */
- +
- + sub r9,(0x3ff-127) /* Adjust bias of exponent */
- + sbr r12,31 /* set the implicit bit.*/
- +
- + cp.w r9, 0 /* Check for subnormal number */
- + brle 3f
- +
- + round_sf r9 /*exp*/, r12 /*mant*/, r10 /*scratch*/
- + pack_sf r12 /*sf*/, r9 /*exp*/, r12 /*mant*/
- +__truncdfsf_return_op1:
- + /* Rotate in sign bit */
- + lsl r11, 1
- + ror r12
- + ret r12
- +
- +2:
- + /* NaN or inf */
- + cbr r12,31 /* clear implicit bit */
- + retne -1 /* Return NaN if mantissa not zero */
- + mov_imm r12, 0x7f800000
- + ret r12 /* Return inf */
- +
- +3: /* Result is subnormal. Adjust it.*/
- + adjust_subnormal_sf r12/*sf*/,r9 /*exp*/, r12 /*mant*/, r11/*sign*/, r10 /*scratch*/
- + ret r12
- +
- +
- +#endif
- +
- +#if defined(L_mulsi3) && defined(__AVR32_NO_MUL__)
- + .global __mulsi3
- + .type __mulsi3,@function
- +
- +__mulsi3:
- + mov r9, 0
- +0:
- + lsr r11, 1
- + addcs r9, r9, r12
- + breq 1f
- + lsl r12, 1
- + rjmp 0b
- +1:
- + ret r9
- +#endif
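The loop above is plain shift-and-add multiplication for parts without a hardware multiplier; an equivalent C model (illustrative, name mine):

    #include <stdint.h>

    static uint32_t mulsi3(uint32_t a, uint32_t b)
    {
        uint32_t acc = 0;

        while (b != 0) {
            if (b & 1)          /* bit shifted out by 'lsr r11, 1' */
                acc += a;       /* 'addcs r9, r9, r12'             */
            b >>= 1;
            a <<= 1;            /* 'lsl r12, 1'                    */
        }
        return acc;             /* low 32 bits of the product      */
    }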
- --- /dev/null
- +++ b/gcc/config/avr32/lib2funcs.S
- @@ -0,0 +1,21 @@
- + .align 4
- + .global __nonlocal_goto
- + .type __nonlocal_goto,@function
- +
- +/* __nonlocal_goto: This function handles nonlocal gotos in GCC.
- +
- + parameter 0 (r12) = New Frame Pointer
- + parameter 1 (r11) = Address to goto
- + parameter 2 (r10) = New Stack Pointer
- +
- + This function invalidates the return stack, since it returns from a
- + function without using a return instruction.
- +*/
- +__nonlocal_goto:
- + mov r7, r12
- + mov sp, r10
- + frs # Flush return stack
- + mov pc, r11
- +
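For context, GCC emits a call to __nonlocal_goto when a GNU C nested function jumps to a label in its containing function; a small example of code that exercises this path (illustrative only, uses the GNU nested-function extension):

    int find_negative(const int *v, int n)
    {
        __label__ found;           /* make the label visible to 'check' */
        int i;

        void check(int x)
        {
            if (x < 0)
                goto found;        /* nonlocal goto out of 'check' */
        }

        for (i = 0; i < n; i++)
            check(v[i]);
        return -1;

    found:
        return i;                  /* index of the first negative entry */
    }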
- +
- +
- --- /dev/null
- +++ b/gcc/config/avr32/linux-elf.h
- @@ -0,0 +1,151 @@
- +/*
- + Linux/Elf specific definitions.
- + Copyright 2003-2006 Atmel Corporation.
- +
- + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
- + and Håvard Skinnemoen, Atmel Norway, <hskinnemoen@atmel.com>
- +
- + This file is part of GCC.
- +
- + This program is free software; you can redistribute it and/or modify
- + it under the terms of the GNU General Public License as published by
- + the Free Software Foundation; either version 2 of the License, or
- + (at your option) any later version.
- +
- + This program is distributed in the hope that it will be useful,
- + but WITHOUT ANY WARRANTY; without even the implied warranty of
- + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- + GNU General Public License for more details.
- +
- + You should have received a copy of the GNU General Public License
- + along with this program; if not, write to the Free Software
- + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
- +
- +
- +
- +/* elfos.h should have already been included. Now just override
- + any conflicting definitions and add any extras. */
- +
- +/* Run-time Target Specification. */
- +#undef TARGET_VERSION
- +#define TARGET_VERSION fputs (" (AVR32 GNU/Linux with ELF)", stderr);
- +
- +/* Do not assume anything about header files. */
- +#define NO_IMPLICIT_EXTERN_C
- +
- +/* The GNU C++ standard library requires that these macros be defined. */
- +#undef CPLUSPLUS_CPP_SPEC
- +#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)"
- +
- +/* Now we define the strings used to build the spec file. */
- +#undef LIB_SPEC
- +#define LIB_SPEC \
- + "%{pthread:-lpthread} \
- + %{shared:-lc} \
- + %{!shared:%{profile:-lc_p}%{!profile:-lc}}"
- +
- +/* Provide a STARTFILE_SPEC appropriate for GNU/Linux. Here we add
- + the GNU/Linux magical crtbegin.o file (see crtstuff.c) which
- + provides part of the support for getting C++ file-scope static
- + object constructed before entering `main'. */
- +
- +#undef STARTFILE_SPEC
- +#define STARTFILE_SPEC \
- + "%{!shared: \
- + %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} \
- + %{!p:%{profile:gcrt1.o%s} \
- + %{!profile:crt1.o%s}}}} \
- + crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
- +
- +/* Provide an ENDFILE_SPEC appropriate for GNU/Linux. Here we tack on
- + the GNU/Linux magical crtend.o file (see crtstuff.c) which
- + provides part of the support for getting C++ file-scope static
- + object constructed before entering `main', followed by a normal
- + GNU/Linux "finalizer" file, `crtn.o'. */
- +
- +#undef ENDFILE_SPEC
- +#define ENDFILE_SPEC \
- + "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
- +
- +#undef ASM_SPEC
- +#define ASM_SPEC "%{!mno-pic:%{!fno-pic:--pic}} %{mrelax|O*:%{mno-relax|O0|O1: ;:--linkrelax}} %{mcpu=*:-mcpu=%*}"
- +
- +#undef LINK_SPEC
- +#define LINK_SPEC "%{version:-v} \
- + %{static:-Bstatic} \
- + %{shared:-shared} \
- + %{symbolic:-Bsymbolic} \
- + %{rdynamic:-export-dynamic} \
- + %{!dynamic-linker:-dynamic-linker /lib/ld-uClibc.so.0} \
- + %{mrelax|O*:%{mno-relax|O0|O1: ;:--relax}}"
- +
- +#define TARGET_OS_CPP_BUILTINS() LINUX_TARGET_OS_CPP_BUILTINS()
- +
- +/* This is how we tell the assembler that two symbols have the same value. */
- +#define ASM_OUTPUT_DEF(FILE, NAME1, NAME2) \
- + do \
- + { \
- + assemble_name (FILE, NAME1); \
- + fputs (" = ", FILE); \
- + assemble_name (FILE, NAME2); \
- + fputc ('\n', FILE); \
- + } \
- + while (0)
- +
- +
- +
- +#undef CC1_SPEC
- +#define CC1_SPEC "%{profile:-p}"
- +
- +/* Target CPU builtins. */
- +#define TARGET_CPU_CPP_BUILTINS() \
- + do \
- + { \
- + builtin_define ("__avr32__"); \
- + builtin_define ("__AVR32__"); \
- + builtin_define ("__AVR32_LINUX__"); \
- + builtin_define (avr32_part->macro); \
- + builtin_define (avr32_arch->macro); \
- + if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A) \
- + builtin_define ("__AVR32_AVR32A__"); \
- + else \
- + builtin_define ("__AVR32_AVR32B__"); \
- + if (TARGET_UNALIGNED_WORD) \
- + builtin_define ("__AVR32_HAS_UNALIGNED_WORD__"); \
- + if (TARGET_SIMD) \
- + builtin_define ("__AVR32_HAS_SIMD__"); \
- + if (TARGET_DSP) \
- + builtin_define ("__AVR32_HAS_DSP__"); \
- + if (TARGET_RMW) \
- + builtin_define ("__AVR32_HAS_RMW__"); \
- + if (TARGET_BRANCH_PRED) \
- + builtin_define ("__AVR32_HAS_BRANCH_PRED__"); \
- + if (TARGET_FAST_FLOAT) \
- + builtin_define ("__AVR32_FAST_FLOAT__"); \
- + } \
- + while (0)
- +
- +
- +
- +/* Call the function profiler with a given profile label. */
- +#undef FUNCTION_PROFILER
- +#define FUNCTION_PROFILER(STREAM, LABELNO) \
- + do \
- + { \
- + fprintf (STREAM, "\tmov\tlr, lo(mcount)\n\torh\tlr, hi(mcount)\n"); \
- + fprintf (STREAM, "\ticall lr\n"); \
- + } \
- + while (0)
- +
- +#define NO_PROFILE_COUNTERS 1
- +
- +/* For dynamic libraries to work */
- +/* #define PLT_REG_CALL_CLOBBERED 1 */
- +#define AVR32_ALWAYS_PIC 1
- +
- +/* uclibc does not implement sinf, cosf etc. */
- +#undef TARGET_C99_FUNCTIONS
- +#define TARGET_C99_FUNCTIONS 0
- +
- +#define LINK_GCC_C_SEQUENCE_SPEC \
- + "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}"
- --- /dev/null
- +++ b/gcc/config/avr32/predicates.md
- @@ -0,0 +1,422 @@
- +;; AVR32 predicates file.
- +;; Copyright 2003-2006 Atmel Corporation.
- +;;
- +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
- +;;
- +;; This file is part of GCC.
- +;;
- +;; This program is free software; you can redistribute it and/or modify
- +;; it under the terms of the GNU General Public License as published by
- +;; the Free Software Foundation; either version 2 of the License, or
- +;; (at your option) any later version.
- +;;
- +;; This program is distributed in the hope that it will be useful,
- +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
- +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- +;; GNU General Public License for more details.
- +;;
- +;; You should have received a copy of the GNU General Public License
- +;; along with this program; if not, write to the Free Software
- +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- +
- +
- +;; True if the operand is a memory reference which contains an
- +;; address consisting of a single pointer register
- +(define_predicate "avr32_indirect_register_operand"
- + (and (match_code "mem")
- + (match_test "register_operand(XEXP(op, 0), SImode)")))
- +
- +
- +
- +;; Address expression with a base pointer offset with
- +;; a register displacement
- +(define_predicate "avr32_indexed_memory_operand"
- + (and (match_code "mem")
- + (match_test "GET_CODE(XEXP(op, 0)) == PLUS"))
- + {
- +
- + rtx op0 = XEXP(XEXP(op, 0), 0);
- + rtx op1 = XEXP(XEXP(op, 0), 1);
- +
- + return ((avr32_address_register_rtx_p (op0, 0)
- + && avr32_legitimate_index_p (GET_MODE(op), op1, 0))
- + || (avr32_address_register_rtx_p (op1, 0)
- + && avr32_legitimate_index_p (GET_MODE(op), op0, 0)));
- +
- + })
- +
- +;; Operand suitable for the ld.sb instruction
- +(define_predicate "load_sb_memory_operand"
- + (ior (match_operand 0 "avr32_indirect_register_operand")
- + (match_operand 0 "avr32_indexed_memory_operand")))
- +
- +
- +;; Operand suitable as operand to insns sign extending QI values
- +(define_predicate "extendqi_operand"
- + (ior (match_operand 0 "load_sb_memory_operand")
- + (match_operand 0 "register_operand")))
- +
- +(define_predicate "post_inc_memory_operand"
- + (and (match_code "mem")
- + (match_test "(GET_CODE(XEXP(op, 0)) == POST_INC)
- + && REG_P(XEXP(XEXP(op, 0), 0))")))
- +
- +(define_predicate "pre_dec_memory_operand"
- + (and (match_code "mem")
- + (match_test "(GET_CODE(XEXP(op, 0)) == PRE_DEC)
- + && REG_P(XEXP(XEXP(op, 0), 0))")))
- +
- +;; Operand suitable for add instructions
- +(define_predicate "avr32_add_operand"
- + (ior (match_operand 0 "register_operand")
- + (and (match_operand 0 "immediate_operand")
- + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'I', \"Is21\")"))))
- +
- +;; Operand is a power of two immediate
- +(define_predicate "power_of_two_operand"
- + (match_code "const_int")
- +{
- + HOST_WIDE_INT value = INTVAL (op);
- +
- + return value != 0 && (value & (value - 1)) == 0;
- +})
- +
- +;; Operand is a multiple of 8 immediate
- +(define_predicate "multiple_of_8_operand"
- + (match_code "const_int")
- +{
- + HOST_WIDE_INT value = INTVAL (op);
- +
- + return (value & 0x7) == 0 ;
- +})
- +
- +;; Operand is a multiple of 16 immediate
- +(define_predicate "multiple_of_16_operand"
- + (match_code "const_int")
- +{
- + HOST_WIDE_INT value = INTVAL (op);
- +
- + return (value & 0xf) == 0 ;
- +})
- +
- +;; Operand is a mask used for masking away upper bits of a reg
- +(define_predicate "avr32_mask_upper_bits_operand"
- + (match_code "const_int")
- +{
- + HOST_WIDE_INT value = INTVAL (op) + 1;
- +
- + return value != 1 && value != 0 && (value & (value - 1)) == 0;
- +})
- +
- +
- +;; Operand suitable for mul instructions
- +(define_predicate "avr32_mul_operand"
- + (ior (match_operand 0 "register_operand")
- + (and (match_operand 0 "immediate_operand")
- + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")"))))
- +
- +;; True for logical binary operators.
- +(define_predicate "logical_binary_operator"
- + (match_code "ior,xor,and"))
- +
- +;; True for logical shift operators
- +(define_predicate "logical_shift_operator"
- + (match_code "ashift,lshiftrt"))
- +
- +;; True for shift operand for logical and, or and eor insns
- +(define_predicate "avr32_logical_shift_operand"
- + (and (match_code "ashift,lshiftrt")
- + (ior (and (match_test "GET_CODE(XEXP(op, 1)) == CONST_INT")
- + (match_test "register_operand(XEXP(op, 0), GET_MODE(XEXP(op, 0)))"))
- + (and (match_test "GET_CODE(XEXP(op, 0)) == CONST_INT")
- + (match_test "register_operand(XEXP(op, 1), GET_MODE(XEXP(op, 1)))"))))
- + )
- +
- +
- +;; Predicate for second operand to and, ior and xor insn patterns
- +(define_predicate "avr32_logical_insn_operand"
- + (ior (match_operand 0 "register_operand")
- + (match_operand 0 "avr32_logical_shift_operand"))
- +)
- +
- +
- +;; True for avr32 comparison operators
- +(define_predicate "avr32_comparison_operator"
- + (ior (match_code "eq, ne, gt, ge, lt, le, gtu, geu, ltu, leu")
- + (and (match_code "unspec")
- + (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
- + || (XINT(op, 1) == UNSPEC_COND_PL)"))))
- +
- +(define_predicate "avr32_cond3_comparison_operator"
- + (ior (match_code "eq, ne, ge, lt, geu, ltu")
- + (and (match_code "unspec")
- + (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
- + || (XINT(op, 1) == UNSPEC_COND_PL)"))))
- +
- +;; True for avr32 comparison operand
- +(define_predicate "avr32_comparison_operand"
- + (ior (and (match_code "eq, ne, gt, ge, lt, le, gtu, geu, ltu, leu")
- + (match_test "(CC0_P (XEXP(op,0)) && rtx_equal_p (XEXP(op,1), const0_rtx))"))
- + (and (match_code "unspec")
- + (match_test "(XINT(op, 1) == UNSPEC_COND_MI)
- + || (XINT(op, 1) == UNSPEC_COND_PL)"))))
- +
- +;; True if this is a const_int with one bit set
- +(define_predicate "one_bit_set_operand"
- + (match_code "const_int")
- + {
- + int i;
- + int value;
- + int ones = 0;
- +
- + value = INTVAL(op);
- + for ( i = 0 ; i < 32; i++ ){
- + if ( value & ( 1 << i ) ){
- + ones++;
- + }
- + }
- +
- + return ( ones == 1 );
- + })
- +
- +
- +;; True if this is a const_int with one bit cleared
- +(define_predicate "one_bit_cleared_operand"
- + (match_code "const_int")
- + {
- + int i;
- + int value;
- + int zeroes = 0;
- +
- + value = INTVAL(op);
- + for ( i = 0 ; i < 32; i++ ){
- + if ( !(value & ( 1 << i )) ){
- + zeroes++;
- + }
- + }
- +
- + return ( zeroes == 1 );
- + })
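- +;; Equivalently: exactly one bit set (popcount 1) above, exactly one
- +;; bit cleared (popcount 31) here; e.g. 0xfffffffb has only bit 2
- +;; cleared and satisfies one_bit_cleared_operand.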
- +
- +
- +;; Immediate with all the low 16 bits cleared
- +(define_predicate "avr32_hi16_immediate_operand"
- + (match_code "const_int")
- + {
- + /* If the low 16 bits are zero then this
- + is a hi16 immediate. */
- + return ((INTVAL(op) & 0xffff) == 0);
- + }
- +)
- +
- +;; True if this is a register or immediate operand
- +(define_predicate "register_immediate_operand"
- + (ior (match_operand 0 "register_operand")
- + (match_operand 0 "immediate_operand")))
- +
- +;; True if this is a register or const_int operand
- +(define_predicate "register_const_int_operand"
- + (ior (match_operand 0 "register_operand")
- + (and (match_operand 0 "const_int_operand")
- + (match_operand 0 "immediate_operand"))))
- +
- +;; True if this is a register or const_double operand
- +(define_predicate "register_const_double_operand"
- + (ior (match_operand 0 "register_operand")
- + (match_operand 0 "const_double_operand")))
- +
- +;; True if this is an operand containing a label_ref.
- +(define_predicate "avr32_label_ref_operand"
- + (and (match_code "mem")
- + (match_test "avr32_find_symbol(op)
- + && (GET_CODE(avr32_find_symbol(op)) == LABEL_REF)")))
- +
- +;; True if this is a valid symbol pointing to the constant pool.
- +(define_predicate "avr32_const_pool_operand"
- + (and (match_code "symbol_ref")
- + (match_test "CONSTANT_POOL_ADDRESS_P(op)"))
- + {
- + return (flag_pic ? (!(symbol_mentioned_p (get_pool_constant (op))
- + || label_mentioned_p (get_pool_constant (op)))
- + || avr32_got_mentioned_p(get_pool_constant (op)))
- + : true);
- + }
- +)
- +
- +;; True if this is a memory reference to the constant or mini pool.
- +(define_predicate "avr32_const_pool_ref_operand"
- + (ior (match_operand 0 "avr32_label_ref_operand")
- + (and (match_code "mem")
- + (match_test "avr32_const_pool_operand(XEXP(op,0), GET_MODE(XEXP(op,0)))"))))
- +
- +
- +;; Legal source operand for movti insns
- +(define_predicate "avr32_movti_src_operand"
- + (ior (match_operand 0 "avr32_const_pool_ref_operand")
- + (ior (ior (match_operand 0 "register_immediate_operand")
- + (match_operand 0 "avr32_indirect_register_operand"))
- + (match_operand 0 "post_inc_memory_operand"))))
- +
- +;; Legal destination operand for movti insns
- +(define_predicate "avr32_movti_dst_operand"
- + (ior (ior (match_operand 0 "register_operand")
- + (match_operand 0 "avr32_indirect_register_operand"))
- + (match_operand 0 "pre_dec_memory_operand")))
- +
- +
- +;; True if this is a k12 offset memory operand.
- +(define_predicate "avr32_k12_memory_operand"
- + (and (match_code "mem")
- + (ior (match_test "REG_P(XEXP(op, 0))")
- + (match_test "GET_CODE(XEXP(op, 0)) == PLUS
- + && REG_P(XEXP(XEXP(op, 0), 0))
- + && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)
- + && (CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(XEXP(op, 0), 1)),
- + 'K', (mode == SImode) ? \"Ks14\" : ((mode == HImode) ? \"Ks13\" : \"Ks12\")))"))))
- +
- +;; True if this is a memory operand with an immediate displacement.
- +(define_predicate "avr32_imm_disp_memory_operand"
- + (and (match_code "mem")
- + (match_test "GET_CODE(XEXP(op, 0)) == PLUS
- + && REG_P(XEXP(XEXP(op, 0), 0))
- + && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)")))
- +
- +;; True if this is a bswap operand.
- +(define_predicate "avr32_bswap_operand"
- + (ior (match_operand 0 "avr32_k12_memory_operand")
- + (match_operand 0 "register_operand")))
- +
- +;; True if this is a valid coprocessor insn memory operand.
- +(define_predicate "avr32_cop_memory_operand"
- + (and (match_operand 0 "memory_operand")
- + (not (match_test "GET_CODE(XEXP(op, 0)) == PLUS
- + && REG_P(XEXP(XEXP(op, 0), 0))
- + && (GET_CODE(XEXP(XEXP(op, 0), 1)) == CONST_INT)
- + && !(CONST_OK_FOR_CONSTRAINT_P(INTVAL(XEXP(XEXP(op, 0), 0)), 'K', \"Ku10\"))"))))
- +
- +;; True if this is a valid source/destination operand
- +;; for moving values to/from a coprocessor.
- +(define_predicate "avr32_cop_move_operand"
- + (ior (match_operand 0 "register_operand")
- + (match_operand 0 "avr32_cop_memory_operand")))
- +
- +
- +;; True if this is a valid extract byte offset for use in
- +;; load extracted index insns.
- +(define_predicate "avr32_extract_shift_operand"
- + (and (match_operand 0 "const_int_operand")
- + (match_test "(INTVAL(op) == 0) || (INTVAL(op) == 8)
- + || (INTVAL(op) == 16) || (INTVAL(op) == 24)")))
- +
- +;; True if this is a valid avr32 symbol operand.
- +(define_predicate "avr32_symbol_operand"
- + (and (match_code "label_ref, symbol_ref, const")
- + (match_test "avr32_find_symbol(op)")))
- +
- +;; True if this is a valid operand for the lda.w and call pseudo insns.
- +(define_predicate "avr32_address_operand"
- + (and (and (match_code "label_ref, symbol_ref")
- + (match_test "avr32_find_symbol(op)"))
- + (ior (match_test "TARGET_HAS_ASM_ADDR_PSEUDOS")
- + (match_test "flag_pic")) ))
- +
- +;; An immediate k16 address operand
- +(define_predicate "avr32_ks16_address_operand"
- + (and (match_operand 0 "address_operand")
- + (ior (match_test "REG_P(op)")
- + (match_test "GET_CODE(op) == PLUS
- + && ((GET_CODE(XEXP(op,0)) == CONST_INT)
- + || (GET_CODE(XEXP(op,1)) == CONST_INT))")) ))
- +
- +;; An offset k16 memory operand
- +(define_predicate "avr32_ks16_memory_operand"
- + (and (match_code "mem")
- + (match_test "avr32_ks16_address_operand (XEXP (op, 0), GET_MODE (XEXP (op, 0)))")))
- +
- +;; An immediate k11 address operand
- +(define_predicate "avr32_ks11_address_operand"
- + (and (match_operand 0 "address_operand")
- + (ior (match_test "REG_P(op)")
- + (match_test "GET_CODE(op) == PLUS
- + && (((GET_CODE(XEXP(op,0)) == CONST_INT)
- + && avr32_const_ok_for_constraint_p(INTVAL(XEXP(op,0)), 'K', \"Ks11\"))
- + || ((GET_CODE(XEXP(op,1)) == CONST_INT)
- + && avr32_const_ok_for_constraint_p(INTVAL(XEXP(op,1)), 'K', \"Ks11\")))")) ))
- +
- +;; True if this is an avr32 call operand
- +(define_predicate "avr32_call_operand"
- + (ior (ior (match_operand 0 "register_operand")
- + (ior (match_operand 0 "avr32_const_pool_ref_operand")
- + (match_operand 0 "avr32_address_operand")))
- + (match_test "SYMBOL_REF_RCALL_FUNCTION_P(op)")))
- +
- +;; Return true for operators performing ALU operations
- +
- +(define_predicate "alu_operator"
- + (match_code "ior, xor, and, plus, minus, ashift, lshiftrt, ashiftrt"))
- +
- +(define_predicate "avr32_add_shift_immediate_operand"
- + (and (match_operand 0 "immediate_operand")
- + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ku02\")")))
- +
- +(define_predicate "avr32_cond_register_immediate_operand"
- + (ior (match_operand 0 "register_operand")
- + (and (match_operand 0 "immediate_operand")
- + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")"))))
- +
- +(define_predicate "avr32_cond_immediate_operand"
- + (and (match_operand 0 "immediate_operand")
- + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'I', \"Is08\")")))
- +
- +
- +(define_predicate "avr32_cond_move_operand"
- + (ior (ior (match_operand 0 "register_operand")
- + (and (match_operand 0 "immediate_operand")
- + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks08\")")))
- + (and (match_test "TARGET_V2_INSNS")
- + (match_operand 0 "memory_operand"))))
- +
- +(define_predicate "avr32_mov_immediate_operand"
- + (and (match_operand 0 "immediate_operand")
- + (match_test "avr32_const_ok_for_move(INTVAL(op))")))
- +
- +
- +(define_predicate "avr32_rmw_address_operand"
- + (ior (and (match_code "symbol_ref")
- + (match_test "({rtx symbol = avr32_find_symbol(op); \
- + symbol && (GET_CODE (symbol) == SYMBOL_REF) && SYMBOL_REF_RMW_ADDR(symbol);})"))
- + (and (match_operand 0 "immediate_operand")
- + (match_test "CONST_OK_FOR_CONSTRAINT_P(INTVAL(op), 'K', \"Ks17\")")))
- + {
- + return TARGET_RMW && !flag_pic;
- + }
- +)
- +
- +(define_predicate "avr32_rmw_memory_operand"
- + (and (match_code "mem")
- + (match_test "!volatile_refs_p(op) && (GET_MODE(op) == SImode) &&
- + avr32_rmw_address_operand(XEXP(op, 0), GET_MODE(XEXP(op, 0)))")))
- +
- +(define_predicate "avr32_rmw_memory_or_register_operand"
- + (ior (match_operand 0 "avr32_rmw_memory_operand")
- + (match_operand 0 "register_operand")))
- +
- +(define_predicate "avr32_non_rmw_memory_operand"
- + (and (not (match_operand 0 "avr32_rmw_memory_operand"))
- + (match_operand 0 "memory_operand")))
- +
- +(define_predicate "avr32_non_rmw_general_operand"
- + (and (not (match_operand 0 "avr32_rmw_memory_operand"))
- + (match_operand 0 "general_operand")))
- +
- +(define_predicate "avr32_non_rmw_nonimmediate_operand"
- + (and (not (match_operand 0 "avr32_rmw_memory_operand"))
- + (match_operand 0 "nonimmediate_operand")))
- +
- +;; Return true if the operand is the 1.0f constant.
- +
- +(define_predicate "const_1f_operand"
- + (match_code "const_int,const_double")
- +{
- + return (op == CONST1_RTX (SFmode));
- +})
- --- /dev/null
- +++ b/gcc/config/avr32/simd.md
- @@ -0,0 +1,145 @@
- +;; AVR32 machine description file for SIMD instructions.
- +;; Copyright 2003-2006 Atmel Corporation.
- +;;
- +;; Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
- +;;
- +;; This file is part of GCC.
- +;;
- +;; This program is free software; you can redistribute it and/or modify
- +;; it under the terms of the GNU General Public License as published by
- +;; the Free Software Foundation; either version 2 of the License, or
- +;; (at your option) any later version.
- +;;
- +;; This program is distributed in the hope that it will be useful,
- +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
- +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- +;; GNU General Public License for more details.
- +;;
- +;; You should have received a copy of the GNU General Public License
- +;; along with this program; if not, write to the Free Software
- +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- +
- +;; -*- Mode: Scheme -*-
- +
- +
- +;; Vector modes
- +(define_mode_iterator VECM [V2HI V4QI])
- +(define_mode_attr size [(V2HI "h") (V4QI "b")])
- +
- +(define_insn "add<mode>3"
- + [(set (match_operand:VECM 0 "register_operand" "=r")
- + (plus:VECM (match_operand:VECM 1 "register_operand" "r")
- + (match_operand:VECM 2 "register_operand" "r")))]
- + "TARGET_SIMD"
- + "padd.<size>\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +
- +(define_insn "sub<mode>3"
- + [(set (match_operand:VECM 0 "register_operand" "=r")
- + (minus:VECM (match_operand:VECM 1 "register_operand" "r")
- + (match_operand:VECM 2 "register_operand" "r")))]
- + "TARGET_SIMD"
- + "psub.<size>\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +
- +(define_insn "abs<mode>2"
- + [(set (match_operand:VECM 0 "register_operand" "=r")
- + (abs:VECM (match_operand:VECM 1 "register_operand" "r")))]
- + "TARGET_SIMD"
- + "pabs.s<size>\t%0, %1"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +(define_insn "ashl<mode>3"
- + [(set (match_operand:VECM 0 "register_operand" "=r")
- + (ashift:VECM (match_operand:VECM 1 "register_operand" "r")
- + (match_operand:SI 2 "immediate_operand" "Ku04")))]
- + "TARGET_SIMD"
- + "plsl.<size>\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +(define_insn "ashr<mode>3"
- + [(set (match_operand:VECM 0 "register_operand" "=r")
- + (ashiftrt:VECM (match_operand:VECM 1 "register_operand" "r")
- + (match_operand:SI 2 "immediate_operand" "Ku04")))]
- + "TARGET_SIMD"
- + "pasr.<size>\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +(define_insn "lshr<mode>3"
- + [(set (match_operand:VECM 0 "register_operand" "=r")
- + (lshiftrt:VECM (match_operand:VECM 1 "register_operand" "r")
- + (match_operand:SI 2 "immediate_operand" "Ku04")))]
- + "TARGET_SIMD"
- + "plsr.<size>\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +(define_insn "smaxv2hi3"
- + [(set (match_operand:V2HI 0 "register_operand" "=r")
- + (smax:V2HI (match_operand:V2HI 1 "register_operand" "r")
- + (match_operand:V2HI 2 "register_operand" "r")))]
- +
- + "TARGET_SIMD"
- + "pmax.sh\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +(define_insn "sminv2hi3"
- + [(set (match_operand:V2HI 0 "register_operand" "=r")
- + (smin:V2HI (match_operand:V2HI 1 "register_operand" "r")
- + (match_operand:V2HI 2 "register_operand" "r")))]
- +
- + "TARGET_SIMD"
- + "pmin.sh\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +(define_insn "umaxv4qi3"
- + [(set (match_operand:V4QI 0 "register_operand" "=r")
- + (umax:V4QI (match_operand:V4QI 1 "register_operand" "r")
- + (match_operand:V4QI 2 "register_operand" "r")))]
- +
- + "TARGET_SIMD"
- + "pmax.ub\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +(define_insn "uminv4qi3"
- + [(set (match_operand:V4QI 0 "register_operand" "=r")
- + (umin:V4QI (match_operand:V4QI 1 "register_operand" "r")
- + (match_operand:V4QI 2 "register_operand" "r")))]
- +
- + "TARGET_SIMD"
- + "pmin.ub\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +
- +(define_insn "addsubv2hi"
- + [(set (match_operand:V2HI 0 "register_operand" "=r")
- + (vec_concat:V2HI
- + (plus:HI (match_operand:HI 1 "register_operand" "r")
- + (match_operand:HI 2 "register_operand" "r"))
- + (minus:HI (match_dup 1) (match_dup 2))))]
- + "TARGET_SIMD"
- + "paddsub.h\t%0, %1:b, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
- +
- +(define_insn "subaddv2hi"
- + [(set (match_operand:V2HI 0 "register_operand" "=r")
- + (vec_concat:V2HI
- + (minus:HI (match_operand:HI 1 "register_operand" "r")
- + (match_operand:HI 2 "register_operand" "r"))
- + (plus:HI (match_dup 1) (match_dup 2))))]
- + "TARGET_SIMD"
- + "psubadd.h\t%0, %1:b, %2:b"
- + [(set_attr "length" "4")
- + (set_attr "type" "alu")])
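- +
- +;; Illustrative sketch (an assumption about how these patterns get
- +;; exercised, using GCC's generic vector extensions):
- +;;
- +;;   typedef short v2hi __attribute__ ((vector_size (4)));
- +;;   v2hi vadd (v2hi a, v2hi b) { return a + b; }  /* -> padd.h */
- +;;   v2hi vsub (v2hi a, v2hi b) { return a - b; }  /* -> psub.h */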
- --- /dev/null
- +++ b/gcc/config/avr32/sync.md
- @@ -0,0 +1,244 @@
- +;;=================================================================
- +;; Atomic operations
- +;;=================================================================
- +
- +
- +(define_insn "sync_compare_and_swapsi"
- + [(set (match_operand:SI 0 "register_operand" "=&r,&r")
- + (match_operand:SI 1 "memory_operand" "+RKs16,+RKs16"))
- + (set (match_dup 1)
- + (unspec_volatile:SI
- + [(match_dup 1)
- + (match_operand:SI 2 "register_immediate_operand" "r,Ks21")
- + (match_operand:SI 3 "register_operand" "r,r")]
- + VUNSPEC_SYNC_CMPXCHG)) ]
- + ""
- + "0:
- + ssrf\t5
- + ld.w\t%0,%1
- + cp.w\t%0,%2
- + brne\t0f
- + stcond\t%1, %3
- + brne\t0b
- + 0:
- + "
- + [(set_attr "length" "16,18")
- + (set_attr "cc" "clobber")]
- + )
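- +
- +;; Illustrative mapping (standard __sync builtin): on SImode objects
- +;; __sync_val_compare_and_swap (&x, old, new) expands through this
- +;; pattern, i.e. the ssrf/ld.w/cp.w/stcond sequence above, retrying
- +;; via the brne until the conditional store succeeds.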
- +
- +
- +(define_code_iterator atomic_op [plus minus and ior xor])
- +(define_code_attr atomic_asm_insn [(plus "add") (minus "sub") (and "and") (ior "or") (xor "eor")])
- +(define_code_attr atomic_insn [(plus "add") (minus "sub") (and "and") (ior "ior") (xor "xor")])
- +
- +(define_insn "sync_loadsi"
- + ; NB! Put an early clobber on the destination operand to
- + ; keep gcc from using the same register for the source and
- + ; destination. This prevents gcc from clobbering the source
- + ; operand, since these instructions are effectively inside
- + ; a loop.
- + [(set (match_operand:SI 0 "register_operand" "=&r")
- + (unspec_volatile:SI
- + [(match_operand:SI 1 "avr32_ks16_memory_operand" "RKs16")
- + (label_ref (match_operand 2 "" ""))]
- + VUNSPEC_SYNC_SET_LOCK_AND_LOAD) )]
- + ""
- + "%2:
- + ssrf\t5
- + ld.w\t%0,%1"
- + [(set_attr "length" "6")
- + (set_attr "cc" "clobber")]
- + )
- +
- +(define_insn "sync_store_if_lock"
- + [(set (match_operand:SI 0 "avr32_ks16_memory_operand" "=RKs16")
- + (unspec_volatile:SI
- + [(match_operand:SI 1 "register_operand" "r")
- + (label_ref (match_operand 2 "" ""))]
- + VUNSPEC_SYNC_STORE_IF_LOCK) )]
- + ""
- + "stcond\t%0, %1
- + brne\t%2"
- + [(set_attr "length" "6")
- + (set_attr "cc" "clobber")]
- + )
- +
- +
- +(define_expand "sync_<atomic_insn>si"
- + [(set (match_dup 2)
- + (unspec_volatile:SI
- + [(match_operand:SI 0 "avr32_ks16_memory_operand" "")
- + (match_dup 3)]
- + VUNSPEC_SYNC_SET_LOCK_AND_LOAD))
- + (set (match_dup 2)
- + (atomic_op:SI (match_dup 2)
- + (match_operand:SI 1 "register_immediate_operand" "")))
- + (set (match_dup 0)
- + (unspec_volatile:SI
- + [(match_dup 2)
- + (match_dup 3)]
- + VUNSPEC_SYNC_STORE_IF_LOCK) )
- + (use (match_dup 1))
- + (use (match_dup 4))]
- + ""
- + {
- + rtx *mem_expr = &operands[0];
- + rtx ptr_reg;
- + if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) )
- + {
- + ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0));
- + XEXP (*mem_expr, 0) = ptr_reg;
- + }
- + else
- + {
- + rtx address = XEXP (*mem_expr, 0);
- + if ( REG_P (address) )
- + ptr_reg = address;
- + else if ( REG_P (XEXP (address, 0)) )
- + ptr_reg = XEXP (address, 0);
- + else
- + ptr_reg = XEXP (address, 1);
- + }
- +
- + operands[2] = gen_reg_rtx (SImode);
- + operands[3] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ());
- + operands[4] = ptr_reg;
- +
- + }
- + )
- +
- +
- +
- +(define_expand "sync_old_<atomic_insn>si"
- + [(set (match_operand:SI 0 "register_operand" "")
- + (unspec_volatile:SI
- + [(match_operand:SI 1 "avr32_ks16_memory_operand" "")
- + (match_dup 4)]
- + VUNSPEC_SYNC_SET_LOCK_AND_LOAD))
- + (set (match_dup 3)
- + (atomic_op:SI (match_dup 0)
- + (match_operand:SI 2 "register_immediate_operand" "")))
- + (set (match_dup 1)
- + (unspec_volatile:SI
- + [(match_dup 3)
- + (match_dup 4)]
- + VUNSPEC_SYNC_STORE_IF_LOCK) )
- + (use (match_dup 2))
- + (use (match_dup 5))]
- + ""
- + {
- + rtx *mem_expr = &operands[1];
- + rtx ptr_reg;
- + if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) )
- + {
- + ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0));
- + XEXP (*mem_expr, 0) = ptr_reg;
- + }
- + else
- + {
- + rtx address = XEXP (*mem_expr, 0);
- + if ( REG_P (address) )
- + ptr_reg = address;
- + else if ( REG_P (XEXP (address, 0)) )
- + ptr_reg = XEXP (address, 0);
- + else
- + ptr_reg = XEXP (address, 1);
- + }
- +
- + operands[3] = gen_reg_rtx (SImode);
- + operands[4] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ());
- + operands[5] = ptr_reg;
- + }
- + )
- +
- +(define_expand "sync_new_<atomic_insn>si"
- + [(set (match_operand:SI 0 "register_operand" "")
- + (unspec_volatile:SI
- + [(match_operand:SI 1 "avr32_ks16_memory_operand" "")
- + (match_dup 3)]
- + VUNSPEC_SYNC_SET_LOCK_AND_LOAD))
- + (set (match_dup 0)
- + (atomic_op:SI (match_dup 0)
- + (match_operand:SI 2 "register_immediate_operand" "")))
- + (set (match_dup 1)
- + (unspec_volatile:SI
- + [(match_dup 0)
- + (match_dup 3)]
- + VUNSPEC_SYNC_STORE_IF_LOCK) )
- + (use (match_dup 2))
- + (use (match_dup 4))]
- + ""
- + {
- + rtx *mem_expr = &operands[1];
- + rtx ptr_reg;
- + if ( !avr32_ks16_memory_operand (*mem_expr, GET_MODE (*mem_expr)) )
- + {
- + ptr_reg = force_reg (Pmode, XEXP (*mem_expr, 0));
- + XEXP (*mem_expr, 0) = ptr_reg;
- + }
- + else
- + {
- + rtx address = XEXP (*mem_expr, 0);
- + if ( REG_P (address) )
- + ptr_reg = address;
- + else if ( REG_P (XEXP (address, 0)) )
- + ptr_reg = XEXP (address, 0);
- + else
- + ptr_reg = XEXP (address, 1);
- + }
- +
- + operands[3] = gen_rtx_LABEL_REF(Pmode, gen_label_rtx ());
- + operands[4] = ptr_reg;
- + }
- + )
- +
- +
- +;(define_insn "sync_<atomic_insn>si"
- +; [(set (match_operand:SI 0 "memory_operand" "+RKs16")
- +; (unspec_volatile:SI
- +; [(atomic_op:SI (match_dup 0)
- +; (match_operand:SI 1 "register_operand" "r"))]
- +; VUNSPEC_SYNC_CMPXCHG))
- +; (clobber (match_scratch:SI 2 "=&r"))]
- +; ""
- +; "0:
- +; ssrf\t5
- +; ld.w\t%2,%0
- +; <atomic_asm_insn>\t%2,%1
- +; stcond\t%0, %2
- +; brne\t0b
- +; "
- +; [(set_attr "length" "14")
- +; (set_attr "cc" "clobber")]
- +; )
- +;
- +;(define_insn "sync_new_<atomic_insn>si"
- +; [(set (match_operand:SI 1 "memory_operand" "+RKs16")
- +; (unspec_volatile:SI
- +; [(atomic_op:SI (match_dup 1)
- +; (match_operand:SI 2 "register_operand" "r"))]
- +; VUNSPEC_SYNC_CMPXCHG))
- +; (set (match_operand:SI 0 "register_operand" "=&r")
- +; (atomic_op:SI (match_dup 1)
- +; (match_dup 2)))]
- +; ""
- +; "0:
- +; ssrf\t5
- +; ld.w\t%0,%1
- +; <atomic_asm_insn>\t%0,%2
- +; stcond\t%1, %0
- +; brne\t0b
- +; "
- +; [(set_attr "length" "14")
- +; (set_attr "cc" "clobber")]
- +; )
- +
- +(define_insn "sync_lock_test_and_setsi"
- + [ (set (match_operand:SI 0 "register_operand" "=&r")
- + (match_operand:SI 1 "memory_operand" "+RKu00"))
- + (set (match_dup 1)
- + (match_operand:SI 2 "register_operand" "r")) ]
- + ""
- + "xchg\t%0, %p1, %2"
- + [(set_attr "length" "4")]
- + )
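- +
- +;; Illustrative mapping (standard __sync builtin):
- +;;
- +;;   int old = __sync_lock_test_and_set (&lock, 1);  /* emits xchg */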
- --- /dev/null
- +++ b/gcc/config/avr32/t-avr32
- @@ -0,0 +1,118 @@
- +
- +MD_INCLUDES= $(srcdir)/config/avr32/avr32.md \
- + $(srcdir)/config/avr32/sync.md \
- + $(srcdir)/config/avr32/simd.md \
- + $(srcdir)/config/avr32/predicates.md
- +
- +s-config s-conditions s-flags s-codes s-constants s-emit s-recog s-preds \
- + s-opinit s-extract s-peep s-attr s-attrtab s-output: $(MD_INCLUDES)
- +
- +# We want fine-grained libraries, so use the new code
- +# to build the floating point emulation libraries.
- +FPBIT = fp-bit.c
- +DPBIT = dp-bit.c
- +
- +LIB1ASMSRC = avr32/lib1funcs.S
- +LIB1ASMFUNCS = _avr32_f64_mul _avr32_f64_mul_fast _avr32_f64_addsub _avr32_f64_addsub_fast _avr32_f64_to_u32 \
- + _avr32_f64_to_s32 _avr32_f64_to_u64 _avr32_f64_to_s64 _avr32_u32_to_f64 \
- + _avr32_s32_to_f64 _avr32_f64_cmp_eq _avr32_f64_cmp_ge _avr32_f64_cmp_lt \
- + _avr32_f32_cmp_eq _avr32_f32_cmp_ge _avr32_f32_cmp_lt _avr32_f64_div _avr32_f64_div_fast \
- + _avr32_f32_div _avr32_f32_div_fast _avr32_f32_addsub _avr32_f32_addsub_fast \
- + _avr32_f32_mul _avr32_s32_to_f32 _avr32_u32_to_f32 _avr32_f32_to_s32 \
- + _avr32_f32_to_u32 _avr32_f32_to_f64 _avr32_f64_to_f32 _mulsi3
- +
- +#LIB2FUNCS_EXTRA += $(srcdir)/config/avr32/lib2funcs.S
- +
- +MULTILIB_OPTIONS = march=ap/march=ucr1/march=ucr2/march=ucr2nomul/march=ucr3/march=ucr3fp
- +MULTILIB_DIRNAMES = ap ucr1 ucr2 ucr2nomul ucr3 ucr3fp
- +MULTILIB_EXCEPTIONS =
- +MULTILIB_MATCHES += march?ap=mpart?ap7000
- +MULTILIB_MATCHES += march?ap=mpart?ap7001
- +MULTILIB_MATCHES += march?ap=mpart?ap7002
- +MULTILIB_MATCHES += march?ap=mpart?ap7200
- +MULTILIB_MATCHES += march?ucr1=march?uc
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3a0512es
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0128
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0512
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1128
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1256
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3a1512es
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1512
- +MULTILIB_MATCHES += march?ucr2nomul=mpart?uc3a3revd
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a364
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a364s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a464
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a464s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a4128
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a4128s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a4256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a4256s
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b064
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0128
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256es
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3b0512
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3b0512revc
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b164
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1128
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256es
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3b1512
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3b1512revc
- +MULTILIB_MATCHES += march?ucr3=mpart?uc64d3
- +MULTILIB_MATCHES += march?ucr3=mpart?uc128d3
- +MULTILIB_MATCHES += march?ucr3=mpart?uc64d4
- +MULTILIB_MATCHES += march?ucr3=mpart?uc128d4
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3c0512crevc
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3c1512crevc
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3c2512crevc
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l0256
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l0128
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l064
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l032
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l016
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l064revb
- +MULTILIB_MATCHES += march?ucr3=mpart?uc64l3u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc128l3u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc256l3u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc64l4u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc128l4u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc256l4u
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c064c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c0128c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c0256c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c0512c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c164c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c1128c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c1256c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c1512c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c264c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c2128c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c2256c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c2512c
- +MULTILIB_MATCHES += march?ucr3=mpart?mxt768e
- +
- +
- +EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o crti.o crtn.o
- +
- +CRTSTUFF_T_CFLAGS = -mrelax
- +CRTSTUFF_T_CFLAGS_S = -mrelax -fPIC
- +TARGET_LIBGCC2_CFLAGS += -mrelax
- +
- +LIBGCC = stmp-multilib
- +INSTALL_LIBGCC = install-multilib
- +
- +fp-bit.c: $(srcdir)/config/fp-bit.c
- + echo '#define FLOAT' > fp-bit.c
- + cat $(srcdir)/config/fp-bit.c >> fp-bit.c
- +
- +dp-bit.c: $(srcdir)/config/fp-bit.c
- + cat $(srcdir)/config/fp-bit.c > dp-bit.c
- +
- +
- +
- --- /dev/null
- +++ b/gcc/config/avr32/t-avr32-linux
- @@ -0,0 +1,118 @@
- +
- +MD_INCLUDES= $(srcdir)/config/avr32/avr32.md \
- + $(srcdir)/config/avr32/sync.md \
- + $(srcdir)/config/avr32/simd.md \
- + $(srcdir)/config/avr32/predicates.md
- +
- +s-config s-conditions s-flags s-codes s-constants s-emit s-recog s-preds \
- + s-opinit s-extract s-peep s-attr s-attrtab s-output: $(MD_INCLUDES)
- +
- +# We want fine-grained libraries, so use the new code
- +# to build the floating point emulation libraries.
- +FPBIT = fp-bit.c
- +DPBIT = dp-bit.c
- +
- +LIB1ASMSRC = avr32/lib1funcs.S
- +LIB1ASMFUNCS = _avr32_f64_mul _avr32_f64_mul_fast _avr32_f64_addsub _avr32_f64_addsub_fast _avr32_f64_to_u32 \
- + _avr32_f64_to_s32 _avr32_f64_to_u64 _avr32_f64_to_s64 _avr32_u32_to_f64 \
- + _avr32_s32_to_f64 _avr32_f64_cmp_eq _avr32_f64_cmp_ge _avr32_f64_cmp_lt \
- + _avr32_f32_cmp_eq _avr32_f32_cmp_ge _avr32_f32_cmp_lt _avr32_f64_div _avr32_f64_div_fast \
- + _avr32_f32_div _avr32_f32_div_fast _avr32_f32_addsub _avr32_f32_addsub_fast \
- + _avr32_f32_mul _avr32_s32_to_f32 _avr32_u32_to_f32 _avr32_f32_to_s32 \
- + _avr32_f32_to_u32 _avr32_f32_to_f64 _avr32_f64_to_f32 _mulsi3
- +
- +#LIB2FUNCS_EXTRA += $(srcdir)/config/avr32/lib2funcs.S
- +
- +MULTILIB_OPTIONS = march=ap/march=ucr1/march=ucr2/march=ucr2nomul/march=ucr3/march=ucr3fp
- +MULTILIB_DIRNAMES = ap ucr1 ucr2 ucr2nomul ucr3 ucr3fp
- +MULTILIB_EXCEPTIONS =
- +MULTILIB_MATCHES += march?ap=mpart?ap7000
- +MULTILIB_MATCHES += march?ap=mpart?ap7001
- +MULTILIB_MATCHES += march?ap=mpart?ap7002
- +MULTILIB_MATCHES += march?ap=mpart?ap7200
- +MULTILIB_MATCHES += march?ucr1=march?uc
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3a0512es
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0128
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a0512
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1128
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1256
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3a1512es
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a1512
- +MULTILIB_MATCHES += march?ucr2nomul=mpart?uc3a3revd
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a364
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a364s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3128s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a3256s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a464
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a464s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a4128
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a4128s
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a4256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3a4256s
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b064
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0128
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256es
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b0256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3b0512
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3b0512revc
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b164
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1128
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256es
- +MULTILIB_MATCHES += march?ucr1=mpart?uc3b1256
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3b1512
- +MULTILIB_MATCHES += march?ucr2=mpart?uc3b1512revc
- +MULTILIB_MATCHES += march?ucr3=mpart?uc64d3
- +MULTILIB_MATCHES += march?ucr3=mpart?uc128d3
- +MULTILIB_MATCHES += march?ucr3=mpart?uc64d4
- +MULTILIB_MATCHES += march?ucr3=mpart?uc128d4
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3c0512crevc
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3c1512crevc
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3c2512crevc
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l0256
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l0128
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l064
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l032
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l016
- +MULTILIB_MATCHES += march?ucr3=mpart?uc3l064revb
- +MULTILIB_MATCHES += march?ucr3=mpart?uc64l3u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc128l3u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc256l3u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc64l4u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc128l4u
- +MULTILIB_MATCHES += march?ucr3=mpart?uc256l4u
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c064c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c0128c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c0256c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c0512c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c164c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c1128c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c1256c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c1512c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c264c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c2128c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c2256c
- +MULTILIB_MATCHES += march?ucr3fp=mpart?uc3c2512c
- +MULTILIB_MATCHES += march?ucr3=mpart?mxt768e
- +
- +
- +EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o
- +
- +CRTSTUFF_T_CFLAGS = -mrelax
- +CRTSTUFF_T_CFLAGS_S = -mrelax -fPIC
- +TARGET_LIBGCC2_CFLAGS += -mrelax
- +
- +LIBGCC = stmp-multilib
- +INSTALL_LIBGCC = install-multilib
- +
- +fp-bit.c: $(srcdir)/config/fp-bit.c
- + echo '#define FLOAT' > fp-bit.c
- + cat $(srcdir)/config/fp-bit.c >> fp-bit.c
- +
- +dp-bit.c: $(srcdir)/config/fp-bit.c
- + cat $(srcdir)/config/fp-bit.c > dp-bit.c
- +
- +
- +
- --- /dev/null
- +++ b/gcc/config/avr32/t-elf
- @@ -0,0 +1,16 @@
- +
- +# Assemble startup files.
- +$(T)crti.o: $(srcdir)/config/avr32/crti.asm $(GCC_PASSES)
- + $(GCC_FOR_TARGET) $(CRTSTUFF_CFLAGS) $(CRTSTUFF_T_CFLAGS) $(INCLUDES) \
- + -c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/avr32/crti.asm
- +
- +$(T)crtn.o: $(srcdir)/config/avr32/crtn.asm $(GCC_PASSES)
- + $(GCC_FOR_TARGET) $(CRTSTUFF_CFLAGS) $(CRTSTUFF_T_CFLAGS) $(INCLUDES) \
- + -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/avr32/crtn.asm
- +
- +
- +# Build the libraries for both hard and soft floating point
- +EXTRA_MULTILIB_PARTS = crtbegin.o crtbeginS.o crtend.o crtendS.o crti.o crtn.o
- +
- +LIBGCC = stmp-multilib
- +INSTALL_LIBGCC = install-multilib
- --- /dev/null
- +++ b/gcc/config/avr32/uc3fpu.md
- @@ -0,0 +1,199 @@
- +;; AVR32 machine description file for Floating-Point instructions.
- +;; Copyright 2003-2006 Atmel Corporation.
- +;;
- +;;
- +;; This file is part of GCC.
- +;;
- +;; This program is free software; you can redistribute it and/or modify
- +;; it under the terms of the GNU General Public License as published by
- +;; the Free Software Foundation; either version 2 of the License, or
- +;; (at your option) any later version.
- +;;
- +;; This program is distributed in the hope that it will be useful,
- +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
- +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- +;; GNU General Public License for more details.
- +;;
- +;; You should have received a copy of the GNU General Public License
- +;; along with this program; if not, write to the Free Software
- +;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- +
- +(define_insn "*movsf_uc3fp"
- + [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,r,m")
- + (match_operand:SF 1 "general_operand" "r,G,m,r"))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "@
- + mov\t%0, %1
- + mov\t%0, %1
- + ld.w\t%0, %1
- + st.w\t%0, %1"
- + [(set_attr "length" "2,4,4,4")
- + (set_attr "type" "alu,alu,load,store")])
- +
- +(define_insn "mulsf3"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (mult:SF (match_operand:SF 1 "register_operand" "r")
- + (match_operand:SF 2 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fmul.s\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "fmul")])
- +
- +(define_insn "nmulsf3"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (neg:SF (mult:SF (match_operand:SF 1 "register_operand" "%r")
- + (match_operand:SF 2 "register_operand" "r"))))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fnmul.s\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "fmul")])
- +
- +(define_insn "macsf3"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (plus:SF (mult:SF (match_operand:SF 1 "register_operand" "r")
- + (match_operand:SF 2 "register_operand" "r"))
- + (match_operand:SF 3 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fmac.s\t%0, %3, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "fmul")])
- +
- +;(define_insn "nmacsf3"
- +; [(set (match_operand:SF 0 "register_operand" "=r")
- +; (plus:SF (neg:SF (match_operand:SF 1 "register_operand" "r"))
- +; (mult:SF(match_operand:SF 2 "register_operand" "r")
- +; (match_operand:SF 3 "register_operand" "r"))))]
- +; "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- +; "fnmac.s\t%0, %1, %2, %3"
- +; [(set_attr "length" "4")
- +; (set_attr "type" "fmul")])
- +
- +(define_insn "nmacsf3"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (minus:SF (mult:SF (match_operand:SF 2 "register_operand" "r")
- + (match_operand:SF 3 "register_operand" "r"))
- + (match_operand:SF 1 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fnmac.s\t%0, %1, %2, %3"
- + [(set_attr "length" "4")
- + (set_attr "type" "fmul")])
- +
- +(define_insn "msubacsf3"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (minus:SF (match_operand:SF 3 "register_operand" "r")
- + (mult:SF (match_operand:SF 1 "register_operand" "r")
- + (match_operand:SF 2 "register_operand" "r"))))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fmsc.s\t%0, %3, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "fmul")])
- +
- +(define_insn "nmsubacsf3"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (minus:SF (neg:SF (mult:SF (match_operand:SF 1 "register_operand" "r")
- + (match_operand:SF 2 "register_operand" "r")))
- + (match_operand:SF 3 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fnmsc.s\t%0, %3, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "fmul")])
- +
- +(define_insn "addsf3"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (plus:SF (match_operand:SF 1 "register_operand" "%r")
- + (match_operand:SF 2 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fadd.s\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "fmul")])
- +
- +(define_insn "subsf3"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (minus:SF (match_operand:SF 1 "register_operand" "r")
- + (match_operand:SF 2 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fsub.s\t%0, %1, %2"
- + [(set_attr "length" "4")
- + (set_attr "type" "fmul")])
- +
- +(define_insn "fixuns_truncsfsi2"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (unsigned_fix:SI (match_operand:SF 1 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fcastrs.uw\t%0, %1"
- + [(set_attr "length" "4")])
- +
- +(define_insn "fix_truncsfsi2"
- + [(set (match_operand:SI 0 "register_operand" "=r")
- + (fix:SI (match_operand:SF 1 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fcastrs.sw\t%0, %1"
- + [(set_attr "length" "4")])
- +
- +(define_insn "floatunssisf2"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (unsigned_float:SF (match_operand:SI 1 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fcastuw.s\t%0, %1"
- + [(set_attr "length" "4")])
- +
- +(define_insn "floatsisf2"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (float:SF (match_operand:SI 1 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "fcastsw.s\t%0, %1"
- + [(set_attr "length" "4")])
- +
- +(define_insn "cmpsf_internal_uc3fp"
- + [(set (cc0)
- + (compare:CC
- + (match_operand:SF 0 "register_operand" "r")
- + (match_operand:SF 1 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + {
- + avr32_branch_type = CMP_SF;
- + if (!rtx_equal_p(cc_prev_status.mdep.value, SET_SRC(PATTERN (insn))) )
- + return "fcmp.s\t%0, %1";
- + return "";
- + }
- + [(set_attr "length" "4")
- + (set_attr "cc" "compare")])
- +
- +(define_expand "divsf3"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (div:SF (match_operand:SF 1 "register_operand" "r")
- + (match_operand:SF 2 "register_operand" "r")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT && flag_unsafe_math_optimizations"
- + "{
- + emit_insn(gen_frcpa_internal(operands[0],operands[2]));
- + emit_insn(gen_mulsf3(operands[0],operands[0],operands[1]));
- + DONE;
- + }"
- +)
- +
- +(define_insn "frcpa_internal"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (unspec:SF [(match_operand:SF 1 "register_operand" "r")] UNSPEC_FRCPA))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "frcpa.s %0,%1"
- + [(set_attr "length" "4")])
- +
- +(define_expand "sqrtsf2"
- + [(set (match_operand:SF 0 "register_operand" "")
- + (sqrt:SF (match_operand:SF 1 "register_operand" "")))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT && flag_unsafe_math_optimizations"
- + "
- +{
- + rtx scratch = gen_reg_rtx (SFmode);
- + emit_insn (gen_rsqrtsf2 (scratch, operands[1], CONST1_RTX (SFmode)));
- + emit_insn (gen_divsf3(operands[0], force_reg (SFmode, CONST1_RTX (SFmode)),
- + scratch));
- + DONE;
- +}")
- +
- +(define_insn "rsqrtsf2"
- + [(set (match_operand:SF 0 "register_operand" "=r")
- + (div:SF (match_operand:SF 2 "const_1f_operand" "F")
- + (sqrt:SF (match_operand:SF 1 "register_operand" "?r"))))]
- + "TARGET_ARCH_FPU && TARGET_HARD_FLOAT"
- + "frsqrta.s %1, %0")
- --- /dev/null
- +++ b/gcc/config/avr32/uclinux-elf.h
- @@ -0,0 +1,20 @@
- +
- +/* Run-time Target Specification. */
- +#undef TARGET_VERSION
- +#define TARGET_VERSION fputs (" (AVR32 uClinux with ELF)", stderr)
- +
- +/* We don't want a .jcr section on uClinux. As if this makes a difference... */
- +#define TARGET_USE_JCR_SECTION 0
- +
- +/* Here we go. Drop the crtbegin/crtend stuff completely. */
- +#undef STARTFILE_SPEC
- +#define STARTFILE_SPEC \
- + "%{!shared: %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s}" \
- + " %{!p:%{profile:gcrt1.o%s}" \
- + " %{!profile:crt1.o%s}}}} crti.o%s"
- +
- +#undef ENDFILE_SPEC
- +#define ENDFILE_SPEC "crtn.o%s"
- +
- +#undef TARGET_DEFAULT
- +#define TARGET_DEFAULT (AVR32_FLAG_NO_INIT_GOT)
- --- a/gcc/config/host-linux.c
- +++ b/gcc/config/host-linux.c
- @@ -25,6 +25,9 @@
- #include "hosthooks.h"
- #include "hosthooks-def.h"
-
- +#ifndef SSIZE_MAX
- +#define SSIZE_MAX LONG_MAX
- +#endif
-
- /* Linux has a feature called exec-shield-randomize that perturbs the
- address of non-fixed mapped segments by a (relatively) small amount.
- --- a/gcc/config.gcc
- +++ b/gcc/config.gcc
- @@ -810,6 +810,24 @@ avr-*-rtems*)
- avr-*-*)
- tm_file="avr/avr.h dbxelf.h"
- ;;
- +avr32*-*-linux*)
- + tm_file="dbxelf.h elfos.h linux.h avr32/linux-elf.h avr32/avr32.h "
- + tmake_file="t-linux avr32/t-avr32-linux"
- + extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o"
- + extra_modes=avr32/avr32-modes.def
- + gnu_ld=yes
- + ;;
- +avr32*-*-uclinux*)
- + tm_file="dbxelf.h elfos.h linux.h avr32/linux-elf.h avr32/uclinux-elf.h avr32/avr32.h"
- + tmake_file="t-linux avr32/t-avr32-linux"
- + extra_modes=avr32/avr32-modes.def
- + gnu_ld=yes
- + ;;
- +avr32-*-*)
- + tm_file="dbxelf.h elfos.h avr32/avr32.h avr32/avr32-elf.h"
- + tmake_file="avr32/t-avr32 avr32/t-elf"
- + extra_modes=avr32/avr32-modes.def
- + ;;
- bfin*-elf*)
- tm_file="${tm_file} dbxelf.h elfos.h bfin/elf.h"
- tmake_file=bfin/t-bfin-elf
- @@ -2764,6 +2782,32 @@ case "${target}" in
- fi
- ;;
-
- + avr32*-*-*)
- + supported_defaults="part arch"
- +
- + case "$with_part" in
- + "" \
- + | "ap7000" | "ap7010" | "ap7020" | "uc3a0256" | "uc3a0512" | "uc3a1128" | "uc3a1256" | "uc3a1512" )
- + # OK
- + ;;
- + *)
- + echo "Unknown part used in --with-part=$with_part" 1>&2
- + exit 1
- + ;;
- + esac
- +
- + case "$with_arch" in
- + "" \
- + | "ap" | "uc")
- + # OK
- + ;;
- + *)
- + echo "Unknown arch used in --with-arch=$with_arch" 1>&2
- + exit 1
- + ;;
- + esac
- + ;;
- +
- fr*-*-*linux*)
- supported_defaults=cpu
- case "$with_cpu" in
- --- a/gcc/configure.ac
- +++ b/gcc/configure.ac
- @@ -2240,10 +2240,9 @@ L2:],
- as_ver=`$gcc_cv_as --version 2>/dev/null | sed 1q`
- if echo "$as_ver" | grep GNU > /dev/null; then
- changequote(,)dnl
- - as_vers=`echo $as_ver | sed -n \
- - -e 's,^.*[ ]\([0-9][0-9]*\.[0-9][0-9]*.*\)$,\1,p'`
- - as_major=`expr "$as_vers" : '\([0-9]*\)'`
- - as_minor=`expr "$as_vers" : '[0-9]*\.\([0-9]*\)'`
- + as_ver=`echo $as_ver | sed -e 's/GNU assembler\( (GNU Binutils)\)\? \([0-9.][0-9.]*\).*/\2/'`
- + as_major=`echo $as_ver | sed 's/\..*//'`
- + as_minor=`echo $as_ver | sed 's/[^.]*\.\([0-9]*\).*/\1/'`
- changequote([,])dnl
- if test $as_major -eq 2 && test $as_minor -lt 11
- then :
- @@ -3308,7 +3307,7 @@ case "$target" in
- i?86*-*-* | mips*-*-* | alpha*-*-* | powerpc*-*-* | sparc*-*-* | m68*-*-* \
- | x86_64*-*-* | hppa*-*-* | arm*-*-* \
- | xstormy16*-*-* | cris-*-* | crisv32-*-* | xtensa*-*-* | bfin-*-* | score*-*-* \
- - | spu-*-* | fido*-*-* | m32c-*-*)
- + | spu-*-* | fido*-*-* | m32c-*-* | avr32-*-*)
- insn="nop"
- ;;
- ia64*-*-* | s390*-*-*)
- --- a/gcc/doc/extend.texi
- +++ b/gcc/doc/extend.texi
- @@ -2397,7 +2397,7 @@ This attribute is ignored for R8C target
-
- @item interrupt
- @cindex interrupt handler functions
- -Use this attribute on the ARM, AVR, CRX, M32C, M32R/D, m68k,
- +Use this attribute on the ARM, AVR, AVR32, CRX, M32C, M32R/D, m68k,
- and Xstormy16 ports to indicate that the specified function is an
- interrupt handler. The compiler will generate function entry and exit
- sequences suitable for use in an interrupt handler when this attribute
- @@ -2417,6 +2417,15 @@ void f () __attribute__ ((interrupt ("IR
-
- Permissible values for this parameter are: IRQ, FIQ, SWI, ABORT and UNDEF@.
-
- +Note that for the AVR32 you can specify which register banking scheme
- +is used for the interrupt mode this interrupt handler serves, like this:
- +
- +@smallexample
- +void f () __attribute__ ((interrupt ("FULL")));
- +@end smallexample
- +
- +Permissible values for this parameter are: FULL, HALF, NONE and UNDEF.
- +
- On ARMv7-M the interrupt type is ignored, and the attribute means the function
- may be called with a word aligned stack pointer.
-
- @@ -4188,6 +4197,23 @@ placed in either the @code{.bss_below100
-
- @end table
-
- +@subsection AVR32 Variable Attributes
- +
- +One attribute is currently defined for AVR32 configurations:
- +@code{rmw_addressable}
- +
- +@table @code
- +@item rmw_addressable
- +@cindex @code{rmw_addressable} attribute
- +
- +This attribute can be used to signal that a variable can be accessed
- +with the addressing mode of the AVR32 Atomic Read-Modify-Write memory
- +instructions and hence make it possible for gcc to generate these
- +instructions without using built-in functions or inline assembly statements.
- +Variables used within the AVR32 Atomic Read-Modify-Write built-in
- +functions will automatically get the @code{rmw_addressable} attribute.
- +@end table
- +
- @subsection AVR Variable Attributes
-
- @table @code
- @@ -7042,6 +7068,7 @@ instructions, but allow the compiler to
- * Alpha Built-in Functions::
- * ARM iWMMXt Built-in Functions::
- * ARM NEON Intrinsics::
- +* AVR32 Built-in Functions::
- * Blackfin Built-in Functions::
- * FR-V Built-in Functions::
- * X86 Built-in Functions::
- @@ -7284,6 +7311,7 @@ long long __builtin_arm_wxor (long long,
- long long __builtin_arm_wzero ()
- @end smallexample
-
- +
- @node ARM NEON Intrinsics
- @subsection ARM NEON Intrinsics
-
- @@ -7292,6 +7320,74 @@ when the @option{-mfpu=neon} switch is u
-
- @include arm-neon-intrinsics.texi
-
- +@node AVR32 Built-in Functions
- +@subsection AVR32 Built-in Functions
- +
- +Built-in functions for atomic memory (RMW) instructions. Note that these
- +built-ins will fail for targets where the RMW instructions are not
- +implemented. Also note that these instructions accept only a Ks15 << 2
- +memory address and will therefore not work with any runtime-computed
- +memory addresses. The user is responsible for making sure that any
- +pointers used within these functions point to a valid memory address.
- +
- +@smallexample
- +void __builtin_mems(int */*ptr*/, int /*bit*/)
- +void __builtin_memc(int */*ptr*/, int /*bit*/)
- +void __builtin_memt(int */*ptr*/, int /*bit*/)
- +@end smallexample
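- +
- +A minimal sketch of their use (assuming the variable is placed at a
- +Ks15 << 2 addressable location, e.g. via the @code{rmw_addressable}
- +attribute):
- +
- +@smallexample
- +static int flags __attribute__ ((rmw_addressable));
- +
- +void
- +set_flag0 (void)
- +@{
- +  __builtin_mems (&flags, 0); /* atomically set bit 0 */
- +@}
- +@end smallexample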
- +
- +Built-in functions for DSP instructions. Note that these built-ins will
- +fail for targets where the DSP instructions are not implemented.
- +
- +@smallexample
- +int __builtin_sats (int /*Rd*/,int /*sa*/, int /*bn*/)
- +int __builtin_satu (int /*Rd*/,int /*sa*/, int /*bn*/)
- +int __builtin_satrnds (int /*Rd*/,int /*sa*/, int /*bn*/)
- +int __builtin_satrndu (int /*Rd*/,int /*sa*/, int /*bn*/)
- +short __builtin_mulsathh_h (short, short)
- +int __builtin_mulsathh_w (short, short)
- +short __builtin_mulsatrndhh_h (short, short)
- +int __builtin_mulsatrndwh_w (int, short)
- +int __builtin_mulsatwh_w (int, short)
- +int __builtin_macsathh_w (int, short, short)
- +short __builtin_satadd_h (short, short)
- +short __builtin_satsub_h (short, short)
- +int __builtin_satadd_w (int, int)
- +int __builtin_satsub_w (int, int)
- +long long __builtin_mulwh_d(int, short)
- +long long __builtin_mulnwh_d(int, short)
- +long long __builtin_macwh_d(long long, int, short)
- +long long __builtin_machh_d(long long, short, short)
- +@end smallexample
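- +
- +For example, a saturating halfword addition (a sketch; this presumably
- +maps to the @code{satadd.h} instruction):
- +
- +@smallexample
- +short
- +sat_add (short a, short b)
- +@{
- +  return __builtin_satadd_h (a, b);
- +@}
- +@end smallexample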
- +
- +Other built-in functions for instructions that cannot easily be
- +generated by the compiler.
- +
- +@smallexample
- +void __builtin_ssrf(int);
- +void __builtin_csrf(int);
- +void __builtin_musfr(int);
- +int __builtin_mustr(void);
- +int __builtin_mfsr(int /*Status Register Address*/)
- +void __builtin_mtsr(int /*Status Register Address*/, int /*Value*/)
- +int __builtin_mfdr(int /*Debug Register Address*/)
- +void __builtin_mtdr(int /*Debug Register Address*/, int /*Value*/)
- +void __builtin_cache(void * /*Address*/, int /*Cache Operation*/)
- +void __builtin_sync(int /*Sync Operation*/)
- +void __builtin_tlbr(void)
- +void __builtin_tlbs(void)
- +void __builtin_tlbw(void)
- +void __builtin_breakpoint(void)
- +int __builtin_xchg(void * /*Address*/, int /*Value*/ )
- +short __builtin_bswap_16(short)
- +int __builtin_bswap_32(int)
- +void __builtin_cop(int/*cpnr*/, int/*crd*/, int/*crx*/, int/*cry*/, int/*op*/)
- +int __builtin_mvcr_w(int/*cpnr*/, int/*crs*/)
- +void __builtin_mvrc_w(int/*cpnr*/, int/*crd*/, int/*value*/)
- +long long __builtin_mvcr_d(int/*cpnr*/, int/*crs*/)
- +void __builtin_mvrc_d(int/*cpnr*/, int/*crd*/, long long/*value*/)
- +@end smallexample
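- +
- +For example, reading a system register with @code{__builtin_mfsr} (the
- +register offset below is an assumption for illustration only):
- +
- +@smallexample
- +#define SR_COUNT 0x108 /* assumed offset of the COUNT register */
- +
- +int
- +read_count (void)
- +@{
- +  return __builtin_mfsr (SR_COUNT);
- +@}
- +@end smallexample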
- +
- @node Blackfin Built-in Functions
- @subsection Blackfin Built-in Functions
-
- --- a/gcc/doc/invoke.texi
- +++ b/gcc/doc/invoke.texi
- @@ -195,7 +195,7 @@ in the following sections.
- -fvisibility-ms-compat @gol
- -Wabi -Wctor-dtor-privacy @gol
- -Wnon-virtual-dtor -Wreorder @gol
- --Weffc++ -Wstrict-null-sentinel @gol
- +-Weffc++ -Wno-deprecated @gol
- -Wno-non-template-friend -Wold-style-cast @gol
- -Woverloaded-virtual -Wno-pmf-conversions @gol
- -Wsign-promo}
- @@ -641,6 +641,12 @@ Objective-C and Objective-C++ Dialects}.
- -mauto-incdec -minmax -mlong-calls -mshort @gol
- -msoft-reg-count=@var{count}}
-
- +@emph{AVR32 Options}
- +@gccoptlist{-muse-rodata-section -mhard-float -msoft-float -mrelax @gol
- +-mforce-double-align -mno-init-got -mrelax -mmd-reorg-opt -masm-addr-pseudos @gol
- +-mpart=@var{part} -mcpu=@var{cpu} -march=@var{arch} @gol
- +-mfast-float -mimm-in-const-pool}
- +
- @emph{MCore Options}
- @gccoptlist{-mhardlit -mno-hardlit -mdiv -mno-div -mrelax-immediates @gol
- -mno-relax-immediates -mwide-bitfields -mno-wide-bitfields @gol
- @@ -3256,13 +3262,11 @@ appears in a class without constructors.
- If you want to warn about code which uses the uninitialized value of the
- variable in its own initializer, use the @option{-Winit-self} option.
-
- -These warnings occur for individual uninitialized or clobbered
- -elements of structure, union or array variables as well as for
- -variables which are uninitialized or clobbered as a whole. They do
- -not occur for variables or elements declared @code{volatile}. Because
- -these warnings depend on optimization, the exact variables or elements
- -for which there are warnings will depend on the precise optimization
- -options and version of GCC used.
- +These warnings occur only for variables that are candidates for
- +register allocation. Therefore, they do not occur for a variable that
- +is declared @code{volatile}, or whose address is taken, or whose size
- +is other than 1, 2, 4 or 8 bytes. Also, they do not occur for
- +structures, unions or arrays, even when they are in registers.
-
- Note that there may be no warning about a variable that is used only
- to compute a value that itself is never used, because such
- @@ -7461,10 +7465,6 @@ If number of candidates in the set is sm
- we always try to remove unnecessary ivs from the set during its
- optimization when a new iv is added to the set.
-
- -@item scev-max-expr-size
- -Bound on size of expressions used in the scalar evolutions analyzer.
- -Large expressions slow the analyzer.
- -
- @item omega-max-vars
- The maximum number of variables in an Omega constraint system.
- The default value is 128.
- @@ -8860,6 +8860,7 @@ platform.
- * ARC Options::
- * ARM Options::
- * AVR Options::
- +* AVR32 Options::
- * Blackfin Options::
- * CRIS Options::
- * CRX Options::
- @@ -9348,6 +9349,145 @@ comply to the C standards, but it will p
- size.
- @end table
-
- +@node AVR32 Options
- +@subsection AVR32 Options
- +@cindex AVR32 Options
- +
- +These options are defined for AVR32 implementations:
- +
- +@table @gcctabopt
- +@item -muse-rodata-section
- +@opindex muse-rodata-section
- +Use section @samp{.rodata} for read-only data instead of @samp{.text}.
- +
- +@item -mhard-float
- +@opindex mhard-float
- +Use floating point coprocessor instructions.
- +
- +@item -msoft-float
- +@opindex msoft-float
- +Use software floating-point library for floating-point operations.
- +
- +@item -mforce-double-align
- +@opindex mforce-double-align
- +Force double-word alignment for double-word memory accesses.
- +
- +@item -masm-addr-pseudos
- +@opindex masm-addr-pseudos
- +Use the assembler pseudo-instructions @samp{lda.w} and @samp{call} for
- +handling direct addresses. (Enabled by default.)
- +
- +@item -mno-init-got
- +@opindex mno-init-got
- +Do not initialize the GOT register before using it when compiling PIC
- +code.
- +
- +@item -mrelax
- +@opindex mrelax
- +Let the invoked assembler and linker perform relaxing.
- +(Enabled by default when the optimization level is above 1.)
- +This means that when the addresses of symbols are known at link time,
- +the linker can optimize @samp{icall} and @samp{mcall}
- +instructions into an @samp{rcall} instruction where possible.
- +Loading the address of a symbol can also be optimized.
- +
- +@item -mmd-reorg-opt
- +@opindex mmd-reorg-opt
- +Perform machine-dependent optimizations in the reorg stage.
- +
- +@item -mpart=@var{part}
- +@opindex mpart
- +Generate code for the specified part. Permissible parts are:
- +@samp{ap7000},
- +@samp{ap7001},
- +@samp{ap7002},
- +@samp{ap7200},
- +@samp{uc3a0128},
- +@samp{uc3a0256},
- +@samp{uc3a0512},
- +@samp{uc3a0512es},
- +@samp{uc3a1128},
- +@samp{uc3a1256},
- +@samp{uc3a1512},
- +@samp{uc3a1512es},
- +@samp{uc3a3revd},
- +@samp{uc3a364},
- +@samp{uc3a364s},
- +@samp{uc3a3128},
- +@samp{uc3a3128s},
- +@samp{uc3a3256},
- +@samp{uc3a3256s},
- +@samp{uc3a464},
- +@samp{uc3a464s},
- +@samp{uc3a4128},
- +@samp{uc3a4128s},
- +@samp{uc3a4256},
- +@samp{uc3a4256s},
- +@samp{uc3b064},
- +@samp{uc3b0128},
- +@samp{uc3b0256},
- +@samp{uc3b0256es},
- +@samp{uc3b0512},
- +@samp{uc3b0512revc},
- +@samp{uc3b164},
- +@samp{uc3b1128},
- +@samp{uc3b1256},
- +@samp{uc3b1256es},
- +@samp{uc3b1512},
- +@samp{uc3b1512revc}
- +@samp{uc64d3},
- +@samp{uc128d3},
- +@samp{uc64d4},
- +@samp{uc128d4},
- +@samp{uc3c0512crevc},
- +@samp{uc3c1512crevc},
- +@samp{uc3c2512crevc},
- +@samp{uc3l0256},
- +@samp{uc3l0128},
- +@samp{uc3l064},
- +@samp{uc3l032},
- +@samp{uc3l016},
- +@samp{uc3l064revb},
- +@samp{uc64l3u},
- +@samp{uc128l3u},
- +@samp{uc256l3u},
- +@samp{uc64l4u},
- +@samp{uc128l4u},
- +@samp{uc256l4u},
- +@samp{uc3c064c},
- +@samp{uc3c0128c},
- +@samp{uc3c0256c},
- +@samp{uc3c0512c},
- +@samp{uc3c164c},
- +@samp{uc3c1128c},
- +@samp{uc3c1256c},
- +@samp{uc3c1512c},
- +@samp{uc3c264c},
- +@samp{uc3c2128c},
- +@samp{uc3c2256c},
- +@samp{uc3c2512c},
- +@samp{mxt768e}.
- +
- +@item -mcpu=@var{cpu-type}
- +@opindex mcpu
- +Same as @option{-mpart}. This option is obsolete.
- +
- +@item -march=@var{arch}
- +@opindex march
- +Generate code for the specified architecture. Permissible architectures are:
- +@samp{ap}, @samp{uc} and @samp{ucr2}.
- +
- +@item -mfast-float
- +@opindex mfast-float
- +Enable the fast floating-point library, which does not conform to IEEE 754
- +but is still accurate enough for most applications. It rounds away from zero
- +instead of to the nearest even value. Enabled by default if the
- +@option{-funsafe-math-optimizations} switch is specified.
- +
- +@item -mimm-in-const-pool
- +@opindex mimm-in-const-pool
- +Put large immediates in the constant pool. This is enabled by default
- +for architectures with an instruction cache.
- +@end table
- +
- @node Blackfin Options
- @subsection Blackfin Options
- @cindex Blackfin Options
- @@ -9403,29 +9543,12 @@ When enabled, the compiler will ensure t
- contain speculative loads after jump instructions. If this option is used,
- @code{__WORKAROUND_SPECULATIVE_LOADS} is defined.
-
- -@item -mno-specld-anomaly
- -@opindex mno-specld-anomaly
- -Don't generate extra code to prevent speculative loads from occurring.
- -
- @item -mcsync-anomaly
- @opindex mcsync-anomaly
- When enabled, the compiler will ensure that the generated code does not
- contain CSYNC or SSYNC instructions too soon after conditional branches.
- If this option is used, @code{__WORKAROUND_SPECULATIVE_SYNCS} is defined.
-
- -@item -mno-csync-anomaly
- -@opindex mno-csync-anomaly
- -Don't generate extra code to prevent CSYNC or SSYNC instructions from
- -occurring too soon after a conditional branch.
- -
- -@item -mlow-64k
- -@opindex mlow-64k
- -When enabled, the compiler is free to take advantage of the knowledge that
- -the entire program fits into the low 64k of memory.
- -
- -@item -mno-low-64k
- -@opindex mno-low-64k
- -Assume that the program is arbitrarily large. This is the default.
-
- @item -mstack-check-l1
- @opindex mstack-check-l1
- @@ -9439,11 +9562,6 @@ This allows for execute in place and sha
- without virtual memory management. This option implies @option{-fPIC}.
- With a @samp{bfin-elf} target, this option implies @option{-msim}.
-
- -@item -mno-id-shared-library
- -@opindex mno-id-shared-library
- -Generate code that doesn't assume ID based shared libraries are being used.
- -This is the default.
- -
- @item -mleaf-id-shared-library
- @opindex mleaf-id-shared-library
- Generate code that supports shared libraries via the library ID method,
- @@ -9485,11 +9603,6 @@ call on this register. This switch is n
- will lie outside of the 24 bit addressing range of the offset based
- version of subroutine call instruction.
-
- -This feature is not enabled by default. Specifying
- -@option{-mno-long-calls} will restore the default behavior. Note these
- -switches have no effect on how the compiler generates code to handle
- -function calls via function pointers.
- -
- @item -mfast-fp
- @opindex mfast-fp
- Link with the fast floating-point library. This library relaxes some of
- --- a/gcc/doc/md.texi
- +++ b/gcc/doc/md.texi
- @@ -4,6 +4,7 @@
- @c This is part of the GCC manual.
- @c For copying conditions, see the file gcc.texi.
-
- +
- @ifset INTERNALS
- @node Machine Desc
- @chapter Machine Descriptions
- @@ -1685,6 +1686,58 @@ A memory reference suitable for iWMMXt l
- A memory reference suitable for the ARMv4 ldrsb instruction.
- @end table
-
- +@item AVR32 family---@file{avr32.h}
- +@table @code
- +@item f
- +Floating-point registers (f0 to f15).
- +
- +@item Ku@var{bits}
- +Unsigned constant representable with @var{bits} bits, where @var{bits}
- +must be two digits.  For example, an unsigned 8-bit constant is written as @samp{Ku08}.
- +
- +@item Ks@var{bits}
- +Signed constant representable with @var{bits} bits, where @var{bits}
- +must be two digits.  For example, a signed 12-bit constant is written as @samp{Ks12}.
- +
- +@item Is@var{bits}
- +The negated range of a signed constant representable with @var{bits}
- +bits; the same as @samp{Ks@var{bits}} with the range negated.
- +This means that the constant must be in the range @math{-2^{bits-1}+1}
- +to @math{2^{bits-1}}.
- +
- +@item G
- +A single- or double-precision floating-point immediate, or a 64-bit
- +integer immediate, where both the least and most significant words
- +can be loaded with a move instruction.  That is, the integer forms of
- +the values in the least and most significant words are both in the
- +range @math{-2^{20}} to @math{2^{20}-1}.
- +
- +@item RKs@var{bits}
- +A memory reference where the address consists of a base register
- +plus a signed immediate displacement whose range is given by
- +@samp{Ks@var{bits}}, which has the same format as the signed
- +immediate integer constraint given above.
- +
- +@item RKu@var{bits}
- +A memory reference where the address consists of a base register
- +plus an unsigned immediate displacement whose range is given by
- +@samp{Ku@var{bits}}, which has the same format as the unsigned
- +immediate integer constraint given above.
- +
- +@item S
- +A memory reference with an immediate or register offset.
- +
- +@item T
- +A memory reference to a constant pool entry.
- +
- +@item W
- +A valid operand for use in the @samp{lda.w} instruction macro when
- +relaxing is enabled.
- +
- +@item Z
- +A memory reference valid for coprocessor memory instructions.
- +
- +@end table
- +
- @item AVR family---@file{config/avr/constraints.md}
- @table @code
- @item l
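- As a quick illustration of the @samp{Ks@var{bits}} syntax documented
- above, a minimal inline-asm sketch follows.  The constraint string
- comes straight from the table; the @samp{mov} mnemonic and the exact
- immediate range it accepts are our assumptions about the port, not
- something this patch states.
-
- /* Sketch only: assumes AVR32's `mov' takes a 21-bit signed
-    immediate, so the operand is constrained with "Ks21". */
- int load_small_constant (void)
- {
-   int r;
-   __asm__ ("mov\t%0, %1" : "=r" (r) : "Ks21" (-1234));
-   return r;
- }
-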
- --- a/gcc/expmed.c
- +++ b/gcc/expmed.c
- @@ -472,9 +472,9 @@ store_bit_field_1 (rtx str_rtx, unsigned
- ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
- || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
- && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
- - : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
- + : ( (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
- || (offset * BITS_PER_UNIT % bitsize == 0
- - && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
- + && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0)))))
- {
- if (MEM_P (op0))
- op0 = adjust_address (op0, fieldmode, offset);
- --- a/gcc/expr.c
- +++ b/gcc/expr.c
- @@ -52,6 +52,7 @@ along with GCC; see the file COPYING3.
- #include "tree-flow.h"
- #include "target.h"
- #include "timevar.h"
- +#include "c-common.h"
- #include "df.h"
- #include "diagnostic.h"
-
- @@ -3647,16 +3648,17 @@ emit_single_push_insn (enum machine_mode
- }
- else
- {
- + emit_move_insn (stack_pointer_rtx,
- + expand_binop (Pmode,
- #ifdef STACK_GROWS_DOWNWARD
- - /* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC. */
- - dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
- - GEN_INT (-(HOST_WIDE_INT) rounded_size));
- + sub_optab,
- #else
- - /* ??? This seems wrong if STACK_PUSH_CODE == POST_INC. */
- - dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
- - GEN_INT (rounded_size));
- + add_optab,
- #endif
- - dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr);
- + stack_pointer_rtx,
- + GEN_INT (rounded_size),
- + NULL_RTX, 0, OPTAB_LIB_WIDEN));
- + dest_addr = stack_pointer_rtx;
- }
-
- dest = gen_rtx_MEM (mode, dest_addr);
- @@ -5775,7 +5777,8 @@ store_field (rtx target, HOST_WIDE_INT b
- is a bit field, we cannot use addressing to access it.
- Use bit-field techniques or SUBREG to store in it. */
-
- - if (mode == VOIDmode
- + if (
- + mode == VOIDmode
- || (mode != BLKmode && ! direct_store[(int) mode]
- && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
- && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
- @@ -5932,7 +5935,18 @@ get_inner_reference (tree exp, HOST_WIDE
- {
- tree field = TREE_OPERAND (exp, 1);
- size_tree = DECL_SIZE (field);
- - if (!DECL_BIT_FIELD (field))
- + if (!DECL_BIT_FIELD (field)
- + /* Added for AVR32:
- + Bit-fields with a size equal to a target storage
- + type might not cause DECL_BIT_FIELD to return
- + true, since such a field can be optimized into a
- + normal access operation.  But for volatile bit-fields
- + we do not allow this when targetm.narrow_volatile_bitfield ()
- + is false.  We can use DECL_C_BIT_FIELD to check whether
- + this really is a C bit-field. */
- + && !(TREE_THIS_VOLATILE (exp)
- + && !targetm.narrow_volatile_bitfield ()
- + && DECL_C_BIT_FIELD (field)) )
- mode = DECL_MODE (field);
- else if (DECL_MODE (field) == BLKmode)
- blkmode_bitfield = true;
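- To make the comment above concrete, here is the kind of declaration it
- is talking about.  This is a hypothetical example we added, not code
- from the patch: a C bit-field whose width exactly matches a storage
- type, so DECL_BIT_FIELD can be false for it even though
- DECL_C_BIT_FIELD is true.
-
- /* Hypothetical example: a full-width volatile bit-field.  Because
-    `status' occupies exactly 32 bits, it can be treated like an
-    ordinary int member, yet for a volatile object the access width
-    must still be preserved when the target does not narrow volatile
-    bit-field accesses. */
- struct device_regs
- {
-   volatile unsigned int status : 32;
-   volatile unsigned int mode   : 8;   /* an ordinary narrow bit-field */
- };
-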
- @@ -7915,7 +7929,8 @@ expand_expr_real_1 (tree exp, rtx target
- by doing the extract into an object as wide as the field
- (which we know to be the width of a basic mode), then
- storing into memory, and changing the mode to BLKmode. */
- - if (mode1 == VOIDmode
- + if (
- + mode1 == VOIDmode
- || REG_P (op0) || GET_CODE (op0) == SUBREG
- || (mode1 != BLKmode && ! direct_load[(int) mode1]
- && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
- --- a/gcc/function.c
- +++ b/gcc/function.c
- @@ -2810,7 +2810,11 @@ assign_parm_setup_reg (struct assign_par
- assign_parm_remove_parallels (data);
-
- /* Copy the value into the register. */
- - if (data->nominal_mode != data->passed_mode
- + if ( (data->nominal_mode != data->passed_mode
- + /* Added for AVR32: If passed_mode is equal
- + to the promoted nominal mode, why should we convert?
- + The conversion should make no difference. */
- + && data->passed_mode != promoted_nominal_mode)
- || promoted_nominal_mode != data->promoted_mode)
- {
- int save_tree_used;
- --- a/gcc/genemit.c
- +++ b/gcc/genemit.c
- @@ -121,6 +121,24 @@ max_operand_vec (rtx insn, int arg)
- }
-
- static void
- +gen_vararg_prologue(int operands)
- +{
- + int i;
- +
- + if (operands > 1)
- + {
- + for (i = 1; i < operands; i++)
- + printf(" rtx operand%d ATTRIBUTE_UNUSED;\n", i);
- +
- + printf(" va_list args;\n\n");
- + printf(" va_start(args, operand0);\n");
- + for (i = 1; i < operands; i++)
- + printf(" operand%d = va_arg(args, rtx);\n", i);
- + printf(" va_end(args);\n\n");
- + }
- +}
- +
- +static void
- print_code (RTX_CODE code)
- {
- const char *p1;
- @@ -406,18 +424,16 @@ gen_insn (rtx insn, int lineno)
- fatal ("match_dup operand number has no match_operand");
-
- /* Output the function name and argument declarations. */
- - printf ("rtx\ngen_%s (", XSTR (insn, 0));
- + printf ("rtx\ngen_%s ", XSTR (insn, 0));
- +
- if (operands)
- - for (i = 0; i < operands; i++)
- - if (i)
- - printf (",\n\trtx operand%d ATTRIBUTE_UNUSED", i);
- + printf("(rtx operand0 ATTRIBUTE_UNUSED, ...)\n");
- else
- - printf ("rtx operand%d ATTRIBUTE_UNUSED", i);
- - else
- - printf ("void");
- - printf (")\n");
- + printf("(void)\n");
- printf ("{\n");
-
- + gen_vararg_prologue(operands);
- +
- /* Output code to construct and return the rtl for the instruction body. */
-
- if (XVECLEN (insn, 1) == 1)
- @@ -461,16 +477,12 @@ gen_expand (rtx expand)
- operands = max_operand_vec (expand, 1);
-
- /* Output the function name and argument declarations. */
- - printf ("rtx\ngen_%s (", XSTR (expand, 0));
- + printf ("rtx\ngen_%s ", XSTR (expand, 0));
- if (operands)
- - for (i = 0; i < operands; i++)
- - if (i)
- - printf (",\n\trtx operand%d", i);
- - else
- - printf ("rtx operand%d", i);
- + printf("(rtx operand0 ATTRIBUTE_UNUSED, ...)\n");
- else
- - printf ("void");
- - printf (")\n");
- + printf("(void)\n");
- +
- printf ("{\n");
-
- /* If we don't have any C code to write, only one insn is being written,
- @@ -480,6 +492,8 @@ gen_expand (rtx expand)
- && operands > max_dup_opno
- && XVECLEN (expand, 1) == 1)
- {
- + gen_vararg_prologue(operands);
- +
- printf (" return ");
- gen_exp (XVECEXP (expand, 1, 0), DEFINE_EXPAND, NULL);
- printf (";\n}\n\n");
- @@ -493,6 +507,7 @@ gen_expand (rtx expand)
- for (; i <= max_scratch_opno; i++)
- printf (" rtx operand%d ATTRIBUTE_UNUSED;\n", i);
- printf (" rtx _val = 0;\n");
- + gen_vararg_prologue(operands);
- printf (" start_sequence ();\n");
-
- /* The fourth operand of DEFINE_EXPAND is some code to be executed
- --- a/gcc/genflags.c
- +++ b/gcc/genflags.c
- @@ -127,7 +127,6 @@ static void
- gen_proto (rtx insn)
- {
- int num = num_operands (insn);
- - int i;
- const char *name = XSTR (insn, 0);
- int truth = maybe_eval_c_test (XSTR (insn, 2));
-
- @@ -158,12 +157,7 @@ gen_proto (rtx insn)
- if (num == 0)
- fputs ("void", stdout);
- else
- - {
- - for (i = 1; i < num; i++)
- - fputs ("rtx, ", stdout);
- -
- - fputs ("rtx", stdout);
- - }
- + fputs("rtx, ...", stdout);
-
- puts (");");
-
- @@ -173,12 +167,7 @@ gen_proto (rtx insn)
- {
- printf ("static inline rtx\ngen_%s", name);
- if (num > 0)
- - {
- - putchar ('(');
- - for (i = 0; i < num-1; i++)
- - printf ("rtx ARG_UNUSED (%c), ", 'a' + i);
- - printf ("rtx ARG_UNUSED (%c))\n", 'a' + i);
- - }
- + puts("(rtx ARG_UNUSED(a), ...)");
- else
- puts ("(void)");
- puts ("{\n return 0;\n}");
- --- a/gcc/genoutput.c
- +++ b/gcc/genoutput.c
- @@ -386,7 +386,7 @@ output_insn_data (void)
- }
-
- if (d->name && d->name[0] != '*')
- - printf (" (insn_gen_fn) gen_%s,\n", d->name);
- + printf (" gen_%s,\n", d->name);
- else
- printf (" 0,\n");
-
- --- a/gcc/ifcvt.c
- +++ b/gcc/ifcvt.c
- @@ -84,7 +84,7 @@ static int num_possible_if_blocks;
- static int num_updated_if_blocks;
-
- /* # of changes made. */
- -static int num_true_changes;
- +int num_true_changes;
-
- /* Whether conditional execution changes were made. */
- static int cond_exec_changed_p;
- @@ -290,6 +290,9 @@ cond_exec_process_insns (ce_if_block_t *
- if (must_be_last)
- return FALSE;
-
- +#ifdef IFCVT_ALLOW_MODIFY_TEST_IN_INSN
- + if ( !IFCVT_ALLOW_MODIFY_TEST_IN_INSN )
- +#endif
- if (modified_in_p (test, insn))
- {
- if (!mod_ok)
- @@ -570,15 +573,18 @@ cond_exec_process_if_block (ce_if_block_
- IFCVT_MODIFY_FINAL (ce_info);
- #endif
-
- + /* Merge the blocks, but only after reload has completed. */
- + if (reload_completed) {
- /* Conversion succeeded. */
- if (dump_file)
- fprintf (dump_file, "%d insn%s converted to conditional execution.\n",
- n_insns, (n_insns == 1) ? " was" : "s were");
-
- - /* Merge the blocks! */
- merge_if_block (ce_info);
- cond_exec_changed_p = TRUE;
- return TRUE;
- + }
- + return FALSE;
-
- fail:
- #ifdef IFCVT_MODIFY_CANCEL
- @@ -1087,7 +1093,11 @@ noce_try_addcc (struct noce_if_info *if_
- != UNKNOWN))
- {
- rtx cond = if_info->cond;
- - enum rtx_code code = reversed_comparison_code (cond, if_info->jump);
- + /* Reversing the condition generates wrong code for AVR32: the
- + condition code need not be reversed, since the addmodecc patterns
- + add when the condition is NOT met. */
- + /* enum rtx_code code = reversed_comparison_code (cond, if_info->jump); */
- + enum rtx_code code = GET_CODE(cond);
- +
-
- /* First try to use addcc pattern. */
- if (general_operand (XEXP (cond, 0), VOIDmode)
- @@ -3039,7 +3049,12 @@ find_if_header (basic_block test_bb, int
- && noce_find_if_block (test_bb, then_edge, else_edge, pass))
- goto success;
-
- - if (HAVE_conditional_execution && reload_completed
- + if (HAVE_conditional_execution &&
- +#ifdef IFCVT_COND_EXEC_BEFORE_RELOAD
- + (reload_completed || IFCVT_COND_EXEC_BEFORE_RELOAD)
- +#else
- + reload_completed
- +#endif
- && cond_exec_find_if_block (&ce_info))
- goto success;
-
- @@ -3154,7 +3169,11 @@ cond_exec_find_if_block (struct ce_if_bl
-
- /* We only ever should get here after reload,
- and only if we have conditional execution. */
- +#ifdef IFCVT_COND_EXEC_BEFORE_RELOAD
- + gcc_assert (HAVE_conditional_execution && (reload_completed||IFCVT_COND_EXEC_BEFORE_RELOAD));
- +#else
- gcc_assert (HAVE_conditional_execution && reload_completed);
- +#endif
-
- /* Discover if any fall through predecessors of the current test basic block
- were && tests (which jump to the else block) or || tests (which jump to
- @@ -4259,6 +4278,14 @@ gate_handle_if_after_reload (void)
- static unsigned int
- rest_of_handle_if_after_reload (void)
- {
- + /* Hack for the AVR32 experimental ifcvt processing before reload.
- + The AVR32-specific ifcvt code needs to know when ifcvt after
- + reload has begun. */
- +#ifdef IFCVT_COND_EXEC_BEFORE_RELOAD
- + if ( IFCVT_COND_EXEC_BEFORE_RELOAD )
- + cfun->machine->ifcvt_after_reload = 1;
- +#endif
- +
- if_convert ();
- return 0;
- }
- --- a/gcc/longlong.h
- +++ b/gcc/longlong.h
- @@ -250,6 +250,41 @@ UDItype __umulsidi3 (USItype, USItype);
- #define COUNT_LEADING_ZEROS_0 32
- #endif
-
- +#if defined (__avr32__) && W_TYPE_SIZE == 32
- +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
- + __asm__ ("add\t%1, %4, %5\n\tadc\t%0, %2, %3" \
- + : "=r" ((USItype) (sh)), \
- + "=&r" ((USItype) (sl)) \
- + : "r" ((USItype) (ah)), \
- + "r" ((USItype) (bh)), \
- + "r" ((USItype) (al)), \
- + "r" ((USItype) (bl)) __CLOBBER_CC)
- +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
- + __asm__ ("sub\t%1, %4, %5\n\tsbc\t%0, %2, %3" \
- + : "=r" ((USItype) (sh)), \
- + "=&r" ((USItype) (sl)) \
- + : "r" ((USItype) (ah)), \
- + "r" ((USItype) (bh)), \
- + "r" ((USItype) (al)), \
- + "r" ((USItype) (bl)) __CLOBBER_CC)
- +
- +#if !defined (__AVR32_NO_MUL__)
- +#define __umulsidi3(a,b) ((UDItype)(a) * (UDItype)(b))
- +
- +#define umul_ppmm(w1, w0, u, v) \
- +{ \
- + DWunion __w; \
- + __w.ll = __umulsidi3 (u, v); \
- + w1 = __w.s.high; \
- + w0 = __w.s.low; \
- +}
- +#endif
- +
- +#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clz (X))
- +#define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctz (X))
- +#define COUNT_LEADING_ZEROS_0 32
- +#endif
- +
- #if defined (__CRIS__) && __CRIS_arch_version >= 3
- #define count_leading_zeros(COUNT, X) ((COUNT) = __builtin_clz (X))
- #if __CRIS_arch_version >= 8
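- To make the contract of these macros explicit, here is a small
- portable reference model in plain C: (sh,sl) receives the double-word
- sum of (ah,al) and (bh,bl), with the carry out of the low-word
- addition folded into the high word, exactly what the add/adc pair
- above computes.  The function name is ours.
-
- #include <stdint.h>
-
- /* Reference model for add_ssaaaa with a 32-bit word size. */
- static void
- add_ssaaaa_ref (uint32_t *sh, uint32_t *sl,
-                 uint32_t ah, uint32_t al,
-                 uint32_t bh, uint32_t bl)
- {
-   uint32_t lo = al + bl;          /* the `add' instruction */
-   *sh = ah + bh + (lo < al);      /* the `adc': carry from the low add */
-   *sl = lo;
- }
-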
- --- a/gcc/optabs.h
- +++ b/gcc/optabs.h
- @@ -603,7 +603,7 @@ extern enum insn_code reload_out_optab[N
- extern optab code_to_optab[NUM_RTX_CODE + 1];
-
-
- -typedef rtx (*rtxfun) (rtx);
- +typedef rtx (*rtxfun) (rtx, ...);
-
- /* Indexed by the rtx-code for a conditional (e.g. EQ, LT,...)
- gives the gen_function to make a branch to test that condition. */
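- This one-line change is forced by the genemit and genflags changes
- above: calling a variadic function through a pointer of non-variadic
- type is undefined behaviour, so the shared function-pointer type must
- become variadic as well.  A minimal sketch with illustrative names:
-
- typedef struct rtx_def *rtx;
- typedef rtx (*rtxfun) (rtx, ...);
-
- extern rtx gen_beq (rtx, ...);    /* a generated branch emitter */
-
- static rtx
- emit_branch_to (rtxfun fn, rtx label)
- {
-   /* With the variadic typedef, this indirect call matches the ABI
-      of the variadic definition. */
-   return fn (label);
- }
-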
- --- a/gcc/regrename.c
- +++ b/gcc/regrename.c
- @@ -1582,6 +1582,9 @@ copyprop_hardreg_forward_1 (basic_block
- bool changed = false;
- rtx insn;
-
- + rtx prev_pred_test;
- + int prev_pred_insn_skipped = 0;
- +
- for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
- {
- int n_ops, i, alt, predicated;
- @@ -1621,6 +1624,58 @@ copyprop_hardreg_forward_1 (basic_block
- recog_data.operand_type[i] = OP_INOUT;
- }
-
- +
- + /* Added for targets (AVR32) that allow the test operands of a
- + cond_exec instruction to be modified.  For these targets we cannot
- + change the test operands if one of them is also an output operand,
- + because changing the test operands might require inserting a new
- + test insn in the middle of a sequence of cond_exec insns, and if
- + the test operands have been modified such a test would fail.
- + */
- + if ( IFCVT_ALLOW_MODIFY_TEST_IN_INSN
- + && predicated )
- + {
- + int insn_skipped = 0;
- + rtx test = COND_EXEC_TEST (PATTERN (insn));
- +
- + /* Check if the previous insn was a skipped predicated insn with the
- + same test as this predicated insn.  If so, we cannot modify this
- + insn either, since we cannot emit the test insn; its operands are
- + clobbered. */
- + if ( prev_pred_insn_skipped
- + && (rtx_equal_p (test, prev_pred_test)
- + || rtx_equal_p (test, reversed_condition (prev_pred_test))) )
- + {
- + insn_skipped = 1;
- + }
- + else
- + {
- + /* Check if the output operand is used in the test expression. */
- + for (i = 0; i < n_ops; ++i)
- + if ( recog_data.operand_type[i] == OP_INOUT
- + && reg_mentioned_p (recog_data.operand[i], test) )
- + {
- + insn_skipped = 1;
- + break;
- + }
- +
- + }
- +
- + prev_pred_test = test;
- + prev_pred_insn_skipped = insn_skipped;
- + if ( insn_skipped )
- + {
- + if (insn == BB_END (bb))
- + break;
- + else
- + continue;
- + }
- + }
- + else
- + {
- + prev_pred_insn_skipped = 0;
- + }
- +
- /* For each earlyclobber operand, zap the value data. */
- for (i = 0; i < n_ops; i++)
- if (recog_op_alt[i][alt].earlyclobber)
- --- a/gcc/sched-deps.c
- +++ b/gcc/sched-deps.c
- @@ -1473,7 +1473,14 @@ fixup_sched_groups (rtx insn)
-
- prev_nonnote = prev_nonnote_insn (insn);
- if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
- - && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
- + /* Modification for AVR32 by RP: Why is this here?  It will leave
- + an instruction without any dependencies, which might allow it to
- + be moved anywhere.  For AVR32 we try to keep a group of
- + conditionals together even if they are mutually exclusive.
- + */
- + && (! sched_insns_conditions_mutex_p (insn, prev_nonnote)
- + || GET_CODE (PATTERN (insn)) == COND_EXEC )
- + )
- add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
- }
-
- @@ -2230,8 +2237,29 @@ sched_analyze_insn (struct deps *deps, r
-
- if (code == COND_EXEC)
- {
- +#ifdef IFCVT_ALLOW_MODIFY_TEST_IN_INSN
- + if (IFCVT_ALLOW_MODIFY_TEST_IN_INSN)
- + {
- + /* Check if we have a group of conditional instructions with the same
- + test.  If so, we must make sure that they are not scheduled apart,
- + both to avoid unnecessary tests and, when one of the registers in
- + the test is modified by an instruction in the group, to ensure
- + correct code. */
- + if ( prev_nonnote_insn (insn)
- + && INSN_P (prev_nonnote_insn (insn))
- + && GET_CODE (PATTERN (prev_nonnote_insn (insn))) == COND_EXEC
- + && rtx_equal_p (XEXP(COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn))), 0), XEXP (COND_EXEC_TEST (x), 0))
- + && rtx_equal_p (XEXP(COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn))), 1), XEXP (COND_EXEC_TEST (x), 1))
- + && ( GET_CODE (COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn)))) == GET_CODE (COND_EXEC_TEST (x))
- + || GET_CODE (COND_EXEC_TEST (PATTERN (prev_nonnote_insn (insn)))) == reversed_comparison_code (COND_EXEC_TEST (x), insn)))
- + {
- + SCHED_GROUP_P (insn) = 1;
- + /* CANT_MOVE (prev_nonnote_insn (insn)) = 1; */
- + }
- + }
- +#endif
- sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
-
- +
- /* ??? Should be recording conditions so we reduce the number of
- false dependencies. */
- x = COND_EXEC_CODE (x);
- --- a/gcc/testsuite/gcc.dg/sibcall-3.c
- +++ b/gcc/testsuite/gcc.dg/sibcall-3.c
- @@ -5,7 +5,7 @@
- Copyright (C) 2002 Free Software Foundation Inc.
- Contributed by Hans-Peter Nilsson <hp@bitrange.com> */
-
- -/* { dg-do run { xfail { { arc-*-* avr-*-* cris-*-* crisv32-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa*-*-* } || { arm*-*-* && { ! arm32 } } } } } */
- +/* { dg-do run { xfail { { arc-*-* avr-*-* avr32-*-* cris-*-* crisv32-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa*-*-* } || { arm*-*-* && { ! arm32 } } } } } */
- /* -mlongcall disables sibcall patterns. */
- /* { dg-skip-if "" { powerpc*-*-* } { "-mlongcall" } { "" } } */
- /* { dg-options "-O2 -foptimize-sibling-calls" } */
- --- a/gcc/testsuite/gcc.dg/sibcall-4.c
- +++ b/gcc/testsuite/gcc.dg/sibcall-4.c
- @@ -5,7 +5,7 @@
- Copyright (C) 2002 Free Software Foundation Inc.
- Contributed by Hans-Peter Nilsson <hp@bitrange.com> */
-
- -/* { dg-do run { xfail { { arc-*-* avr-*-* cris-*-* crisv32-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa*-*-* } || { arm*-*-* && { ! arm32 } } } } } */
- +/* { dg-do run { xfail { { arc-*-* avr-*-* avr32-*-* cris-*-* crisv32-*-* h8300-*-* hppa*64*-*-* m32r-*-* m68hc1?-*-* mcore-*-* mn10300-*-* xstormy16-*-* v850*-*-* vax-*-* xtensa*-*-* } || { arm*-*-* && { ! arm32 } } } } } */
- /* -mlongcall disables sibcall patterns. */
- /* { dg-skip-if "" { powerpc*-*-* } { "-mlongcall" } { "" } } */
- /* { dg-options "-O2 -foptimize-sibling-calls" } */
- --- a/gcc/testsuite/gcc.dg/trampoline-1.c
- +++ b/gcc/testsuite/gcc.dg/trampoline-1.c
- @@ -47,6 +47,8 @@ void foo (void)
-
- int main (void)
- {
- +#ifndef NO_TRAMPOLINES
- foo ();
- +#endif
- return 0;
- }
- --- a/libgcc/config.host
- +++ b/libgcc/config.host
- @@ -218,6 +218,13 @@ arm*-wince-pe*)
- ;;
- arm-*-pe*)
- ;;
- +avr32-*-linux*)
- + # No need to build crtbeginT.o on uClibc systems.  Should probably
- + # be moved to the OS-specific section above.
- + extra_parts="crtbegin.o crtbeginS.o crtend.o crtendS.o"
- + ;;
- +avr32-*-*)
- + ;;
- avr-*-rtems*)
- ;;
- avr-*-*)
- --- a/libstdc++-v3/config/os/gnu-linux/ctype_base.h
- +++ b/libstdc++-v3/config/os/gnu-linux/ctype_base.h
- @@ -26,6 +26,8 @@
- //
- // ISO C++ 14882: 22.1 Locales
- //
- +#include <features.h>
- +#include <ctype.h>
-
- /** @file ctype_base.h
- * This is an internal header file, included by other library headers.
- @@ -40,7 +42,11 @@ _GLIBCXX_BEGIN_NAMESPACE(std)
- struct ctype_base
- {
- // Non-standard typedefs.
- +#ifdef __UCLIBC__
- + typedef const __ctype_touplow_t* __to_type;
- +#else
- typedef const int* __to_type;
- +#endif
-
- // NB: Offsets into ctype<char>::_M_table force a particular size
- // on the mask type. Because of this, we don't use an enum.
- --- a/libstdc++-v3/include/Makefile.in
- +++ b/libstdc++-v3/include/Makefile.in
- @@ -36,6 +36,7 @@ POST_UNINSTALL = :
- build_triplet = @build@
- host_triplet = @host@
- target_triplet = @target@
- +LIBOBJDIR =
- DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
- $(top_srcdir)/fragment.am
- subdir = include
- --- a/libstdc++-v3/libsupc++/Makefile.in
- +++ b/libstdc++-v3/libsupc++/Makefile.in
- @@ -38,6 +38,7 @@ POST_UNINSTALL = :
- build_triplet = @build@
- host_triplet = @host@
- target_triplet = @target@
- +LIBOBJDIR =
- DIST_COMMON = $(glibcxxinstall_HEADERS) $(srcdir)/Makefile.am \
- $(srcdir)/Makefile.in $(top_srcdir)/fragment.am
- subdir = libsupc++
- --- a/libstdc++-v3/Makefile.in
- +++ b/libstdc++-v3/Makefile.in
- @@ -36,6 +36,7 @@ POST_UNINSTALL = :
- build_triplet = @build@
- host_triplet = @host@
- target_triplet = @target@
- +LIBOBJDIR =
- DIST_COMMON = $(top_srcdir)/fragment.am $(srcdir)/../config.guess \
- $(srcdir)/../config.sub README ChangeLog $(srcdir)/Makefile.in \
- $(srcdir)/Makefile.am $(top_srcdir)/configure \
- --- a/libstdc++-v3/po/Makefile.in
- +++ b/libstdc++-v3/po/Makefile.in
- @@ -36,6 +36,7 @@ POST_UNINSTALL = :
- build_triplet = @build@
- host_triplet = @host@
- target_triplet = @target@
- +LIBOBJDIR =
- DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
- $(top_srcdir)/fragment.am
- subdir = po
- --- a/libstdc++-v3/src/Makefile.in
- +++ b/libstdc++-v3/src/Makefile.in
- @@ -37,6 +37,7 @@ POST_UNINSTALL = :
- build_triplet = @build@
- host_triplet = @host@
- target_triplet = @target@
- +LIBOBJDIR =
- DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
- $(top_srcdir)/fragment.am
- subdir = src
|