revert-sparc.patch 734 KB

430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793207942079520796207972079820799208002080120802208032080420805208062080720808208092081020811208122081320814208152081620817208182081920820208212082220823208242082520826208272082820829208302083120832208332083420835208362083720838208392084020841208422084320844208452084620847208482084920850208512085220853208542085520856208572085820859208602086120862208632086420865208662086720868208692087020871208722087320874208752087620877208782087920880208812088220883208842088520886208872088820889208902089120892208932089420895208962089720898208992090020901209022090320904209052090620907209082090920910209112091220913209142091520916209172091820919209202092120922209232092420925209262092720928209292093020931209322093320934209352093620937209382093920940209412094220943209442094520946209472094820949209502095120952209532095420955209562095720958209592096020961209622096320964209652096620967209682096920970209712097220973209742097520976209772097820979209802098120982209832098420985209862098720988209892099020991209922099320994209952099620997209982099921000210012100221003210042100521006210072100821009210102101121012210132101421015210162101721018210192102021021210222102321024210252102621027210282102921030210312103221033210342103521036210372103821039210402104121042210432104421045210462104721048210492105021051210522105321054210552105621057210582105921060210612106221063210642106521066210672106821069210702107121072210732107421075210762107721078210792108021081210822108321084210852108621087210882108921090210912109221093210942109521096210972109821099211002110121102211032110421105211062110721108211092111021111211122111321114211152111621117211182111921120211212112221123211242112521126211272112821129211302113121132211332113421135211362113721138211392114021141211422114321144211452114621147211482114921150211512115221153211542115521156211572115821159211602116121162211632116421165211662116721168211692117021171211722117321174211752117621177211782117921180211812118221183211842118521186211872118821189211902119121192211932119421195211962119721198211992120021201212022120321204212052120621207212082120921210212112121221213212142121521216212172121821219212202122121222212232122421225212262122721228212292123021231212322123321234212352123621237212382123921240212412124221243212442124521246212472124821249212502125121252212532125421255212562125721258212592126021261212622126321264212652126621267212682126921270212712127221273212742127521276212772127821279212802128121282212832128421285212862128721288212892129021291212922129321294212952129621297212982129921300213012130221303213042130521306213072130821309213102131121312213132131421315213162131721318213192132021321213222132321324213252132621327213282132921330213312133221333213342133521336213372133821339213402134121342213432134421345213462134721348213492135021351213522135321354213552135621357213582135921360213612136221363213642136521366213672136821369213702137121372213732137421375213762137721378213792138021381213822138321384213852138621387213882138921390213912139221393213942139521396213972139821399214002140121402214032140421405214062140721408214092141021411214122141321414214152141621417214182
141921420214212142221423214242142521426214272142821429214302143121432214332143421435214362143721438214392144021441214422144321444214452144621447214482144921450214512145221453214542145521456214572145821459214602146121462214632146421465214662146721468214692147021471214722147321474214752147621477214782147921480214812148221483214842148521486214872148821489214902149121492214932149421495214962149721498214992150021501215022150321504215052150621507215082150921510215112151221513215142151521516215172151821519215202152121522215232152421525215262152721528215292153021531215322153321534215352153621537215382153921540215412154221543215442154521546215472154821549215502155121552215532155421555215562155721558215592156021561215622156321564215652156621567215682156921570215712157221573215742157521576215772157821579215802158121582215832158421585215862158721588215892159021591215922159321594215952159621597215982159921600216012160221603216042160521606216072160821609216102161121612216132161421615216162161721618216192162021621216222162321624216252162621627216282162921630216312163221633216342163521636216372163821639216402164121642216432164421645216462164721648216492165021651216522165321654216552165621657216582165921660216612166221663216642166521666216672166821669216702167121672216732167421675216762167721678216792168021681216822168321684216852168621687216882168921690216912169221693216942169521696216972169821699217002170121702217032170421705217062170721708217092171021711217122171321714217152171621717217182171921720217212172221723217242172521726217272172821729217302173121732217332173421735217362173721738217392174021741217422174321744217452174621747217482174921750217512175221753217542175521756217572175821759217602176121762217632176421765217662176721768217692177021771217722177321774217752177621777217782177921780217812178221783217842178521786217872178821789217902179121792217932179421795217962179721798217992180021801218022180321804218052180621807218082180921810218112181221813218142181521816218172181821819218202182121822218232182421825218262182721828218292183021831218322183321834218352183621837218382183921840218412184221843218442184521846218472184821849218502185121852218532185421855218562185721858218592186021861218622186321864218652186621867218682186921870218712187221873218742187521876218772187821879218802188121882218832188421885218862188721888218892189021891218922189321894218952189621897218982189921900219012190221903219042190521906219072190821909219102191121912219132191421915219162191721918219192192021921219222192321924219252192621927219282192921930219312193221933219342193521936219372193821939219402194121942219432194421945219462194721948219492195021951219522195321954219552195621957219582195921960219612196221963219642196521966219672196821969219702197121972219732197421975219762197721978219792198021981219822198321984219852198621987219882198921990219912199221993219942199521996219972199821999220002200122002220032200422005220062200722008220092201022011220122201322014220152201622017220182201922020220212202222023220242202522026220272202822029220302203122032220332203422035220362203722038220392204022041220422204322044220452204622047220482204922050220512205222053220542205522056220572205822059220602206122062220632206422065220662206722068220692207022071220722207322074220752207622077220782207922080220812208222083220842208522086220872208822089220902209122092220932209422095220962209722098220992210022101221022210322104221052210622107221082210922110221112211222113221142211522116221172211822119221202212122122221232212422125221262212722128221292
213022131221322213322134221352213622137221382213922140221412214222143221442214522146221472214822149221502215122152221532215422155221562215722158221592216022161221622216322164221652216622167221682216922170221712217222173221742217522176221772217822179221802218122182221832218422185221862218722188221892219022191221922219322194221952219622197221982219922200222012220222203222042220522206222072220822209222102221122212222132221422215222162221722218222192222022221222222222322224222252222622227222282222922230222312223222233222342223522236222372223822239222402224122242222432224422245222462224722248222492225022251222522225322254222552225622257222582225922260222612226222263222642226522266222672226822269222702227122272222732227422275222762227722278222792228022281222822228322284222852228622287222882228922290222912229222293222942229522296222972229822299223002230122302223032230422305223062230722308223092231022311223122231322314223152231622317223182231922320223212232222323223242232522326223272232822329223302233122332223332233422335223362233722338223392234022341223422234322344223452234622347223482234922350223512235222353223542235522356223572235822359223602236122362223632236422365223662236722368223692237022371223722237322374223752237622377223782237922380223812238222383223842238522386223872238822389223902239122392223932239422395223962239722398223992240022401224022240322404224052240622407224082240922410224112241222413224142241522416224172241822419224202242122422224232242422425224262242722428224292243022431224322243322434224352243622437224382243922440224412244222443224442244522446224472244822449224502245122452224532245422455224562245722458224592246022461224622246322464224652246622467224682246922470224712247222473224742247522476224772247822479224802248122482224832248422485224862248722488224892249022491224922249322494224952249622497224982249922500225012250222503225042250522506225072250822509225102251122512225132251422515225162251722518225192252022521225222252322524225252252622527225282252922530225312253222533225342253522536225372253822539225402254122542225432254422545225462254722548225492255022551225522255322554225552255622557225582255922560225612256222563225642256522566225672256822569225702257122572225732257422575225762257722578225792258022581225822258322584225852258622587225882258922590225912259222593225942259522596225972259822599226002260122602226032260422605226062260722608226092261022611226122261322614226152261622617226182261922620226212262222623226242262522626226272262822629226302263122632226332263422635226362263722638226392264022641226422264322644226452264622647226482264922650226512265222653226542265522656226572265822659226602266122662226632266422665226662266722668226692267022671226722267322674226752267622677226782267922680226812268222683226842268522686226872268822689226902269122692226932269422695226962269722698226992270022701227022270322704227052270622707227082270922710227112271222713227142271522716227172271822719227202272122722227232272422725227262272722728227292273022731227322273322734227352273622737227382273922740227412274222743227442274522746227472274822749227502275122752227532275422755227562275722758227592276022761227622276322764227652276622767227682276922770227712277222773227742277522776227772277822779227802278122782227832278422785227862278722788227892279022791227922279322794227952279622797227982279922800228012280222803228042280522806228072280822809228102281122812228132281422815228162281722818228192282022821228222282322824228252282622827228282282922830228312283222833228342283522836228372283822839228402
284122842228432284422845228462284722848228492285022851228522285322854228552285622857228582285922860228612286222863228642286522866228672286822869228702287122872228732287422875228762287722878228792288022881228822288322884228852288622887228882288922890228912289222893228942289522896228972289822899229002290122902229032290422905229062290722908229092291022911229122291322914229152291622917229182291922920229212292222923229242292522926229272292822929229302293122932229332293422935229362293722938229392294022941229422294322944229452294622947229482294922950229512295222953229542295522956229572295822959229602296122962229632296422965229662296722968229692297022971229722297322974229752297622977229782297922980229812298222983229842298522986229872298822989229902299122992229932299422995229962299722998229992300023001230022300323004230052300623007230082300923010230112301223013230142301523016230172301823019230202302123022230232302423025230262302723028230292303023031230322303323034230352303623037230382303923040230412304223043230442304523046230472304823049230502305123052230532305423055230562305723058230592306023061230622306323064230652306623067230682306923070230712307223073230742307523076230772307823079230802308123082230832308423085230862308723088230892309023091230922309323094230952309623097230982309923100231012310223103231042310523106231072310823109231102311123112231132311423115231162311723118231192312023121231222312323124231252312623127231282312923130231312313223133231342313523136231372313823139231402314123142231432314423145231462314723148231492315023151231522315323154231552315623157231582315923160231612316223163231642316523166231672316823169231702317123172231732317423175231762317723178231792318023181231822318323184231852318623187231882318923190231912319223193231942319523196231972319823199232002320123202232032320423205232062320723208232092321023211232122321323214232152321623217232182321923220232212322223223232242322523226232272322823229232302323123232232332323423235232362323723238232392324023241232422324323244232452324623247232482324923250232512325223253232542325523256232572325823259232602326123262232632326423265232662326723268232692327023271232722327323274232752327623277232782327923280232812328223283232842328523286232872328823289232902329123292232932329423295232962329723298232992330023301233022330323304233052330623307233082330923310233112331223313233142331523316233172331823319233202332123322233232332423325233262332723328233292333023331233322333323334233352333623337233382333923340233412334223343233442334523346233472334823349233502335123352233532335423355233562335723358233592336023361233622336323364233652336623367233682336923370233712337223373233742337523376233772337823379233802338123382233832338423385233862338723388233892339023391233922339323394233952339623397233982339923400234012340223403234042340523406234072340823409234102341123412234132341423415234162341723418234192342023421234222342323424234252342623427234282342923430234312343223433234342343523436234372343823439234402344123442234432344423445234462344723448234492345023451234522345323454234552345623457234582345923460234612346223463234642346523466234672346823469234702347123472234732347423475234762347723478234792348023481234822348323484234852348623487234882348923490234912349223493234942349523496234972349823499235002350123502235032350423505235062350723508235092351023511235122351323514235152351623517235182351923520235212352223523235242352523526235272352823529235302353123532235332353423535235362353723538235392354023541235422354323544235452354623547235482354923550235512
355223553235542355523556235572355823559235602356123562235632356423565235662356723568235692357023571235722357323574235752357623577235782357923580235812358223583235842358523586235872358823589235902359123592235932359423595235962359723598235992360023601236022360323604236052360623607236082360923610236112361223613236142361523616236172361823619236202362123622236232362423625236262362723628
diff -Nur gcc-10.3.0.orig/gcc/config/sparc/sparc.c gcc-10.3.0/gcc/config/sparc/sparc.c
--- gcc-10.3.0.orig/gcc/config/sparc/sparc.c 2021-04-08 13:56:28.201742273 +0200
+++ gcc-10.3.0/gcc/config/sparc/sparc.c 2021-04-09 07:51:37.884501308 +0200
@@ -4157,6 +4157,13 @@
 static bool
 sparc_cannot_force_const_mem (machine_mode mode, rtx x)
 {
+  /* After IRA has run in PIC mode, it is too late to put anything into the
+     constant pool if the PIC register hasn't already been initialized.  */
+  if ((lra_in_progress || reload_in_progress)
+      && flag_pic
+      && !crtl->uses_pic_offset_table)
+    return true;
+
   switch (GET_CODE (x))
     {
     case CONST_INT:
@@ -4192,11 +4199,9 @@
 }
 
 /* Global Offset Table support.  */
-static GTY(()) rtx got_symbol_rtx = NULL_RTX;
-static GTY(()) rtx got_register_rtx = NULL_RTX;
 static GTY(()) rtx got_helper_rtx = NULL_RTX;
-
-static GTY(()) bool got_helper_needed = false;
+static GTY(()) rtx got_register_rtx = NULL_RTX;
+static GTY(()) rtx got_symbol_rtx = NULL_RTX;
 
 /* Return the SYMBOL_REF for the Global Offset Table.  */
 
@@ -4209,6 +4214,27 @@
   return got_symbol_rtx;
 }
 
+#ifdef HAVE_GAS_HIDDEN
+# define USE_HIDDEN_LINKONCE 1
+#else
+# define USE_HIDDEN_LINKONCE 0
+#endif
+
+static void
+get_pc_thunk_name (char name[32], unsigned int regno)
+{
+  const char *reg_name = reg_names[regno];
+
+  /* Skip the leading '%' as that cannot be used in a
+     symbol name.  */
+  reg_name += 1;
+
+  if (USE_HIDDEN_LINKONCE)
+    sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
+  else
+    ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
+}
+
 /* Wrapper around the load_pcrel_sym{si,di} patterns.  */
 
 static rtx
@@ -4228,78 +4254,30 @@
   return insn;
 }
 
-/* Output the load_pcrel_sym{si,di} patterns.  */
-
-const char *
-output_load_pcrel_sym (rtx *operands)
-{
-  if (flag_delayed_branch)
-    {
-      output_asm_insn ("sethi\t%%hi(%a1-4), %0", operands);
-      output_asm_insn ("call\t%a2", operands);
-      output_asm_insn (" add\t%0, %%lo(%a1+4), %0", operands);
-    }
-  else
-    {
-      output_asm_insn ("sethi\t%%hi(%a1-8), %0", operands);
-      output_asm_insn ("add\t%0, %%lo(%a1-4), %0", operands);
-      output_asm_insn ("call\t%a2", operands);
-      output_asm_insn (" nop", NULL);
-    }
-
-  if (operands[2] == got_helper_rtx)
-    got_helper_needed = true;
-
-  return "";
-}
-
-#ifdef HAVE_GAS_HIDDEN
-# define USE_HIDDEN_LINKONCE 1
-#else
-# define USE_HIDDEN_LINKONCE 0
-#endif
-
 /* Emit code to load the GOT register.  */
 
 void
 load_got_register (void)
 {
-  rtx insn;
+  if (!got_register_rtx)
+    got_register_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
 
   if (TARGET_VXWORKS_RTP)
-    {
-      if (!got_register_rtx)
-	got_register_rtx = pic_offset_table_rtx;
-
-      insn = gen_vxworks_load_got ();
-    }
+    emit_insn (gen_vxworks_load_got ());
   else
     {
-      if (!got_register_rtx)
-	got_register_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
-
       /* The GOT symbol is subject to a PC-relative relocation so we need a
	 helper function to add the PC value and thus get the final value.  */
       if (!got_helper_rtx)
	{
	  char name[32];
-
-	  /* Skip the leading '%' as that cannot be used in a symbol name.  */
-	  if (USE_HIDDEN_LINKONCE)
-	    sprintf (name, "__sparc_get_pc_thunk.%s",
-		     reg_names[REGNO (got_register_rtx)] + 1);
-	  else
-	    ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC",
-					 REGNO (got_register_rtx));
-
+	  get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
	  got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
	}
 
-      insn
-	= gen_load_pcrel_sym (got_register_rtx, sparc_got (), got_helper_rtx);
+      emit_insn (gen_load_pcrel_sym (got_register_rtx, sparc_got (),
+				     got_helper_rtx));
     }
-
-  emit_insn (insn);
 }
 
 /* Ensure that we are not using patterns that are not OK with PIC.  */
@@ -5464,7 +5442,7 @@
     return true;
 
   /* GOT register (%l7) if needed.  */
-  if (got_register_rtx && regno == REGNO (got_register_rtx))
+  if (regno == GLOBAL_OFFSET_TABLE_REGNUM && got_register_rtx)
     return true;
 
   /* If the function accesses prior frames, the frame pointer and the return
@@ -12507,9 +12485,10 @@
 sparc_file_end (void)
 {
   /* If we need to emit the special GOT helper function, do so now.  */
-  if (got_helper_needed)
+  if (got_helper_rtx)
     {
       const char *name = XSTR (got_helper_rtx, 0);
+      const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
 #ifdef DWARF2_UNWIND_INFO
       bool do_cfi;
 #endif
@@ -12546,22 +12525,17 @@
 #ifdef DWARF2_UNWIND_INFO
       do_cfi = dwarf2out_do_cfi_asm ();
       if (do_cfi)
-	output_asm_insn (".cfi_startproc", NULL);
+	fprintf (asm_out_file, "\t.cfi_startproc\n");
 #endif
 
       if (flag_delayed_branch)
-	{
-	  output_asm_insn ("jmp\t%%o7+8", NULL);
-	  output_asm_insn (" add\t%%o7, %0, %0", &got_register_rtx);
-	}
+	fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
+		 reg_name, reg_name);
       else
-	{
-	  output_asm_insn ("add\t%%o7, %0, %0", &got_register_rtx);
-	  output_asm_insn ("jmp\t%%o7+8", NULL);
-	  output_asm_insn (" nop", NULL);
-	}
+	fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
+		 reg_name, reg_name);
 #ifdef DWARF2_UNWIND_INFO
       if (do_cfi)
-	output_asm_insn (".cfi_endproc", NULL);
+	fprintf (asm_out_file, "\t.cfi_endproc\n");
 #endif
     }
 
@@ -13056,10 +13030,7 @@
   edge entry_edge;
   rtx_insn *seq;
 
-  /* In PIC mode, we need to always initialize the PIC register if optimization
-     is enabled, because we are called from IRA and LRA may later force things
-     to the constant pool for optimization purposes.  */
-  if (!flag_pic || (!crtl->uses_pic_offset_table && !optimize))
+  if (!crtl->uses_pic_offset_table)
     return;
 
   start_sequence ();
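
The sparc.c hunks above form a complete unit: they restore get_pc_thunk_name, the eager emission of the PC thunk in sparc_file_end, and a guard in sparc_cannot_force_const_mem that rejects new constant-pool entries during reload/LRA in PIC code when the PIC register was never initialized. The sketch below is an illustration only, not part of the patch; the file name, function name, and driver invocation are hypothetical. It shows the kind of translation unit that reaches that guard:

/* pool-guard.c -- hypothetical illustration, not part of this patch.
   Compile with something like: sparc-linux-gnu-gcc -O2 -fPIC -S pool-guard.c
   SPARC has no floating-point immediates, so the constant below would
   normally be placed in the constant pool; under -fPIC that pool is
   addressed through the GOT register (%l7).  With the guard restored
   above, sparc_cannot_force_const_mem returns true during reload/LRA
   if %l7 was never set up, so the constant cannot be forced into the
   pool behind the register allocator's back.  */
double
scale (double x)
{
  return x * 3.14159265358979323846;
}
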
diff -Nur gcc-10.3.0.orig/gcc/config/sparc/sparc.c.orig gcc-10.3.0/gcc/config/sparc/sparc.c.orig
--- gcc-10.3.0.orig/gcc/config/sparc/sparc.c.orig 1970-01-01 01:00:00.000000000 +0100
+++ gcc-10.3.0/gcc/config/sparc/sparc.c.orig 2021-04-08 13:56:28.201742273 +0200
@@ -0,0 +1,13813 @@
+/* Subroutines for insn-output.c for SPARC.
+   Copyright (C) 1987-2020 Free Software Foundation, Inc.
+   Contributed by Michael Tiemann (tiemann@cygnus.com)
+   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
+   at Cygnus Support.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+#define IN_TARGET_CODE 1
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "target.h"
+#include "rtl.h"
+#include "tree.h"
+#include "memmodel.h"
+#include "gimple.h"
+#include "df.h"
+#include "tm_p.h"
+#include "stringpool.h"
+#include "attribs.h"
+#include "expmed.h"
+#include "optabs.h"
+#include "regs.h"
+#include "emit-rtl.h"
+#include "recog.h"
+#include "diagnostic-core.h"
+#include "alias.h"
+#include "fold-const.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "varasm.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "explow.h"
+#include "expr.h"
+#include "debug.h"
+#include "cfgrtl.h"
+#include "common/common-target.h"
+#include "gimplify.h"
+#include "langhooks.h"
+#include "reload.h"
+#include "tree-pass.h"
+#include "context.h"
+#include "builtins.h"
+#include "tree-vector-builder.h"
+#include "opts.h"
+
+/* This file should be included last.  */
+#include "target-def.h"
+
+/* Processor costs */
+
+struct processor_costs {
+  /* Integer load */
+  const int int_load;
+
+  /* Integer signed load */
+  const int int_sload;
+
+  /* Integer zeroed load */
+  const int int_zload;
+
+  /* Float load */
+  const int float_load;
+
+  /* fmov, fneg, fabs */
+  const int float_move;
+
+  /* fadd, fsub */
+  const int float_plusminus;
+
+  /* fcmp */
+  const int float_cmp;
+
+  /* fmov, fmovr */
+  const int float_cmove;
+
+  /* fmul */
+  const int float_mul;
+
+  /* fdivs */
+  const int float_div_sf;
+
+  /* fdivd */
+  const int float_div_df;
+
+  /* fsqrts */
+  const int float_sqrt_sf;
+
+  /* fsqrtd */
+  const int float_sqrt_df;
+
+  /* umul/smul */
+  const int int_mul;
+
+  /* mulX */
  309. + const int int_mulX;
  310. +
  311. + /* integer multiply cost for each bit set past the most
  312. + significant 3, so the formula for multiply cost becomes:
  313. +
  314. + if (rs1 < 0)
  315. + highest_bit = highest_clear_bit(rs1);
  316. + else
  317. + highest_bit = highest_set_bit(rs1);
  318. + if (highest_bit < 3)
  319. + highest_bit = 3;
  320. + cost = int_mul{,X} + ((highest_bit - 3) / int_mul_bit_factor);
  321. +
  322. + A value of zero indicates that the multiply cost is fixed,
  323. + not variable (see the illustrative sketch after this struct). */
  324. + const int int_mul_bit_factor;
  325. +
  326. + /* udiv/sdiv */
  327. + const int int_div;
  328. +
  329. + /* divX */
  330. + const int int_divX;
  331. +
  332. + /* movcc, movr */
  333. + const int int_cmove;
  334. +
  335. + /* penalty for shifts, due to scheduling rules etc. */
  336. + const int shift_penalty;
  337. +
  338. + /* cost of a (predictable) branch. */
  339. + const int branch_cost;
  340. +};
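+
+/* Hypothetical helpers for the sketch below, not real GCC functions:
+   position of the highest set/clear bit of the operand. */
+extern int highest_set_bit (int);
+extern int highest_clear_bit (int);
+
+/* Illustrative sketch, not part of the original source: the variable
+   multiply-cost formula documented in struct processor_costs, written
+   out as a helper. */
+static int
+example_int_mul_cost (const struct processor_costs *costs, int rs1)
+{
+  int highest_bit
+    = rs1 < 0 ? highest_clear_bit (rs1) : highest_set_bit (rs1);
+  if (highest_bit < 3)
+    highest_bit = 3;
+  /* A bit factor of zero means the multiply cost is fixed. */
+  if (costs->int_mul_bit_factor == 0)
+    return costs->int_mul;
+  return costs->int_mul + (highest_bit - 3) / costs->int_mul_bit_factor;
+}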
  341. +
  342. +static const
  343. +struct processor_costs cypress_costs = {
  344. + COSTS_N_INSNS (2), /* int load */
  345. + COSTS_N_INSNS (2), /* int signed load */
  346. + COSTS_N_INSNS (2), /* int zeroed load */
  347. + COSTS_N_INSNS (2), /* float load */
  348. + COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  349. + COSTS_N_INSNS (5), /* fadd, fsub */
  350. + COSTS_N_INSNS (1), /* fcmp */
  351. + COSTS_N_INSNS (1), /* fmov, fmovr */
  352. + COSTS_N_INSNS (7), /* fmul */
  353. + COSTS_N_INSNS (37), /* fdivs */
  354. + COSTS_N_INSNS (37), /* fdivd */
  355. + COSTS_N_INSNS (63), /* fsqrts */
  356. + COSTS_N_INSNS (63), /* fsqrtd */
  357. + COSTS_N_INSNS (1), /* imul */
  358. + COSTS_N_INSNS (1), /* imulX */
  359. + 0, /* imul bit factor */
  360. + COSTS_N_INSNS (1), /* idiv */
  361. + COSTS_N_INSNS (1), /* idivX */
  362. + COSTS_N_INSNS (1), /* movcc/movr */
  363. + 0, /* shift penalty */
  364. + 3 /* branch cost */
  365. +};
  366. +
  367. +static const
  368. +struct processor_costs supersparc_costs = {
  369. + COSTS_N_INSNS (1), /* int load */
  370. + COSTS_N_INSNS (1), /* int signed load */
  371. + COSTS_N_INSNS (1), /* int zeroed load */
  372. + COSTS_N_INSNS (0), /* float load */
  373. + COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  374. + COSTS_N_INSNS (3), /* fadd, fsub */
  375. + COSTS_N_INSNS (3), /* fcmp */
  376. + COSTS_N_INSNS (1), /* fmov, fmovr */
  377. + COSTS_N_INSNS (3), /* fmul */
  378. + COSTS_N_INSNS (6), /* fdivs */
  379. + COSTS_N_INSNS (9), /* fdivd */
  380. + COSTS_N_INSNS (12), /* fsqrts */
  381. + COSTS_N_INSNS (12), /* fsqrtd */
  382. + COSTS_N_INSNS (4), /* imul */
  383. + COSTS_N_INSNS (4), /* imulX */
  384. + 0, /* imul bit factor */
  385. + COSTS_N_INSNS (4), /* idiv */
  386. + COSTS_N_INSNS (4), /* idivX */
  387. + COSTS_N_INSNS (1), /* movcc/movr */
  388. + 1, /* shift penalty */
  389. + 3 /* branch cost */
  390. +};
  391. +
  392. +static const
  393. +struct processor_costs hypersparc_costs = {
  394. + COSTS_N_INSNS (1), /* int load */
  395. + COSTS_N_INSNS (1), /* int signed load */
  396. + COSTS_N_INSNS (1), /* int zeroed load */
  397. + COSTS_N_INSNS (1), /* float load */
  398. + COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  399. + COSTS_N_INSNS (1), /* fadd, fsub */
  400. + COSTS_N_INSNS (1), /* fcmp */
  401. + COSTS_N_INSNS (1), /* fmov, fmovr */
  402. + COSTS_N_INSNS (1), /* fmul */
  403. + COSTS_N_INSNS (8), /* fdivs */
  404. + COSTS_N_INSNS (12), /* fdivd */
  405. + COSTS_N_INSNS (17), /* fsqrts */
  406. + COSTS_N_INSNS (17), /* fsqrtd */
  407. + COSTS_N_INSNS (17), /* imul */
  408. + COSTS_N_INSNS (17), /* imulX */
  409. + 0, /* imul bit factor */
  410. + COSTS_N_INSNS (17), /* idiv */
  411. + COSTS_N_INSNS (17), /* idivX */
  412. + COSTS_N_INSNS (1), /* movcc/movr */
  413. + 0, /* shift penalty */
  414. + 3 /* branch cost */
  415. +};
  416. +
  417. +static const
  418. +struct processor_costs leon_costs = {
  419. + COSTS_N_INSNS (1), /* int load */
  420. + COSTS_N_INSNS (1), /* int signed load */
  421. + COSTS_N_INSNS (1), /* int zeroed load */
  422. + COSTS_N_INSNS (1), /* float load */
  423. + COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  424. + COSTS_N_INSNS (1), /* fadd, fsub */
  425. + COSTS_N_INSNS (1), /* fcmp */
  426. + COSTS_N_INSNS (1), /* fmov, fmovr */
  427. + COSTS_N_INSNS (1), /* fmul */
  428. + COSTS_N_INSNS (15), /* fdivs */
  429. + COSTS_N_INSNS (15), /* fdivd */
  430. + COSTS_N_INSNS (23), /* fsqrts */
  431. + COSTS_N_INSNS (23), /* fsqrtd */
  432. + COSTS_N_INSNS (5), /* imul */
  433. + COSTS_N_INSNS (5), /* imulX */
  434. + 0, /* imul bit factor */
  435. + COSTS_N_INSNS (5), /* idiv */
  436. + COSTS_N_INSNS (5), /* idivX */
  437. + COSTS_N_INSNS (1), /* movcc/movr */
  438. + 0, /* shift penalty */
  439. + 3 /* branch cost */
  440. +};
  441. +
  442. +static const
  443. +struct processor_costs leon3_costs = {
  444. + COSTS_N_INSNS (1), /* int load */
  445. + COSTS_N_INSNS (1), /* int signed load */
  446. + COSTS_N_INSNS (1), /* int zeroed load */
  447. + COSTS_N_INSNS (1), /* float load */
  448. + COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  449. + COSTS_N_INSNS (1), /* fadd, fsub */
  450. + COSTS_N_INSNS (1), /* fcmp */
  451. + COSTS_N_INSNS (1), /* fmov, fmovr */
  452. + COSTS_N_INSNS (1), /* fmul */
  453. + COSTS_N_INSNS (14), /* fdivs */
  454. + COSTS_N_INSNS (15), /* fdivd */
  455. + COSTS_N_INSNS (22), /* fsqrts */
  456. + COSTS_N_INSNS (23), /* fsqrtd */
  457. + COSTS_N_INSNS (5), /* imul */
  458. + COSTS_N_INSNS (5), /* imulX */
  459. + 0, /* imul bit factor */
  460. + COSTS_N_INSNS (35), /* idiv */
  461. + COSTS_N_INSNS (35), /* idivX */
  462. + COSTS_N_INSNS (1), /* movcc/movr */
  463. + 0, /* shift penalty */
  464. + 3 /* branch cost */
  465. +};
  466. +
  467. +static const
  468. +struct processor_costs sparclet_costs = {
  469. + COSTS_N_INSNS (3), /* int load */
  470. + COSTS_N_INSNS (3), /* int signed load */
  471. + COSTS_N_INSNS (1), /* int zeroed load */
  472. + COSTS_N_INSNS (1), /* float load */
  473. + COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  474. + COSTS_N_INSNS (1), /* fadd, fsub */
  475. + COSTS_N_INSNS (1), /* fcmp */
  476. + COSTS_N_INSNS (1), /* fmov, fmovr */
  477. + COSTS_N_INSNS (1), /* fmul */
  478. + COSTS_N_INSNS (1), /* fdivs */
  479. + COSTS_N_INSNS (1), /* fdivd */
  480. + COSTS_N_INSNS (1), /* fsqrts */
  481. + COSTS_N_INSNS (1), /* fsqrtd */
  482. + COSTS_N_INSNS (5), /* imul */
  483. + COSTS_N_INSNS (5), /* imulX */
  484. + 0, /* imul bit factor */
  485. + COSTS_N_INSNS (5), /* idiv */
  486. + COSTS_N_INSNS (5), /* idivX */
  487. + COSTS_N_INSNS (1), /* movcc/movr */
  488. + 0, /* shift penalty */
  489. + 3 /* branch cost */
  490. +};
  491. +
  492. +static const
  493. +struct processor_costs ultrasparc_costs = {
  494. + COSTS_N_INSNS (2), /* int load */
  495. + COSTS_N_INSNS (3), /* int signed load */
  496. + COSTS_N_INSNS (2), /* int zeroed load */
  497. + COSTS_N_INSNS (2), /* float load */
  498. + COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  499. + COSTS_N_INSNS (4), /* fadd, fsub */
  500. + COSTS_N_INSNS (1), /* fcmp */
  501. + COSTS_N_INSNS (2), /* fmov, fmovr */
  502. + COSTS_N_INSNS (4), /* fmul */
  503. + COSTS_N_INSNS (13), /* fdivs */
  504. + COSTS_N_INSNS (23), /* fdivd */
  505. + COSTS_N_INSNS (13), /* fsqrts */
  506. + COSTS_N_INSNS (23), /* fsqrtd */
  507. + COSTS_N_INSNS (4), /* imul */
  508. + COSTS_N_INSNS (4), /* imulX */
  509. + 2, /* imul bit factor */
  510. + COSTS_N_INSNS (37), /* idiv */
  511. + COSTS_N_INSNS (68), /* idivX */
  512. + COSTS_N_INSNS (2), /* movcc/movr */
  513. + 2, /* shift penalty */
  514. + 2 /* branch cost */
  515. +};
  516. +
  517. +static const
  518. +struct processor_costs ultrasparc3_costs = {
  519. + COSTS_N_INSNS (2), /* int load */
  520. + COSTS_N_INSNS (3), /* int signed load */
  521. + COSTS_N_INSNS (3), /* int zeroed load */
  522. + COSTS_N_INSNS (2), /* float load */
  523. + COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  524. + COSTS_N_INSNS (4), /* fadd, fsub */
  525. + COSTS_N_INSNS (5), /* fcmp */
  526. + COSTS_N_INSNS (3), /* fmov, fmovr */
  527. + COSTS_N_INSNS (4), /* fmul */
  528. + COSTS_N_INSNS (17), /* fdivs */
  529. + COSTS_N_INSNS (20), /* fdivd */
  530. + COSTS_N_INSNS (20), /* fsqrts */
  531. + COSTS_N_INSNS (29), /* fsqrtd */
  532. + COSTS_N_INSNS (6), /* imul */
  533. + COSTS_N_INSNS (6), /* imulX */
  534. + 0, /* imul bit factor */
  535. + COSTS_N_INSNS (40), /* idiv */
  536. + COSTS_N_INSNS (71), /* idivX */
  537. + COSTS_N_INSNS (2), /* movcc/movr */
  538. + 0, /* shift penalty */
  539. + 2 /* branch cost */
  540. +};
  541. +
  542. +static const
  543. +struct processor_costs niagara_costs = {
  544. + COSTS_N_INSNS (3), /* int load */
  545. + COSTS_N_INSNS (3), /* int signed load */
  546. + COSTS_N_INSNS (3), /* int zeroed load */
  547. + COSTS_N_INSNS (9), /* float load */
  548. + COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  549. + COSTS_N_INSNS (8), /* fadd, fsub */
  550. + COSTS_N_INSNS (26), /* fcmp */
  551. + COSTS_N_INSNS (8), /* fmov, fmovr */
  552. + COSTS_N_INSNS (29), /* fmul */
  553. + COSTS_N_INSNS (54), /* fdivs */
  554. + COSTS_N_INSNS (83), /* fdivd */
  555. + COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  556. + COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  557. + COSTS_N_INSNS (11), /* imul */
  558. + COSTS_N_INSNS (11), /* imulX */
  559. + 0, /* imul bit factor */
  560. + COSTS_N_INSNS (72), /* idiv */
  561. + COSTS_N_INSNS (72), /* idivX */
  562. + COSTS_N_INSNS (1), /* movcc/movr */
  563. + 0, /* shift penalty */
  564. + 4 /* branch cost */
  565. +};
  566. +
  567. +static const
  568. +struct processor_costs niagara2_costs = {
  569. + COSTS_N_INSNS (3), /* int load */
  570. + COSTS_N_INSNS (3), /* int signed load */
  571. + COSTS_N_INSNS (3), /* int zeroed load */
  572. + COSTS_N_INSNS (3), /* float load */
  573. + COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  574. + COSTS_N_INSNS (6), /* fadd, fsub */
  575. + COSTS_N_INSNS (6), /* fcmp */
  576. + COSTS_N_INSNS (6), /* fmov, fmovr */
  577. + COSTS_N_INSNS (6), /* fmul */
  578. + COSTS_N_INSNS (19), /* fdivs */
  579. + COSTS_N_INSNS (33), /* fdivd */
  580. + COSTS_N_INSNS (19), /* fsqrts */
  581. + COSTS_N_INSNS (33), /* fsqrtd */
  582. + COSTS_N_INSNS (5), /* imul */
  583. + COSTS_N_INSNS (5), /* imulX */
  584. + 0, /* imul bit factor */
  585. + COSTS_N_INSNS (26), /* idiv, average of 12 - 41 cycle range */
  586. + COSTS_N_INSNS (26), /* idivX, average of 12 - 41 cycle range */
  587. + COSTS_N_INSNS (1), /* movcc/movr */
  588. + 0, /* shift penalty */
  589. + 5 /* branch cost */
  590. +};
  591. +
  592. +static const
  593. +struct processor_costs niagara3_costs = {
  594. + COSTS_N_INSNS (3), /* int load */
  595. + COSTS_N_INSNS (3), /* int signed load */
  596. + COSTS_N_INSNS (3), /* int zeroed load */
  597. + COSTS_N_INSNS (3), /* float load */
  598. + COSTS_N_INSNS (9), /* fmov, fneg, fabs */
  599. + COSTS_N_INSNS (9), /* fadd, fsub */
  600. + COSTS_N_INSNS (9), /* fcmp */
  601. + COSTS_N_INSNS (9), /* fmov, fmovr */
  602. + COSTS_N_INSNS (9), /* fmul */
  603. + COSTS_N_INSNS (23), /* fdivs */
  604. + COSTS_N_INSNS (37), /* fdivd */
  605. + COSTS_N_INSNS (23), /* fsqrts */
  606. + COSTS_N_INSNS (37), /* fsqrtd */
  607. + COSTS_N_INSNS (9), /* imul */
  608. + COSTS_N_INSNS (9), /* imulX */
  609. + 0, /* imul bit factor */
  610. + COSTS_N_INSNS (31), /* idiv, average of 17 - 45 cycle range */
  611. + COSTS_N_INSNS (30), /* idivX, average of 16 - 44 cycle range */
  612. + COSTS_N_INSNS (1), /* movcc/movr */
  613. + 0, /* shift penalty */
  614. + 5 /* branch cost */
  615. +};
  616. +
  617. +static const
  618. +struct processor_costs niagara4_costs = {
  619. + COSTS_N_INSNS (5), /* int load */
  620. + COSTS_N_INSNS (5), /* int signed load */
  621. + COSTS_N_INSNS (5), /* int zeroed load */
  622. + COSTS_N_INSNS (5), /* float load */
  623. + COSTS_N_INSNS (11), /* fmov, fneg, fabs */
  624. + COSTS_N_INSNS (11), /* fadd, fsub */
  625. + COSTS_N_INSNS (11), /* fcmp */
  626. + COSTS_N_INSNS (11), /* fmov, fmovr */
  627. + COSTS_N_INSNS (11), /* fmul */
  628. + COSTS_N_INSNS (24), /* fdivs */
  629. + COSTS_N_INSNS (37), /* fdivd */
  630. + COSTS_N_INSNS (24), /* fsqrts */
  631. + COSTS_N_INSNS (37), /* fsqrtd */
  632. + COSTS_N_INSNS (12), /* imul */
  633. + COSTS_N_INSNS (12), /* imulX */
  634. + 0, /* imul bit factor */
  635. + COSTS_N_INSNS (50), /* idiv, average of 41 - 60 cycle range */
  636. + COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
  637. + COSTS_N_INSNS (1), /* movcc/movr */
  638. + 0, /* shift penalty */
  639. + 2 /* branch cost */
  640. +};
  641. +
  642. +static const
  643. +struct processor_costs niagara7_costs = {
  644. + COSTS_N_INSNS (5), /* int load */
  645. + COSTS_N_INSNS (5), /* int signed load */
  646. + COSTS_N_INSNS (5), /* int zeroed load */
  647. + COSTS_N_INSNS (5), /* float load */
  648. + COSTS_N_INSNS (11), /* fmov, fneg, fabs */
  649. + COSTS_N_INSNS (11), /* fadd, fsub */
  650. + COSTS_N_INSNS (11), /* fcmp */
  651. + COSTS_N_INSNS (11), /* fmov, fmovr */
  652. + COSTS_N_INSNS (11), /* fmul */
  653. + COSTS_N_INSNS (24), /* fdivs */
  654. + COSTS_N_INSNS (37), /* fdivd */
  655. + COSTS_N_INSNS (24), /* fsqrts */
  656. + COSTS_N_INSNS (37), /* fsqrtd */
  657. + COSTS_N_INSNS (12), /* imul */
  658. + COSTS_N_INSNS (12), /* imulX */
  659. + 0, /* imul bit factor */
  660. + COSTS_N_INSNS (51), /* idiv, average of 42 - 61 cycle range */
  661. + COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
  662. + COSTS_N_INSNS (1), /* movcc/movr */
  663. + 0, /* shift penalty */
  664. + 1 /* branch cost */
  665. +};
  666. +
  667. +static const
  668. +struct processor_costs m8_costs = {
  669. + COSTS_N_INSNS (3), /* int load */
  670. + COSTS_N_INSNS (3), /* int signed load */
  671. + COSTS_N_INSNS (3), /* int zeroed load */
  672. + COSTS_N_INSNS (3), /* float load */
  673. + COSTS_N_INSNS (9), /* fmov, fneg, fabs */
  674. + COSTS_N_INSNS (9), /* fadd, fsub */
  675. + COSTS_N_INSNS (9), /* fcmp */
  676. + COSTS_N_INSNS (9), /* fmov, fmovr */
  677. + COSTS_N_INSNS (9), /* fmul */
  678. + COSTS_N_INSNS (26), /* fdivs */
  679. + COSTS_N_INSNS (30), /* fdivd */
  680. + COSTS_N_INSNS (33), /* fsqrts */
  681. + COSTS_N_INSNS (41), /* fsqrtd */
  682. + COSTS_N_INSNS (12), /* imul */
  683. + COSTS_N_INSNS (10), /* imulX */
  684. + 0, /* imul bit factor */
  685. + COSTS_N_INSNS (57), /* udiv/sdiv */
  686. + COSTS_N_INSNS (30), /* udivx/sdivx */
  687. + COSTS_N_INSNS (1), /* movcc/movr */
  688. + 0, /* shift penalty */
  689. + 1 /* branch cost */
  690. +};
  691. +
  692. +static const struct processor_costs *sparc_costs = &cypress_costs;
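+
+/* Illustrative note, not part of the original source: sparc_costs is
+   repointed during option processing according to -mcpu/-mtune,
+   conceptually:
+     if (sparc_cpu == PROCESSOR_ULTRASPARC)
+       sparc_costs = &ultrasparc_costs;
+   with cypress_costs as the default shown here. */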
  693. +
  694. +#ifdef HAVE_AS_RELAX_OPTION
  695. +/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
  696. + "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
  697. + With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out
  698. + whether anything branches between the sethi and jmp. */
  699. +#define LEAF_SIBCALL_SLOT_RESERVED_P 1
  700. +#else
  701. +#define LEAF_SIBCALL_SLOT_RESERVED_P \
  702. + ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
  703. +#endif
  704. +
  705. +/* Vector to say how input registers are mapped to output registers.
  706. + HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
  707. + eliminate it. You must use -fomit-frame-pointer to get that. */
  708. +char leaf_reg_remap[] =
  709. +{ 0, 1, 2, 3, 4, 5, 6, 7,
  710. + -1, -1, -1, -1, -1, -1, 14, -1,
  711. + -1, -1, -1, -1, -1, -1, -1, -1,
  712. + 8, 9, 10, 11, 12, 13, -1, 15,
  713. +
  714. + 32, 33, 34, 35, 36, 37, 38, 39,
  715. + 40, 41, 42, 43, 44, 45, 46, 47,
  716. + 48, 49, 50, 51, 52, 53, 54, 55,
  717. + 56, 57, 58, 59, 60, 61, 62, 63,
  718. + 64, 65, 66, 67, 68, 69, 70, 71,
  719. + 72, 73, 74, 75, 76, 77, 78, 79,
  720. + 80, 81, 82, 83, 84, 85, 86, 87,
  721. + 88, 89, 90, 91, 92, 93, 94, 95,
  722. + 96, 97, 98, 99, 100, 101, 102};
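+
+/* Illustrative examples, not part of the original source: a leaf
+   function does not shift the register window, so incoming registers
+   are rewritten to their out-register aliases, e.g.
+     leaf_reg_remap[24] == 8     %i0 -> %o0
+     leaf_reg_remap[31] == 15    %i7 -> %o7
+     leaf_reg_remap[30] == -1    %fp cannot be remapped
+     leaf_reg_remap[14] == 14    %sp stays put  */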
  723. +
  724. +/* Vector, indexed by hard register number, which contains 1
  725. + for a register that is allowable in a candidate for leaf
  726. + function treatment. */
  727. +char sparc_leaf_regs[] =
  728. +{ 1, 1, 1, 1, 1, 1, 1, 1,
  729. + 0, 0, 0, 0, 0, 0, 1, 0,
  730. + 0, 0, 0, 0, 0, 0, 0, 0,
  731. + 1, 1, 1, 1, 1, 1, 0, 1,
  732. + 1, 1, 1, 1, 1, 1, 1, 1,
  733. + 1, 1, 1, 1, 1, 1, 1, 1,
  734. + 1, 1, 1, 1, 1, 1, 1, 1,
  735. + 1, 1, 1, 1, 1, 1, 1, 1,
  736. + 1, 1, 1, 1, 1, 1, 1, 1,
  737. + 1, 1, 1, 1, 1, 1, 1, 1,
  738. + 1, 1, 1, 1, 1, 1, 1, 1,
  739. + 1, 1, 1, 1, 1, 1, 1, 1,
  740. + 1, 1, 1, 1, 1, 1, 1};
  741. +
  742. +struct GTY(()) machine_function
  743. +{
  744. + /* Size of the frame of the function. */
  745. + HOST_WIDE_INT frame_size;
  746. +
  747. + /* Size of the frame of the function minus the register window save area
  748. + and the outgoing argument area. */
  749. + HOST_WIDE_INT apparent_frame_size;
  750. +
  751. + /* Register we pretend the frame pointer is allocated to. Normally, this
  752. + is %fp, but if we are in a leaf procedure, this is (%sp + offset). We
  753. + record "offset" separately as it may be too big for (reg + disp). */
  754. + rtx frame_base_reg;
  755. + HOST_WIDE_INT frame_base_offset;
  756. +
  757. + /* Number of global or FP registers to be saved (as 4-byte quantities). */
  758. + int n_global_fp_regs;
  759. +
  760. + /* True if the current function is leaf and uses only leaf regs,
  761. + so that the SPARC leaf function optimization can be applied.
  762. + Private version of crtl->uses_only_leaf_regs, see
  763. + sparc_expand_prologue for the rationale. */
  764. + int leaf_function_p;
  765. +
  766. + /* True if the prologue saves local or in registers. */
  767. + bool save_local_in_regs_p;
  768. +
  769. + /* True if the data calculated by sparc_expand_prologue are valid. */
  770. + bool prologue_data_valid_p;
  771. +};
  772. +
  773. +#define sparc_frame_size cfun->machine->frame_size
  774. +#define sparc_apparent_frame_size cfun->machine->apparent_frame_size
  775. +#define sparc_frame_base_reg cfun->machine->frame_base_reg
  776. +#define sparc_frame_base_offset cfun->machine->frame_base_offset
  777. +#define sparc_n_global_fp_regs cfun->machine->n_global_fp_regs
  778. +#define sparc_leaf_function_p cfun->machine->leaf_function_p
  779. +#define sparc_save_local_in_regs_p cfun->machine->save_local_in_regs_p
  780. +#define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
  781. +
  782. +/* 1 if the next opcode is to be specially indented. */
  783. +int sparc_indent_opcode = 0;
  784. +
  785. +static void sparc_option_override (void);
  786. +static void sparc_init_modes (void);
  787. +static int function_arg_slotno (const CUMULATIVE_ARGS *, machine_mode,
  788. + const_tree, bool, bool, int *, int *);
  789. +
  790. +static int supersparc_adjust_cost (rtx_insn *, int, rtx_insn *, int);
  791. +static int hypersparc_adjust_cost (rtx_insn *, int, rtx_insn *, int);
  792. +
  793. +static void sparc_emit_set_const32 (rtx, rtx);
  794. +static void sparc_emit_set_const64 (rtx, rtx);
  795. +static void sparc_output_addr_vec (rtx);
  796. +static void sparc_output_addr_diff_vec (rtx);
  797. +static void sparc_output_deferred_case_vectors (void);
  798. +static bool sparc_legitimate_address_p (machine_mode, rtx, bool);
  799. +static bool sparc_legitimate_constant_p (machine_mode, rtx);
  800. +static rtx sparc_builtin_saveregs (void);
  801. +static int epilogue_renumber (rtx *, int);
  802. +static bool sparc_assemble_integer (rtx, unsigned int, int);
  803. +static int set_extends (rtx_insn *);
  804. +static void sparc_asm_function_prologue (FILE *);
  805. +static void sparc_asm_function_epilogue (FILE *);
  806. +#ifdef TARGET_SOLARIS
  807. +static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
  808. + tree) ATTRIBUTE_UNUSED;
  809. +#endif
  810. +static int sparc_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
  811. +static int sparc_issue_rate (void);
  812. +static void sparc_sched_init (FILE *, int, int);
  813. +static int sparc_use_sched_lookahead (void);
  814. +
  815. +static void emit_soft_tfmode_libcall (const char *, int, rtx *);
  816. +static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
  817. +static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
  818. +static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
  819. +static void emit_hard_tfmode_operation (enum rtx_code, rtx *);
  820. +
  821. +static bool sparc_function_ok_for_sibcall (tree, tree);
  822. +static void sparc_init_libfuncs (void);
  823. +static void sparc_init_builtins (void);
  824. +static void sparc_fpu_init_builtins (void);
  825. +static void sparc_vis_init_builtins (void);
  826. +static tree sparc_builtin_decl (unsigned, bool);
  827. +static rtx sparc_expand_builtin (tree, rtx, rtx, machine_mode, int);
  828. +static tree sparc_fold_builtin (tree, int, tree *, bool);
  829. +static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
  830. + HOST_WIDE_INT, tree);
  831. +static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
  832. + HOST_WIDE_INT, const_tree);
  833. +static struct machine_function * sparc_init_machine_status (void);
  834. +static bool sparc_cannot_force_const_mem (machine_mode, rtx);
  835. +static rtx sparc_tls_get_addr (void);
  836. +static rtx sparc_tls_got (void);
  837. +static int sparc_register_move_cost (machine_mode,
  838. + reg_class_t, reg_class_t);
  839. +static bool sparc_rtx_costs (rtx, machine_mode, int, int, int *, bool);
  840. +static machine_mode sparc_promote_function_mode (const_tree, machine_mode,
  841. + int *, const_tree, int);
  842. +static bool sparc_strict_argument_naming (cumulative_args_t);
  843. +static void sparc_va_start (tree, rtx);
  844. +static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
  845. +static bool sparc_vector_mode_supported_p (machine_mode);
  846. +static bool sparc_tls_referenced_p (rtx);
  847. +static rtx sparc_legitimize_tls_address (rtx);
  848. +static rtx sparc_legitimize_pic_address (rtx, rtx);
  849. +static rtx sparc_legitimize_address (rtx, rtx, machine_mode);
  850. +static rtx sparc_delegitimize_address (rtx);
  851. +static bool sparc_mode_dependent_address_p (const_rtx, addr_space_t);
  852. +static bool sparc_pass_by_reference (cumulative_args_t,
  853. + const function_arg_info &);
  854. +static void sparc_function_arg_advance (cumulative_args_t,
  855. + const function_arg_info &);
  856. +static rtx sparc_function_arg (cumulative_args_t, const function_arg_info &);
  857. +static rtx sparc_function_incoming_arg (cumulative_args_t,
  858. + const function_arg_info &);
  859. +static pad_direction sparc_function_arg_padding (machine_mode, const_tree);
  860. +static unsigned int sparc_function_arg_boundary (machine_mode,
  861. + const_tree);
  862. +static int sparc_arg_partial_bytes (cumulative_args_t,
  863. + const function_arg_info &);
  864. +static bool sparc_return_in_memory (const_tree, const_tree);
  865. +static rtx sparc_struct_value_rtx (tree, int);
  866. +static rtx sparc_function_value (const_tree, const_tree, bool);
  867. +static rtx sparc_libcall_value (machine_mode, const_rtx);
  868. +static bool sparc_function_value_regno_p (const unsigned int);
  869. +static unsigned HOST_WIDE_INT sparc_asan_shadow_offset (void);
  870. +static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
  871. +static void sparc_file_end (void);
  872. +static bool sparc_frame_pointer_required (void);
  873. +static bool sparc_can_eliminate (const int, const int);
  874. +static void sparc_conditional_register_usage (void);
  875. +static bool sparc_use_pseudo_pic_reg (void);
  876. +static void sparc_init_pic_reg (void);
  877. +#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
  878. +static const char *sparc_mangle_type (const_tree);
  879. +#endif
  880. +static void sparc_trampoline_init (rtx, tree, rtx);
  881. +static machine_mode sparc_preferred_simd_mode (scalar_mode);
  882. +static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
  883. +static bool sparc_lra_p (void);
  884. +static bool sparc_print_operand_punct_valid_p (unsigned char);
  885. +static void sparc_print_operand (FILE *, rtx, int);
  886. +static void sparc_print_operand_address (FILE *, machine_mode, rtx);
  887. +static reg_class_t sparc_secondary_reload (bool, rtx, reg_class_t,
  888. + machine_mode,
  889. + secondary_reload_info *);
  890. +static bool sparc_secondary_memory_needed (machine_mode, reg_class_t,
  891. + reg_class_t);
  892. +static machine_mode sparc_secondary_memory_needed_mode (machine_mode);
  893. +static scalar_int_mode sparc_cstore_mode (enum insn_code icode);
  894. +static void sparc_atomic_assign_expand_fenv (tree *, tree *, tree *);
  895. +static bool sparc_fixed_condition_code_regs (unsigned int *, unsigned int *);
  896. +static unsigned int sparc_min_arithmetic_precision (void);
  897. +static unsigned int sparc_hard_regno_nregs (unsigned int, machine_mode);
  898. +static bool sparc_hard_regno_mode_ok (unsigned int, machine_mode);
  899. +static bool sparc_modes_tieable_p (machine_mode, machine_mode);
  900. +static bool sparc_can_change_mode_class (machine_mode, machine_mode,
  901. + reg_class_t);
  902. +static HOST_WIDE_INT sparc_constant_alignment (const_tree, HOST_WIDE_INT);
  903. +static bool sparc_vectorize_vec_perm_const (machine_mode, rtx, rtx, rtx,
  904. + const vec_perm_indices &);
  905. +static bool sparc_can_follow_jump (const rtx_insn *, const rtx_insn *);
  906. +
  907. +#ifdef SUBTARGET_ATTRIBUTE_TABLE
  908. +/* Table of valid machine attributes. */
  909. +static const struct attribute_spec sparc_attribute_table[] =
  910. +{
  911. + /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
  912. + do_diagnostic, handler, exclude } */
  913. + SUBTARGET_ATTRIBUTE_TABLE,
  914. + { NULL, 0, 0, false, false, false, false, NULL, NULL }
  915. +};
  916. +#endif
  917. +
  918. +char sparc_hard_reg_printed[8];
  919. +
  920. +/* Initialize the GCC target structure. */
  921. +
  922. +/* The default is to use .half rather than .short for aligned HI objects. */
  923. +#undef TARGET_ASM_ALIGNED_HI_OP
  924. +#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
  925. +
  926. +#undef TARGET_ASM_UNALIGNED_HI_OP
  927. +#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
  928. +#undef TARGET_ASM_UNALIGNED_SI_OP
  929. +#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
  930. +#undef TARGET_ASM_UNALIGNED_DI_OP
  931. +#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
  932. +
  933. +/* The target hook has to handle DI-mode values. */
  934. +#undef TARGET_ASM_INTEGER
  935. +#define TARGET_ASM_INTEGER sparc_assemble_integer
  936. +
  937. +#undef TARGET_ASM_FUNCTION_PROLOGUE
  938. +#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
  939. +#undef TARGET_ASM_FUNCTION_EPILOGUE
  940. +#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue
  941. +
  942. +#undef TARGET_SCHED_ADJUST_COST
  943. +#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
  944. +#undef TARGET_SCHED_ISSUE_RATE
  945. +#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
  946. +#undef TARGET_SCHED_INIT
  947. +#define TARGET_SCHED_INIT sparc_sched_init
  948. +#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
  949. +#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead
  950. +
  951. +#undef TARGET_FUNCTION_OK_FOR_SIBCALL
  952. +#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall
  953. +
  954. +#undef TARGET_INIT_LIBFUNCS
  955. +#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
  956. +
  957. +#undef TARGET_LEGITIMIZE_ADDRESS
  958. +#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
  959. +#undef TARGET_DELEGITIMIZE_ADDRESS
  960. +#define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
  961. +#undef TARGET_MODE_DEPENDENT_ADDRESS_P
  962. +#define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p
  963. +
  964. +#undef TARGET_INIT_BUILTINS
  965. +#define TARGET_INIT_BUILTINS sparc_init_builtins
  966. +#undef TARGET_BUILTIN_DECL
  967. +#define TARGET_BUILTIN_DECL sparc_builtin_decl
  968. +#undef TARGET_EXPAND_BUILTIN
  969. +#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
  970. +#undef TARGET_FOLD_BUILTIN
  971. +#define TARGET_FOLD_BUILTIN sparc_fold_builtin
  972. +
  973. +#if TARGET_TLS
  974. +#undef TARGET_HAVE_TLS
  975. +#define TARGET_HAVE_TLS true
  976. +#endif
  977. +
  978. +#undef TARGET_CANNOT_FORCE_CONST_MEM
  979. +#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem
  980. +
  981. +#undef TARGET_ASM_OUTPUT_MI_THUNK
  982. +#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
  983. +#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
  984. +#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk
  985. +
  986. +#undef TARGET_RTX_COSTS
  987. +#define TARGET_RTX_COSTS sparc_rtx_costs
  988. +#undef TARGET_ADDRESS_COST
  989. +#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
  990. +#undef TARGET_REGISTER_MOVE_COST
  991. +#define TARGET_REGISTER_MOVE_COST sparc_register_move_cost
  992. +
  993. +#undef TARGET_PROMOTE_FUNCTION_MODE
  994. +#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode
  995. +#undef TARGET_STRICT_ARGUMENT_NAMING
  996. +#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming
  997. +
  998. +#undef TARGET_MUST_PASS_IN_STACK
  999. +#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
  1000. +#undef TARGET_PASS_BY_REFERENCE
  1001. +#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
  1002. +#undef TARGET_ARG_PARTIAL_BYTES
  1003. +#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
  1004. +#undef TARGET_FUNCTION_ARG_ADVANCE
  1005. +#define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
  1006. +#undef TARGET_FUNCTION_ARG
  1007. +#define TARGET_FUNCTION_ARG sparc_function_arg
  1008. +#undef TARGET_FUNCTION_INCOMING_ARG
  1009. +#define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
  1010. +#undef TARGET_FUNCTION_ARG_PADDING
  1011. +#define TARGET_FUNCTION_ARG_PADDING sparc_function_arg_padding
  1012. +#undef TARGET_FUNCTION_ARG_BOUNDARY
  1013. +#define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary
  1014. +
  1015. +#undef TARGET_RETURN_IN_MEMORY
  1016. +#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
  1017. +#undef TARGET_STRUCT_VALUE_RTX
  1018. +#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
  1019. +#undef TARGET_FUNCTION_VALUE
  1020. +#define TARGET_FUNCTION_VALUE sparc_function_value
  1021. +#undef TARGET_LIBCALL_VALUE
  1022. +#define TARGET_LIBCALL_VALUE sparc_libcall_value
  1023. +#undef TARGET_FUNCTION_VALUE_REGNO_P
  1024. +#define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p
  1025. +
  1026. +#undef TARGET_EXPAND_BUILTIN_SAVEREGS
  1027. +#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
  1028. +
  1029. +#undef TARGET_ASAN_SHADOW_OFFSET
  1030. +#define TARGET_ASAN_SHADOW_OFFSET sparc_asan_shadow_offset
  1031. +
  1032. +#undef TARGET_EXPAND_BUILTIN_VA_START
  1033. +#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
  1034. +#undef TARGET_GIMPLIFY_VA_ARG_EXPR
  1035. +#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg
  1036. +
  1037. +#undef TARGET_VECTOR_MODE_SUPPORTED_P
  1038. +#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p
  1039. +
  1040. +#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
  1041. +#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode
  1042. +
  1043. +#ifdef SUBTARGET_INSERT_ATTRIBUTES
  1044. +#undef TARGET_INSERT_ATTRIBUTES
  1045. +#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
  1046. +#endif
  1047. +
  1048. +#ifdef SUBTARGET_ATTRIBUTE_TABLE
  1049. +#undef TARGET_ATTRIBUTE_TABLE
  1050. +#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
  1051. +#endif
  1052. +
  1053. +#undef TARGET_OPTION_OVERRIDE
  1054. +#define TARGET_OPTION_OVERRIDE sparc_option_override
  1055. +
  1056. +#ifdef TARGET_THREAD_SSP_OFFSET
  1057. +#undef TARGET_STACK_PROTECT_GUARD
  1058. +#define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
  1059. +#endif
  1060. +
  1061. +#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
  1062. +#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
  1063. +#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
  1064. +#endif
  1065. +
  1066. +#undef TARGET_ASM_FILE_END
  1067. +#define TARGET_ASM_FILE_END sparc_file_end
  1068. +
  1069. +#undef TARGET_FRAME_POINTER_REQUIRED
  1070. +#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required
  1071. +
  1072. +#undef TARGET_CAN_ELIMINATE
  1073. +#define TARGET_CAN_ELIMINATE sparc_can_eliminate
  1074. +
  1075. +#undef TARGET_PREFERRED_RELOAD_CLASS
  1076. +#define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class
  1077. +
  1078. +#undef TARGET_SECONDARY_RELOAD
  1079. +#define TARGET_SECONDARY_RELOAD sparc_secondary_reload
  1080. +#undef TARGET_SECONDARY_MEMORY_NEEDED
  1081. +#define TARGET_SECONDARY_MEMORY_NEEDED sparc_secondary_memory_needed
  1082. +#undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
  1083. +#define TARGET_SECONDARY_MEMORY_NEEDED_MODE sparc_secondary_memory_needed_mode
  1084. +
  1085. +#undef TARGET_CONDITIONAL_REGISTER_USAGE
  1086. +#define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage
  1087. +
  1088. +#undef TARGET_INIT_PIC_REG
  1089. +#define TARGET_INIT_PIC_REG sparc_init_pic_reg
  1090. +
  1091. +#undef TARGET_USE_PSEUDO_PIC_REG
  1092. +#define TARGET_USE_PSEUDO_PIC_REG sparc_use_pseudo_pic_reg
  1093. +
  1094. +#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
  1095. +#undef TARGET_MANGLE_TYPE
  1096. +#define TARGET_MANGLE_TYPE sparc_mangle_type
  1097. +#endif
  1098. +
  1099. +#undef TARGET_LRA_P
  1100. +#define TARGET_LRA_P sparc_lra_p
  1101. +
  1102. +#undef TARGET_LEGITIMATE_ADDRESS_P
  1103. +#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p
  1104. +
  1105. +#undef TARGET_LEGITIMATE_CONSTANT_P
  1106. +#define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p
  1107. +
  1108. +#undef TARGET_TRAMPOLINE_INIT
  1109. +#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init
  1110. +
  1111. +#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
  1112. +#define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
  1113. +#undef TARGET_PRINT_OPERAND
  1114. +#define TARGET_PRINT_OPERAND sparc_print_operand
  1115. +#undef TARGET_PRINT_OPERAND_ADDRESS
  1116. +#define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address
  1117. +
  1118. +/* The value stored by LDSTUB. */
  1119. +#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
  1120. +#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 0xff
  1121. +
  1122. +#undef TARGET_CSTORE_MODE
  1123. +#define TARGET_CSTORE_MODE sparc_cstore_mode
  1124. +
  1125. +#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
  1126. +#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV sparc_atomic_assign_expand_fenv
  1127. +
  1128. +#undef TARGET_FIXED_CONDITION_CODE_REGS
  1129. +#define TARGET_FIXED_CONDITION_CODE_REGS sparc_fixed_condition_code_regs
  1130. +
  1131. +#undef TARGET_MIN_ARITHMETIC_PRECISION
  1132. +#define TARGET_MIN_ARITHMETIC_PRECISION sparc_min_arithmetic_precision
  1133. +
  1134. +#undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
  1135. +#define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
  1136. +
  1137. +#undef TARGET_HARD_REGNO_NREGS
  1138. +#define TARGET_HARD_REGNO_NREGS sparc_hard_regno_nregs
  1139. +#undef TARGET_HARD_REGNO_MODE_OK
  1140. +#define TARGET_HARD_REGNO_MODE_OK sparc_hard_regno_mode_ok
  1141. +
  1142. +#undef TARGET_MODES_TIEABLE_P
  1143. +#define TARGET_MODES_TIEABLE_P sparc_modes_tieable_p
  1144. +
  1145. +#undef TARGET_CAN_CHANGE_MODE_CLASS
  1146. +#define TARGET_CAN_CHANGE_MODE_CLASS sparc_can_change_mode_class
  1147. +
  1148. +#undef TARGET_CONSTANT_ALIGNMENT
  1149. +#define TARGET_CONSTANT_ALIGNMENT sparc_constant_alignment
  1150. +
  1151. +#undef TARGET_VECTORIZE_VEC_PERM_CONST
  1152. +#define TARGET_VECTORIZE_VEC_PERM_CONST sparc_vectorize_vec_perm_const
  1153. +
  1154. +#undef TARGET_CAN_FOLLOW_JUMP
  1155. +#define TARGET_CAN_FOLLOW_JUMP sparc_can_follow_jump
  1156. +
  1157. +struct gcc_target targetm = TARGET_INITIALIZER;
  1158. +
  1159. +/* Return the memory reference contained in X if any, zero otherwise. */
  1160. +
  1161. +static rtx
  1162. +mem_ref (rtx x)
  1163. +{
  1164. + if (GET_CODE (x) == SIGN_EXTEND || GET_CODE (x) == ZERO_EXTEND)
  1165. + x = XEXP (x, 0);
  1166. +
  1167. + if (MEM_P (x))
  1168. + return x;
  1169. +
  1170. + return NULL_RTX;
  1171. +}
  1172. +
  1173. +/* True if any of INSN's source register(s) is REG. */
  1174. +
  1175. +static bool
  1176. +insn_uses_reg_p (rtx_insn *insn, unsigned int reg)
  1177. +{
  1178. + extract_insn (insn);
  1179. + return ((REG_P (recog_data.operand[1])
  1180. + && REGNO (recog_data.operand[1]) == reg)
  1181. + || (recog_data.n_operands == 3
  1182. + && REG_P (recog_data.operand[2])
  1183. + && REGNO (recog_data.operand[2]) == reg));
  1184. +}
  1185. +
  1186. +/* True if INSN is a floating-point division or square-root. */
  1187. +
  1188. +static bool
  1189. +div_sqrt_insn_p (rtx_insn *insn)
  1190. +{
  1191. + if (GET_CODE (PATTERN (insn)) != SET)
  1192. + return false;
  1193. +
  1194. + switch (get_attr_type (insn))
  1195. + {
  1196. + case TYPE_FPDIVS:
  1197. + case TYPE_FPSQRTS:
  1198. + case TYPE_FPDIVD:
  1199. + case TYPE_FPSQRTD:
  1200. + return true;
  1201. + default:
  1202. + return false;
  1203. + }
  1204. +}
  1205. +
  1206. +/* True if INSN is a floating-point instruction. */
  1207. +
  1208. +static bool
  1209. +fpop_insn_p (rtx_insn *insn)
  1210. +{
  1211. + if (GET_CODE (PATTERN (insn)) != SET)
  1212. + return false;
  1213. +
  1214. + switch (get_attr_type (insn))
  1215. + {
  1216. + case TYPE_FPMOVE:
  1217. + case TYPE_FPCMOVE:
  1218. + case TYPE_FP:
  1219. + case TYPE_FPCMP:
  1220. + case TYPE_FPMUL:
  1221. + case TYPE_FPDIVS:
  1222. + case TYPE_FPSQRTS:
  1223. + case TYPE_FPDIVD:
  1224. + case TYPE_FPSQRTD:
  1225. + return true;
  1226. + default:
  1227. + return false;
  1228. + }
  1229. +}
  1230. +
  1231. +/* True if INSN is an atomic instruction. */
  1232. +
  1233. +static bool
  1234. +atomic_insn_for_leon3_p (rtx_insn *insn)
  1235. +{
  1236. + switch (INSN_CODE (insn))
  1237. + {
  1238. + case CODE_FOR_swapsi:
  1239. + case CODE_FOR_ldstub:
  1240. + case CODE_FOR_atomic_compare_and_swap_leon3_1:
  1241. + return true;
  1242. + default:
  1243. + return false;
  1244. + }
  1245. +}
  1246. +
  1247. +/* We use a machine specific pass to enable workarounds for errata.
  1248. +
  1249. + We need to have the (essentially) final form of the insn stream in order
  1250. + to properly detect the various hazards. Therefore, this machine specific
  1251. + pass runs as late as possible. */
  1252. +
  1253. +/* True if INSN is an md pattern or asm statement. */
  1254. +#define USEFUL_INSN_P(INSN) \
  1255. + (NONDEBUG_INSN_P (INSN) \
  1256. + && GET_CODE (PATTERN (INSN)) != USE \
  1257. + && GET_CODE (PATTERN (INSN)) != CLOBBER)
  1258. +
  1259. +static unsigned int
  1260. +sparc_do_work_around_errata (void)
  1261. +{
  1262. + rtx_insn *insn, *next;
  1263. +
  1264. + /* Force all instructions to be split into their final form. */
  1265. + split_all_insns_noflow ();
  1266. +
  1267. + /* Now look for specific patterns in the insn stream. */
  1268. + for (insn = get_insns (); insn; insn = next)
  1269. + {
  1270. + bool insert_nop = false;
  1271. + rtx set;
  1272. + rtx_insn *jump;
  1273. + rtx_sequence *seq;
  1274. +
  1275. + /* Look into the instruction in a delay slot. */
  1276. + if (NONJUMP_INSN_P (insn)
  1277. + && (seq = dyn_cast <rtx_sequence *> (PATTERN (insn))))
  1278. + {
  1279. + jump = seq->insn (0);
  1280. + insn = seq->insn (1);
  1281. + }
  1282. + else if (JUMP_P (insn))
  1283. + jump = insn;
  1284. + else
  1285. + jump = NULL;
  1286. +
  1287. + /* Place a NOP at the branch target of an integer branch if the target is
  1288. + a floating-point operation or an FP branch; see the example below. */
  1289. + if (sparc_fix_gr712rc
  1290. + && jump
  1291. + && jump_to_label_p (jump)
  1292. + && get_attr_branch_type (jump) == BRANCH_TYPE_ICC)
  1293. + {
  1294. + rtx_insn *target = next_active_insn (JUMP_LABEL_AS_INSN (jump));
  1295. + if (target
  1296. + && (fpop_insn_p (target)
  1297. + || (JUMP_P (target)
  1298. + && get_attr_branch_type (target) == BRANCH_TYPE_FCC)))
  1299. + emit_insn_before (gen_nop (), target);
  1300. + }
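+
+ /* Illustrative example, not part of the original source: on a
+    GR712RC the fix above turns a sequence like
+        be .L1                   ! integer (icc) branch
+        ...
+      .L1:
+        faddd %f0, %f2, %f4      ! FP op at the branch target
+    into the same code with a nop emitted just before the faddd. */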
  1301. +
  1302. + /* Insert a NOP between a load instruction and an atomic instruction.
  1303. + Also insert a NOP at the branch target if there is a load in the delay
  1304. + slot and an atomic instruction at the branch target. */
  1305. + if (sparc_fix_ut700
  1306. + && NONJUMP_INSN_P (insn)
  1307. + && (set = single_set (insn)) != NULL_RTX
  1308. + && mem_ref (SET_SRC (set))
  1309. + && REG_P (SET_DEST (set)))
  1310. + {
  1311. + if (jump && jump_to_label_p (jump))
  1312. + {
  1313. + rtx_insn *target = next_active_insn (JUMP_LABEL_AS_INSN (jump));
  1314. + if (target && atomic_insn_for_leon3_p (target))
  1315. + emit_insn_before (gen_nop (), target);
  1316. + }
  1317. +
  1318. + next = next_active_insn (insn);
  1319. + if (!next)
  1320. + break;
  1321. +
  1322. + if (atomic_insn_for_leon3_p (next))
  1323. + insert_nop = true;
  1324. + }
  1325. +
  1326. + /* Look for a sequence that starts with a fdiv or fsqrt instruction and
  1327. + ends with another fdiv or fsqrt instruction with no dependencies on
  1328. + the former, along with an appropriate pattern in between. */
  1329. + if (sparc_fix_lost_divsqrt
  1330. + && NONJUMP_INSN_P (insn)
  1331. + && div_sqrt_insn_p (insn))
  1332. + {
  1333. + int i;
  1334. + int fp_found = 0;
  1335. + rtx_insn *after;
  1336. +
  1337. + const unsigned int dest_reg = REGNO (SET_DEST (single_set (insn)));
  1338. +
  1339. + next = next_active_insn (insn);
  1340. + if (!next)
  1341. + break;
  1342. +
  1343. + for (after = next, i = 0; i < 4; i++)
  1344. + {
  1345. + /* Count floating-point operations. */
  1346. + if (i != 3 && fpop_insn_p (after))
  1347. + {
  1348. + /* If the insn uses the destination register of
  1349. + the div/sqrt, then it cannot be problematic. */
  1350. + if (insn_uses_reg_p (after, dest_reg))
  1351. + break;
  1352. + fp_found++;
  1353. + }
  1354. +
  1355. + /* Count floating-point loads. */
  1356. + if (i != 3
  1357. + && (set = single_set (after)) != NULL_RTX
  1358. + && REG_P (SET_DEST (set))
  1359. + && REGNO (SET_DEST (set)) > 31)
  1360. + {
  1361. + /* If the insn uses the destination register of
  1362. + the div/sqrt, then it cannot be problematic. */
  1363. + if (REGNO (SET_DEST (set)) == dest_reg)
  1364. + break;
  1365. + fp_found++;
  1366. + }
  1367. +
  1368. + /* Check if this is a problematic sequence. */
  1369. + if (i > 1
  1370. + && fp_found >= 2
  1371. + && div_sqrt_insn_p (after))
  1372. + {
  1373. + /* If this is the short version of the problematic
  1374. + sequence, we add two NOPs in a row to also prevent
  1375. + the long version. */
  1376. + if (i == 2)
  1377. + emit_insn_before (gen_nop (), next);
  1378. + insert_nop = true;
  1379. + break;
  1380. + }
  1381. +
  1382. + /* No need to scan past a second div/sqrt. */
  1383. + if (div_sqrt_insn_p (after))
  1384. + break;
  1385. +
  1386. + /* Insert NOP before branch. */
  1387. + if (i < 3
  1388. + && (!NONJUMP_INSN_P (after)
  1389. + || GET_CODE (PATTERN (after)) == SEQUENCE))
  1390. + {
  1391. + insert_nop = true;
  1392. + break;
  1393. + }
  1394. +
  1395. + after = next_active_insn (after);
  1396. + if (!after)
  1397. + break;
  1398. + }
  1399. + }
  1400. +
  1401. + /* Look for either of these two sequences:
  1402. +
  1403. + Sequence A:
  1404. + 1. store of word size or less (e.g. st / stb / sth / stf)
  1405. + 2. any single instruction that is not a load or store
  1406. + 3. any store instruction (e.g. st / stb / sth / stf / std / stdf)
  1407. +
  1408. + Sequence B:
  1409. + 1. store of double word size (e.g. std / stdf)
  1410. + 2. any store instruction (e.g. st / stb / sth / stf / std / stdf) */
  1411. + if (sparc_fix_b2bst
  1412. + && NONJUMP_INSN_P (insn)
  1413. + && (set = single_set (insn)) != NULL_RTX
  1414. + && MEM_P (SET_DEST (set)))
  1415. + {
  1416. + /* Sequence B begins with a double-word store. */
  1417. + bool seq_b = GET_MODE_SIZE (GET_MODE (SET_DEST (set))) == 8;
  1418. + rtx_insn *after;
  1419. + int i;
  1420. +
  1421. + next = next_active_insn (insn);
  1422. + if (!next)
  1423. + break;
  1424. +
  1425. + for (after = next, i = 0; i < 2; i++)
  1426. + {
  1427. + /* Skip empty assembly statements. */
  1428. + if ((GET_CODE (PATTERN (after)) == UNSPEC_VOLATILE)
  1429. + || (USEFUL_INSN_P (after)
  1430. + && (asm_noperands (PATTERN (after)) >= 0)
  1431. + && !strcmp (decode_asm_operands (PATTERN (after),
  1432. + NULL, NULL, NULL,
  1433. + NULL, NULL), "")))
  1434. + after = next_active_insn (after);
  1435. + if (!after)
  1436. + break;
  1437. +
  1438. + /* If the insn is a branch, then it cannot be problematic. */
  1439. + if (!NONJUMP_INSN_P (after)
  1440. + || GET_CODE (PATTERN (after)) == SEQUENCE)
  1441. + break;
  1442. +
  1443. + /* Sequence B is only two instructions long. */
  1444. + if (seq_b)
  1445. + {
  1446. + /* Add NOP if followed by a store. */
  1447. + if ((set = single_set (after)) != NULL_RTX
  1448. + && MEM_P (SET_DEST (set)))
  1449. + insert_nop = true;
  1450. +
  1451. + /* Otherwise it is ok. */
  1452. + break;
  1453. + }
  1454. +
  1455. + /* If the second instruction is a load or a store,
  1456. + then the sequence cannot be problematic. */
  1457. + if (i == 0)
  1458. + {
  1459. + if ((set = single_set (after)) != NULL_RTX
  1460. + && (MEM_P (SET_DEST (set)) || mem_ref (SET_SRC (set))))
  1461. + break;
  1462. +
  1463. + after = next_active_insn (after);
  1464. + if (!after)
  1465. + break;
  1466. + }
  1467. +
  1468. + /* Add NOP if third instruction is a store. */
  1469. + if (i == 1
  1470. + && (set = single_set (after)) != NULL_RTX
  1471. + && MEM_P (SET_DEST (set)))
  1472. + insert_nop = true;
  1473. + }
  1474. + }
  1475. +
  1476. + /* Look for a single-word load into an odd-numbered FP register. */
  1477. + else if (sparc_fix_at697f
  1478. + && NONJUMP_INSN_P (insn)
  1479. + && (set = single_set (insn)) != NULL_RTX
  1480. + && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
  1481. + && mem_ref (SET_SRC (set))
  1482. + && REG_P (SET_DEST (set))
  1483. + && REGNO (SET_DEST (set)) > 31
  1484. + && REGNO (SET_DEST (set)) % 2 != 0)
  1485. + {
  1486. + /* The wrong dependency is on the enclosing double register. */
  1487. + const unsigned int x = REGNO (SET_DEST (set)) - 1;
  1488. + unsigned int src1, src2, dest;
  1489. + int code;
  1490. +
  1491. + next = next_active_insn (insn);
  1492. + if (!next)
  1493. + break;
  1494. + /* If the insn is a branch, then it cannot be problematic. */
  1495. + if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
  1496. + continue;
  1497. +
  1498. + extract_insn (next);
  1499. + code = INSN_CODE (next);
  1500. +
  1501. + switch (code)
  1502. + {
  1503. + case CODE_FOR_adddf3:
  1504. + case CODE_FOR_subdf3:
  1505. + case CODE_FOR_muldf3:
  1506. + case CODE_FOR_divdf3:
  1507. + dest = REGNO (recog_data.operand[0]);
  1508. + src1 = REGNO (recog_data.operand[1]);
  1509. + src2 = REGNO (recog_data.operand[2]);
  1510. + if (src1 != src2)
  1511. + {
  1512. + /* Case [1-4]:
  1513. + ld [address], %fx+1
  1514. + FPOPd %f{x,y}, %f{y,x}, %f{x,y} */
  1515. + if ((src1 == x || src2 == x)
  1516. + && (dest == src1 || dest == src2))
  1517. + insert_nop = true;
  1518. + }
  1519. + else
  1520. + {
  1521. + /* Case 5:
  1522. + ld [address], %fx+1
  1523. + FPOPd %fx, %fx, %fx */
  1524. + if (src1 == x
  1525. + && dest == src1
  1526. + && (code == CODE_FOR_adddf3 || code == CODE_FOR_muldf3))
  1527. + insert_nop = true;
  1528. + }
  1529. + break;
  1530. +
  1531. + case CODE_FOR_sqrtdf2:
  1532. + dest = REGNO (recog_data.operand[0]);
  1533. + src1 = REGNO (recog_data.operand[1]);
  1534. + /* Case 6:
  1535. + ld [address], %fx+1
  1536. + fsqrtd %fx, %fx */
  1537. + if (src1 == x && dest == src1)
  1538. + insert_nop = true;
  1539. + break;
  1540. +
  1541. + default:
  1542. + break;
  1543. + }
  1544. + }
  1545. +
  1546. + /* Look for a single-word load into an integer register. */
  1547. + else if (sparc_fix_ut699
  1548. + && NONJUMP_INSN_P (insn)
  1549. + && (set = single_set (insn)) != NULL_RTX
  1550. + && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) <= 4
  1551. + && (mem_ref (SET_SRC (set)) != NULL_RTX
  1552. + || INSN_CODE (insn) == CODE_FOR_movsi_pic_gotdata_op)
  1553. + && REG_P (SET_DEST (set))
  1554. + && REGNO (SET_DEST (set)) < 32)
  1555. + {
  1556. + /* There is no problem if the second memory access has a data
  1557. + dependency on the first single-cycle load. */
  1558. + rtx x = SET_DEST (set);
  1559. +
  1560. + next = next_active_insn (insn);
  1561. + if (!next)
  1562. + break;
  1563. + /* If the insn is a branch, then it cannot be problematic. */
  1564. + if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
  1565. + continue;
  1566. +
  1567. + /* Look for a second memory access to/from an integer register. */
  1568. + if ((set = single_set (next)) != NULL_RTX)
  1569. + {
  1570. + rtx src = SET_SRC (set);
  1571. + rtx dest = SET_DEST (set);
  1572. + rtx mem;
  1573. +
  1574. + /* LDD is affected. */
  1575. + if ((mem = mem_ref (src)) != NULL_RTX
  1576. + && REG_P (dest)
  1577. + && REGNO (dest) < 32
  1578. + && !reg_mentioned_p (x, XEXP (mem, 0)))
  1579. + insert_nop = true;
  1580. +
  1581. + /* STD is *not* affected. */
  1582. + else if (MEM_P (dest)
  1583. + && GET_MODE_SIZE (GET_MODE (dest)) <= 4
  1584. + && (src == CONST0_RTX (GET_MODE (dest))
  1585. + || (REG_P (src)
  1586. + && REGNO (src) < 32
  1587. + && REGNO (src) != REGNO (x)))
  1588. + && !reg_mentioned_p (x, XEXP (dest, 0)))
  1589. + insert_nop = true;
  1590. +
  1591. + /* GOT accesses use LD. */
  1592. + else if (INSN_CODE (next) == CODE_FOR_movsi_pic_gotdata_op
  1593. + && !reg_mentioned_p (x, XEXP (XEXP (src, 0), 1)))
  1594. + insert_nop = true;
  1595. + }
  1596. + }
  1597. +
  1598. + /* Look for a single-word load/operation into an FP register. */
  1599. + else if (sparc_fix_ut699
  1600. + && NONJUMP_INSN_P (insn)
  1601. + && (set = single_set (insn)) != NULL_RTX
  1602. + && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
  1603. + && REG_P (SET_DEST (set))
  1604. + && REGNO (SET_DEST (set)) > 31)
  1605. + {
  1606. + /* Number of instructions in the problematic window. */
  1607. + const int n_insns = 4;
  1608. + /* The problematic combination is with the sibling FP register. */
  1609. + const unsigned int x = REGNO (SET_DEST (set));
  1610. + const unsigned int y = x ^ 1;
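+ /* E.g. if the load targets %f1 (regno 33), the sibling is %f0
+    (regno 32): single FP registers pair up in even/odd couples. */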
  1611. + rtx_insn *after;
  1612. + int i;
  1613. +
  1614. + next = next_active_insn (insn);
  1615. + if (!next)
  1616. + break;
  1617. + /* If the insn is a branch, then it cannot be problematic. */
  1618. + if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
  1619. + continue;
  1620. +
  1621. + /* Look for a second load/operation into the sibling FP register. */
  1622. + if (!((set = single_set (next)) != NULL_RTX
  1623. + && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
  1624. + && REG_P (SET_DEST (set))
  1625. + && REGNO (SET_DEST (set)) == y))
  1626. + continue;
  1627. +
  1628. + /* Look for a (possible) store from the FP register in the next N
  1629. + instructions, but bail out if it is again modified or if there
  1630. + is a store from the sibling FP register before this store. */
  1631. + for (after = next, i = 0; i < n_insns; i++)
  1632. + {
  1633. + bool branch_p;
  1634. +
  1635. + after = next_active_insn (after);
  1636. + if (!after)
  1637. + break;
  1638. +
  1639. + /* This is a branch with an empty delay slot. */
  1640. + if (!NONJUMP_INSN_P (after))
  1641. + {
  1642. + if (++i == n_insns)
  1643. + break;
  1644. + branch_p = true;
  1645. + after = NULL;
  1646. + }
  1647. + /* This is a branch with a filled delay slot. */
  1648. + else if (rtx_sequence *seq =
  1649. + dyn_cast <rtx_sequence *> (PATTERN (after)))
  1650. + {
  1651. + if (++i == n_insns)
  1652. + break;
  1653. + branch_p = true;
  1654. + after = seq->insn (1);
  1655. + }
  1656. + /* This is a regular instruction. */
  1657. + else
  1658. + branch_p = false;
  1659. +
  1660. + if (after && (set = single_set (after)) != NULL_RTX)
  1661. + {
  1662. + const rtx src = SET_SRC (set);
  1663. + const rtx dest = SET_DEST (set);
  1664. + const unsigned int size = GET_MODE_SIZE (GET_MODE (dest));
  1665. +
  1666. + /* If the FP register is again modified before the store,
  1667. + then the store isn't affected. */
  1668. + if (REG_P (dest)
  1669. + && (REGNO (dest) == x
  1670. + || (REGNO (dest) == y && size == 8)))
  1671. + break;
  1672. +
  1673. + if (MEM_P (dest) && REG_P (src))
  1674. + {
  1675. + /* If there is a store from the sibling FP register
  1676. + before the store, then the store is not affected. */
  1677. + if (REGNO (src) == y || (REGNO (src) == x && size == 8))
  1678. + break;
  1679. +
  1680. + /* Otherwise, the store is affected. */
  1681. + if (REGNO (src) == x && size == 4)
  1682. + {
  1683. + insert_nop = true;
  1684. + break;
  1685. + }
  1686. + }
  1687. + }
  1688. +
  1689. + /* If we have a branch in the first M instructions, then we
  1690. + cannot see the (M+2)th instruction, so we play safe. */
  1691. + if (branch_p && i <= (n_insns - 2))
  1692. + {
  1693. + insert_nop = true;
  1694. + break;
  1695. + }
  1696. + }
  1697. + }
  1698. +
  1699. + else
  1700. + next = NEXT_INSN (insn);
  1701. +
  1702. + if (insert_nop)
  1703. + emit_insn_before (gen_nop (), next);
  1704. + }
  1705. +
  1706. + return 0;
  1707. +}
  1708. +
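+ /* Editor's illustration (a hedged sketch, not part of the original patch):
+    for -mfix-ut699, the loop above looks for a single-word FP load followed
+    by a load into the sibling FP register and then a store from the first
+    register within the 4-insn window, e.g.
+
+        ld   [%o0], %f0
+        ld   [%o0+4], %f1       ! sibling of %f0
+        st   %f0, [%o1]         ! store within the problematic window
+
+    and emits a NOP before the second load (NEXT):
+
+        ld   [%o0], %f0
+        nop
+        ld   [%o0+4], %f1
+        st   %f0, [%o1]
+
+    Registers and addressing modes here are purely illustrative.  */
+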
  1709. +namespace {
  1710. +
  1711. +const pass_data pass_data_work_around_errata =
  1712. +{
  1713. + RTL_PASS, /* type */
  1714. + "errata", /* name */
  1715. + OPTGROUP_NONE, /* optinfo_flags */
  1716. + TV_MACH_DEP, /* tv_id */
  1717. + 0, /* properties_required */
  1718. + 0, /* properties_provided */
  1719. + 0, /* properties_destroyed */
  1720. + 0, /* todo_flags_start */
  1721. + 0, /* todo_flags_finish */
  1722. +};
  1723. +
  1724. +class pass_work_around_errata : public rtl_opt_pass
  1725. +{
  1726. +public:
  1727. + pass_work_around_errata(gcc::context *ctxt)
  1728. + : rtl_opt_pass(pass_data_work_around_errata, ctxt)
  1729. + {}
  1730. +
  1731. + /* opt_pass methods: */
  1732. + virtual bool gate (function *)
  1733. + {
  1734. + return sparc_fix_at697f
  1735. + || sparc_fix_ut699 || sparc_fix_ut700 || sparc_fix_gr712rc
  1736. + || sparc_fix_b2bst || sparc_fix_lost_divsqrt;
  1737. + }
  1738. +
  1739. + virtual unsigned int execute (function *)
  1740. + {
  1741. + return sparc_do_work_around_errata ();
  1742. + }
  1743. +
  1744. +}; // class pass_work_around_errata
  1745. +
  1746. +} // anon namespace
  1747. +
  1748. +rtl_opt_pass *
  1749. +make_pass_work_around_errata (gcc::context *ctxt)
  1750. +{
  1751. + return new pass_work_around_errata (ctxt);
  1752. +}
  1753. +
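+ /* Editor's note: this patch only defines the pass; it still has to be
+    inserted into the RTL pipeline, late enough that scheduling cannot
+    separate the NOPs again.  A hedged sketch using GCC's generic pass
+    machinery (the anchor pass name "mach" is an assumption):
+
+        opt_pass *errata_pass = make_pass_work_around_errata (g);
+        struct register_pass_info insert_pass_work_around_errata =
+          {
+            errata_pass,            /* pass */
+            "mach",                 /* reference_pass_name */
+            1,                      /* ref_pass_instance_number */
+            PASS_POS_INSERT_AFTER   /* po_op */
+          };
+        register_pass (&insert_pass_work_around_errata);  */
+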
  1754. +/* Helpers for TARGET_DEBUG_OPTIONS. */
  1755. +static void
  1756. +dump_target_flag_bits (const int flags)
  1757. +{
  1758. + if (flags & MASK_64BIT)
  1759. + fprintf (stderr, "64BIT ");
  1760. + if (flags & MASK_APP_REGS)
  1761. + fprintf (stderr, "APP_REGS ");
  1762. + if (flags & MASK_FASTER_STRUCTS)
  1763. + fprintf (stderr, "FASTER_STRUCTS ");
  1764. + if (flags & MASK_FLAT)
  1765. + fprintf (stderr, "FLAT ");
  1766. + if (flags & MASK_FMAF)
  1767. + fprintf (stderr, "FMAF ");
  1768. + if (flags & MASK_FSMULD)
  1769. + fprintf (stderr, "FSMULD ");
  1770. + if (flags & MASK_FPU)
  1771. + fprintf (stderr, "FPU ");
  1772. + if (flags & MASK_HARD_QUAD)
  1773. + fprintf (stderr, "HARD_QUAD ");
  1774. + if (flags & MASK_POPC)
  1775. + fprintf (stderr, "POPC ");
  1776. + if (flags & MASK_PTR64)
  1777. + fprintf (stderr, "PTR64 ");
  1778. + if (flags & MASK_STACK_BIAS)
  1779. + fprintf (stderr, "STACK_BIAS ");
  1780. + if (flags & MASK_UNALIGNED_DOUBLES)
  1781. + fprintf (stderr, "UNALIGNED_DOUBLES ");
  1782. + if (flags & MASK_V8PLUS)
  1783. + fprintf (stderr, "V8PLUS ");
  1784. + if (flags & MASK_VIS)
  1785. + fprintf (stderr, "VIS ");
  1786. + if (flags & MASK_VIS2)
  1787. + fprintf (stderr, "VIS2 ");
  1788. + if (flags & MASK_VIS3)
  1789. + fprintf (stderr, "VIS3 ");
  1790. + if (flags & MASK_VIS4)
  1791. + fprintf (stderr, "VIS4 ");
  1792. + if (flags & MASK_VIS4B)
  1793. + fprintf (stderr, "VIS4B ");
  1794. + if (flags & MASK_CBCOND)
  1795. + fprintf (stderr, "CBCOND ");
  1796. + if (flags & MASK_DEPRECATED_V8_INSNS)
  1797. + fprintf (stderr, "DEPRECATED_V8_INSNS ");
  1798. + if (flags & MASK_SPARCLET)
  1799. + fprintf (stderr, "SPARCLET ");
  1800. + if (flags & MASK_SPARCLITE)
  1801. + fprintf (stderr, "SPARCLITE ");
  1802. + if (flags & MASK_V8)
  1803. + fprintf (stderr, "V8 ");
  1804. + if (flags & MASK_V9)
  1805. + fprintf (stderr, "V9 ");
  1806. +}
  1807. +
  1808. +static void
  1809. +dump_target_flags (const char *prefix, const int flags)
  1810. +{
  1811. + fprintf (stderr, "%s: (%08x) [ ", prefix, flags);
  1812. + dump_target_flag_bits (flags);
  1813. + fprintf (stderr, "]\n");
  1814. +}
  1815. +
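+ /* For example (illustrative output only), with the options debug flag
+    enabled (e.g. -mdebug=options) the helpers above print lines of the form
+
+        Initial target_flags: (xxxxxxxx) [ FPU VIS V9 ... ]
+
+    where the hex word is the raw mask and the names are its decoded bits;
+    the exact values depend on the configuration.  */
+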
  1816. +/* Validate and override various options, and do some machine dependent
  1817. + initialization. */
  1818. +
  1819. +static void
  1820. +sparc_option_override (void)
  1821. +{
  1822. + /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=. */
  1823. + static struct cpu_default {
  1824. + const int cpu;
  1825. + const enum sparc_processor_type processor;
  1826. + } const cpu_default[] = {
  1827. + /* There must be one entry here for each TARGET_CPU value. */
  1828. + { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
  1829. + { TARGET_CPU_v8, PROCESSOR_V8 },
  1830. + { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
  1831. + { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
  1832. + { TARGET_CPU_leon, PROCESSOR_LEON },
  1833. + { TARGET_CPU_leon3, PROCESSOR_LEON3 },
  1834. + { TARGET_CPU_leon3v7, PROCESSOR_LEON3V7 },
  1835. + { TARGET_CPU_sparclite, PROCESSOR_F930 },
  1836. + { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
  1837. + { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
  1838. + { TARGET_CPU_v9, PROCESSOR_V9 },
  1839. + { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
  1840. + { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
  1841. + { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
  1842. + { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
  1843. + { TARGET_CPU_niagara3, PROCESSOR_NIAGARA3 },
  1844. + { TARGET_CPU_niagara4, PROCESSOR_NIAGARA4 },
  1845. + { TARGET_CPU_niagara7, PROCESSOR_NIAGARA7 },
  1846. + { TARGET_CPU_m8, PROCESSOR_M8 },
  1847. + { -1, PROCESSOR_V7 }
  1848. + };
  1849. + const struct cpu_default *def;
  1850. + /* Table of values for -m{cpu,tune}=. This must match the order of
  1851. + the enum processor_type in sparc-opts.h. */
  1852. + static struct cpu_table {
  1853. + const char *const name;
  1854. + const int disable;
  1855. + const int enable;
  1856. + } const cpu_table[] = {
  1857. + { "v7", MASK_ISA, 0 },
  1858. + { "cypress", MASK_ISA, 0 },
  1859. + { "v8", MASK_ISA, MASK_V8 },
  1860. + /* TI TMS390Z55 supersparc */
  1861. + { "supersparc", MASK_ISA, MASK_V8 },
  1862. + { "hypersparc", MASK_ISA, MASK_V8 },
  1863. + { "leon", MASK_ISA|MASK_FSMULD, MASK_V8|MASK_LEON },
  1864. + { "leon3", MASK_ISA, MASK_V8|MASK_LEON3 },
  1865. + { "leon3v7", MASK_ISA, MASK_LEON3 },
  1866. + { "sparclite", MASK_ISA, MASK_SPARCLITE },
  1867. + /* The Fujitsu MB86930 is the original sparclite chip, with no FPU. */
  1868. + { "f930", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
  1869. + /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU. */
  1870. + { "f934", MASK_ISA, MASK_SPARCLITE },
  1871. + { "sparclite86x", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
  1872. + { "sparclet", MASK_ISA, MASK_SPARCLET },
  1873. + /* TEMIC sparclet */
  1874. + { "tsc701", MASK_ISA, MASK_SPARCLET },
  1875. + { "v9", MASK_ISA, MASK_V9 },
  1876. + /* UltraSPARC I, II, IIi */
  1877. + { "ultrasparc", MASK_ISA,
  1878. + /* Although insns using %y are deprecated, it is a clear win. */
  1879. + MASK_V9|MASK_DEPRECATED_V8_INSNS },
  1880. + /* UltraSPARC III */
  1881. + /* ??? Check if %y issue still holds true. */
  1882. + { "ultrasparc3", MASK_ISA,
  1883. + MASK_V9|MASK_DEPRECATED_V8_INSNS|MASK_VIS2 },
  1884. + /* UltraSPARC T1 */
  1885. + { "niagara", MASK_ISA,
  1886. + MASK_V9|MASK_DEPRECATED_V8_INSNS },
  1887. + /* UltraSPARC T2 */
  1888. + { "niagara2", MASK_ISA,
  1889. + MASK_V9|MASK_POPC|MASK_VIS2 },
  1890. + /* UltraSPARC T3 */
  1891. + { "niagara3", MASK_ISA,
  1892. + MASK_V9|MASK_POPC|MASK_VIS3|MASK_FMAF },
  1893. + /* UltraSPARC T4 */
  1894. + { "niagara4", MASK_ISA,
  1895. + MASK_V9|MASK_POPC|MASK_VIS3|MASK_FMAF|MASK_CBCOND },
  1896. + /* UltraSPARC M7 */
  1897. + { "niagara7", MASK_ISA,
  1898. + MASK_V9|MASK_POPC|MASK_VIS4|MASK_FMAF|MASK_CBCOND|MASK_SUBXC },
  1899. + /* UltraSPARC M8 */
  1900. + { "m8", MASK_ISA,
  1901. + MASK_V9|MASK_POPC|MASK_VIS4B|MASK_FMAF|MASK_CBCOND|MASK_SUBXC }
  1902. + };
  1903. + const struct cpu_table *cpu;
  1904. + unsigned int i;
  1905. +
  1906. + if (sparc_debug_string != NULL)
  1907. + {
  1908. + const char *q;
  1909. + char *p;
  1910. +
  1911. + p = ASTRDUP (sparc_debug_string);
  1912. + while ((q = strtok (p, ",")) != NULL)
  1913. + {
  1914. + bool invert;
  1915. + int mask;
  1916. +
  1917. + p = NULL;
  1918. + if (*q == '!')
  1919. + {
  1920. + invert = true;
  1921. + q++;
  1922. + }
  1923. + else
  1924. + invert = false;
  1925. +
  1926. + if (! strcmp (q, "all"))
  1927. + mask = MASK_DEBUG_ALL;
  1928. + else if (! strcmp (q, "options"))
  1929. + mask = MASK_DEBUG_OPTIONS;
  1930. + else
  1931. + error ("unknown %<-mdebug-%s%> switch", q);
  1932. +
  1933. + if (invert)
  1934. + sparc_debug &= ~mask;
  1935. + else
  1936. + sparc_debug |= mask;
  1937. + }
  1938. + }
  1939. +
  1940. + /* Enable the FsMULd instruction by default if not explicitly specified by
  1941. + the user. It may be later disabled by the CPU (explicitly or not). */
  1942. + if (TARGET_FPU && !(target_flags_explicit & MASK_FSMULD))
  1943. + target_flags |= MASK_FSMULD;
  1944. +
  1945. + if (TARGET_DEBUG_OPTIONS)
  1946. + {
  1947. + dump_target_flags ("Initial target_flags", target_flags);
  1948. + dump_target_flags ("target_flags_explicit", target_flags_explicit);
  1949. + }
  1950. +
  1951. +#ifdef SUBTARGET_OVERRIDE_OPTIONS
  1952. + SUBTARGET_OVERRIDE_OPTIONS;
  1953. +#endif
  1954. +
  1955. +#ifndef SPARC_BI_ARCH
  1956. + /* Check for unsupported architecture size. */
  1957. + if (!TARGET_64BIT != DEFAULT_ARCH32_P)
  1958. + error ("%s is not supported by this configuration",
  1959. + DEFAULT_ARCH32_P ? "-m64" : "-m32");
  1960. +#endif
  1961. +
  1962. + /* We force all 64-bit archs to use 128-bit long double. */
  1963. + if (TARGET_ARCH64 && !TARGET_LONG_DOUBLE_128)
  1964. + {
  1965. + error ("%<-mlong-double-64%> not allowed with %<-m64%>");
  1966. + target_flags |= MASK_LONG_DOUBLE_128;
  1967. + }
  1968. +
  1969. + /* Check that -fcall-saved-REG wasn't specified for out registers. */
  1970. + for (i = 8; i < 16; i++)
  1971. + if (!call_used_regs [i])
  1972. + {
  1973. + error ("%<-fcall-saved-REG%> is not supported for out registers");
  1974. + call_used_regs [i] = 1;
  1975. + }
  1976. +
  1977. + /* Set the default CPU if no -mcpu option was specified. */
  1978. + if (!global_options_set.x_sparc_cpu_and_features)
  1979. + {
  1980. + for (def = &cpu_default[0]; def->cpu != -1; ++def)
  1981. + if (def->cpu == TARGET_CPU_DEFAULT)
  1982. + break;
  1983. + gcc_assert (def->cpu != -1);
  1984. + sparc_cpu_and_features = def->processor;
  1985. + }
  1986. +
  1987. + /* Set the default CPU if no -mtune option was specified. */
  1988. + if (!global_options_set.x_sparc_cpu)
  1989. + sparc_cpu = sparc_cpu_and_features;
  1990. +
  1991. + cpu = &cpu_table[(int) sparc_cpu_and_features];
  1992. +
  1993. + if (TARGET_DEBUG_OPTIONS)
  1994. + {
  1995. + fprintf (stderr, "sparc_cpu_and_features: %s\n", cpu->name);
  1996. + dump_target_flags ("cpu->disable", cpu->disable);
  1997. + dump_target_flags ("cpu->enable", cpu->enable);
  1998. + }
  1999. +
  2000. + target_flags &= ~cpu->disable;
  2001. + target_flags |= (cpu->enable
  2002. +#ifndef HAVE_AS_FMAF_HPC_VIS3
  2003. + & ~(MASK_FMAF | MASK_VIS3)
  2004. +#endif
  2005. +#ifndef HAVE_AS_SPARC4
  2006. + & ~MASK_CBCOND
  2007. +#endif
  2008. +#ifndef HAVE_AS_SPARC5_VIS4
  2009. + & ~(MASK_VIS4 | MASK_SUBXC)
  2010. +#endif
  2011. +#ifndef HAVE_AS_SPARC6
  2012. + & ~(MASK_VIS4B)
  2013. +#endif
  2014. +#ifndef HAVE_AS_LEON
  2015. + & ~(MASK_LEON | MASK_LEON3)
  2016. +#endif
  2017. + & ~(target_flags_explicit & MASK_FEATURES)
  2018. + );
  2019. +
  2020. + /* FsMULd is a V8 instruction. */
  2021. + if (!TARGET_V8 && !TARGET_V9)
  2022. + target_flags &= ~MASK_FSMULD;
  2023. +
  2024. + /* -mvis2 implies -mvis. */
  2025. + if (TARGET_VIS2)
  2026. + target_flags |= MASK_VIS;
  2027. +
  2028. + /* -mvis3 implies -mvis2 and -mvis. */
  2029. + if (TARGET_VIS3)
  2030. + target_flags |= MASK_VIS2 | MASK_VIS;
  2031. +
  2032. + /* -mvis4 implies -mvis3, -mvis2 and -mvis. */
  2033. + if (TARGET_VIS4)
  2034. + target_flags |= MASK_VIS3 | MASK_VIS2 | MASK_VIS;
  2035. +
  2036. + /* -mvis4b implies -mvis4, -mvis3, -mvis2 and -mvis */
  2037. + if (TARGET_VIS4B)
  2038. + target_flags |= MASK_VIS4 | MASK_VIS3 | MASK_VIS2 | MASK_VIS;
  2039. +
  2040. + /* Don't allow -mvis, -mvis2, -mvis3, -mvis4, -mvis4b, -mfmaf and -mfsmuld if
  2041. + FPU is disabled. */
  2042. + if (!TARGET_FPU)
  2043. + target_flags &= ~(MASK_VIS | MASK_VIS2 | MASK_VIS3 | MASK_VIS4
  2044. + | MASK_VIS4B | MASK_FMAF | MASK_FSMULD);
  2045. +
  2046. + /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
  2047. + are available; -m64 also implies v9. */
  2048. + if (TARGET_VIS || TARGET_ARCH64)
  2049. + {
  2050. + target_flags |= MASK_V9;
  2051. + target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
  2052. + }
  2053. +
  2054. + /* -mvis also implies -mv8plus on 32-bit. */
  2055. + if (TARGET_VIS && !TARGET_ARCH64)
  2056. + target_flags |= MASK_V8PLUS;
  2057. +
  2058. + /* Use the deprecated v8 insns for sparc64 in 32-bit mode. */
  2059. + if (TARGET_V9 && TARGET_ARCH32)
  2060. + target_flags |= MASK_DEPRECATED_V8_INSNS;
  2061. +
  2062. + /* V8PLUS requires V9 and makes no sense in 64-bit mode. */
  2063. + if (!TARGET_V9 || TARGET_ARCH64)
  2064. + target_flags &= ~MASK_V8PLUS;
  2065. +
  2066. + /* Don't use stack biasing in 32-bit mode. */
  2067. + if (TARGET_ARCH32)
  2068. + target_flags &= ~MASK_STACK_BIAS;
  2069. +
  2070. + /* Use LRA instead of reload, unless otherwise instructed. */
  2071. + if (!(target_flags_explicit & MASK_LRA))
  2072. + target_flags |= MASK_LRA;
  2073. +
  2074. + /* Enable applicable errata workarounds for LEON3FT. */
  2075. + if (sparc_fix_ut699 || sparc_fix_ut700 || sparc_fix_gr712rc)
  2076. + {
  2077. + sparc_fix_b2bst = 1;
  2078. + sparc_fix_lost_divsqrt = 1;
  2079. + }
  2080. +
  2081. + /* Disable FsMULd for the UT699 since it doesn't work correctly. */
  2082. + if (sparc_fix_ut699)
  2083. + target_flags &= ~MASK_FSMULD;
  2084. +
  2085. +#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  2086. + if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
  2087. + target_flags |= MASK_LONG_DOUBLE_128;
  2088. +#endif
  2089. +
  2090. + if (TARGET_DEBUG_OPTIONS)
  2091. + dump_target_flags ("Final target_flags", target_flags);
  2092. +
  2093. + /* Set the code model if no -mcmodel option was specified. */
  2094. + if (global_options_set.x_sparc_code_model)
  2095. + {
  2096. + if (TARGET_ARCH32)
  2097. + error ("%<-mcmodel=%> is not supported in 32-bit mode");
  2098. + }
  2099. + else
  2100. + {
  2101. + if (TARGET_ARCH32)
  2102. + sparc_code_model = CM_32;
  2103. + else
  2104. + sparc_code_model = SPARC_DEFAULT_CMODEL;
  2105. + }
  2106. +
  2107. + /* Set the memory model if no -mmemory-model option was specified. */
  2108. + if (!global_options_set.x_sparc_memory_model)
  2109. + {
  2110. + /* Choose the memory model for the operating system. */
  2111. + enum sparc_memory_model_type os_default = SUBTARGET_DEFAULT_MEMORY_MODEL;
  2112. + if (os_default != SMM_DEFAULT)
  2113. + sparc_memory_model = os_default;
  2114. + /* Choose the most relaxed model for the processor. */
  2115. + else if (TARGET_V9)
  2116. + sparc_memory_model = SMM_RMO;
  2117. + else if (TARGET_LEON3)
  2118. + sparc_memory_model = SMM_TSO;
  2119. + else if (TARGET_LEON)
  2120. + sparc_memory_model = SMM_SC;
  2121. + else if (TARGET_V8)
  2122. + sparc_memory_model = SMM_PSO;
  2123. + else
  2124. + sparc_memory_model = SMM_SC;
  2125. + }
  2126. +
  2127. + /* Supply a default value for align_functions. */
  2128. + if (flag_align_functions && !str_align_functions)
  2129. + {
  2130. + if (sparc_cpu == PROCESSOR_ULTRASPARC
  2131. + || sparc_cpu == PROCESSOR_ULTRASPARC3
  2132. + || sparc_cpu == PROCESSOR_NIAGARA
  2133. + || sparc_cpu == PROCESSOR_NIAGARA2
  2134. + || sparc_cpu == PROCESSOR_NIAGARA3
  2135. + || sparc_cpu == PROCESSOR_NIAGARA4)
  2136. + str_align_functions = "32";
  2137. + else if (sparc_cpu == PROCESSOR_NIAGARA7
  2138. + || sparc_cpu == PROCESSOR_M8)
  2139. + str_align_functions = "64";
  2140. + }
  2141. +
  2142. + /* Validate PCC_STRUCT_RETURN. */
  2143. + if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
  2144. + flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
  2145. +
  2146. + /* Only use .uaxword when compiling for a 64-bit target. */
  2147. + if (!TARGET_ARCH64)
  2148. + targetm.asm_out.unaligned_op.di = NULL;
  2149. +
  2150. + /* Set the processor costs. */
  2151. + switch (sparc_cpu)
  2152. + {
  2153. + case PROCESSOR_V7:
  2154. + case PROCESSOR_CYPRESS:
  2155. + sparc_costs = &cypress_costs;
  2156. + break;
  2157. + case PROCESSOR_V8:
  2158. + case PROCESSOR_SPARCLITE:
  2159. + case PROCESSOR_SUPERSPARC:
  2160. + sparc_costs = &supersparc_costs;
  2161. + break;
  2162. + case PROCESSOR_F930:
  2163. + case PROCESSOR_F934:
  2164. + case PROCESSOR_HYPERSPARC:
  2165. + case PROCESSOR_SPARCLITE86X:
  2166. + sparc_costs = &hypersparc_costs;
  2167. + break;
  2168. + case PROCESSOR_LEON:
  2169. + sparc_costs = &leon_costs;
  2170. + break;
  2171. + case PROCESSOR_LEON3:
  2172. + case PROCESSOR_LEON3V7:
  2173. + sparc_costs = &leon3_costs;
  2174. + break;
  2175. + case PROCESSOR_SPARCLET:
  2176. + case PROCESSOR_TSC701:
  2177. + sparc_costs = &sparclet_costs;
  2178. + break;
  2179. + case PROCESSOR_V9:
  2180. + case PROCESSOR_ULTRASPARC:
  2181. + sparc_costs = &ultrasparc_costs;
  2182. + break;
  2183. + case PROCESSOR_ULTRASPARC3:
  2184. + sparc_costs = &ultrasparc3_costs;
  2185. + break;
  2186. + case PROCESSOR_NIAGARA:
  2187. + sparc_costs = &niagara_costs;
  2188. + break;
  2189. + case PROCESSOR_NIAGARA2:
  2190. + sparc_costs = &niagara2_costs;
  2191. + break;
  2192. + case PROCESSOR_NIAGARA3:
  2193. + sparc_costs = &niagara3_costs;
  2194. + break;
  2195. + case PROCESSOR_NIAGARA4:
  2196. + sparc_costs = &niagara4_costs;
  2197. + break;
  2198. + case PROCESSOR_NIAGARA7:
  2199. + sparc_costs = &niagara7_costs;
  2200. + break;
  2201. + case PROCESSOR_M8:
  2202. + sparc_costs = &m8_costs;
  2203. + break;
  2204. + case PROCESSOR_NATIVE:
  2205. + gcc_unreachable ();
  2206. + };
  2207. +
  2208. + /* param_simultaneous_prefetches is the number of prefetches that
  2209. + can run at the same time. More important, it is the threshold
  2210. + defining when additional prefetches will be dropped by the
  2211. + hardware.
  2212. +
  2213. + The UltraSPARC-III features a documented prefetch queue with a
  2214. + size of 8. Additional prefetches issued in the cpu are
  2215. + dropped.
  2216. +
  2217. + Niagara processors are different. In these processors prefetches
  2218. + are handled much like regular loads. The L1 miss buffer is 32
  2219. + entries, but prefetches start getting affected when 30 entries
  2220. + become occupied. That occupation could be a mix of regular loads
  2221. + and prefetches though. And that buffer is shared by all threads.
  2222. + Once the threshold is reached, if the core is running a single
  2223. + thread the prefetch will retry. If more than one thread is
  2224. + running, the prefetch will be dropped.
  2225. +
  2226. + All this makes it very difficult to determine how many
  2227. + prefetches can be issued simultaneously, even in a
  2228. + single-threaded program. Experimental results show that setting
  2229. + this parameter to 32 works well when the number of threads is not
  2230. + high. */
  2231. + SET_OPTION_IF_UNSET (&global_options, &global_options_set,
  2232. + param_simultaneous_prefetches,
  2233. + ((sparc_cpu == PROCESSOR_ULTRASPARC
  2234. + || sparc_cpu == PROCESSOR_NIAGARA
  2235. + || sparc_cpu == PROCESSOR_NIAGARA2
  2236. + || sparc_cpu == PROCESSOR_NIAGARA3
  2237. + || sparc_cpu == PROCESSOR_NIAGARA4)
  2238. + ? 2
  2239. + : (sparc_cpu == PROCESSOR_ULTRASPARC3
  2240. + ? 8 : ((sparc_cpu == PROCESSOR_NIAGARA7
  2241. + || sparc_cpu == PROCESSOR_M8)
  2242. + ? 32 : 3))));
  2243. +
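+ /* Since SET_OPTION_IF_UNSET only supplies a default, users can still
+    override the value explicitly, e.g. (illustrative):
+
+        gcc -mcpu=niagara4 --param simultaneous-prefetches=8 ...  */
+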
  2244. + /* param_l1_cache_line_size is the size of the L1 cache line, in
  2245. + bytes.
  2246. +
  2247. + The Oracle SPARC Architecture (previously the UltraSPARC
  2248. + Architecture) specification states that when a PREFETCH[A]
  2249. + instruction is executed an implementation-specific amount of data
  2250. + is prefetched, and that it is at least 64 bytes long (aligned to
  2251. + at least 64 bytes).
  2252. +
  2253. + However, this is not correct. The M7 (and implementations prior
  2254. + to that) does not guarantee a 64B prefetch into a cache if the
  2255. + line size is smaller. A single cache line is all that is ever
  2256. + prefetched. So for the M7, where the L1D$ has 32B lines and the
  2257. + L2D$ and L3 have 64B lines, a prefetch will prefetch 64B into the
  2258. + L2 and L3, but only 32B are brought into the L1D$. (Assuming it
  2259. + is a read_n prefetch, which is the only type which allocates to
  2260. + the L1.) */
  2261. + SET_OPTION_IF_UNSET (&global_options, &global_options_set,
  2262. + param_l1_cache_line_size,
  2263. + (sparc_cpu == PROCESSOR_M8 ? 64 : 32));
  2264. +
  2265. + /* param_l1_cache_size is the size of the L1D$ (most SPARC chips use
  2266. + Harvard level-1 caches) in kilobytes. Both UltraSPARC and
  2267. + Niagara processors feature an L1D$ of 16KB. */
  2268. + SET_OPTION_IF_UNSET (&global_options, &global_options_set,
  2269. + param_l1_cache_size,
  2270. + ((sparc_cpu == PROCESSOR_ULTRASPARC
  2271. + || sparc_cpu == PROCESSOR_ULTRASPARC3
  2272. + || sparc_cpu == PROCESSOR_NIAGARA
  2273. + || sparc_cpu == PROCESSOR_NIAGARA2
  2274. + || sparc_cpu == PROCESSOR_NIAGARA3
  2275. + || sparc_cpu == PROCESSOR_NIAGARA4
  2276. + || sparc_cpu == PROCESSOR_NIAGARA7
  2277. + || sparc_cpu == PROCESSOR_M8)
  2278. + ? 16 : 64));
  2279. +
  2280. + /* param_l2_cache_size is the size of the L2 in kilobytes. Note
  2281. + that 512 is the default in params.def. */
  2282. + SET_OPTION_IF_UNSET (&global_options, &global_options_set,
  2283. + param_l2_cache_size,
  2284. + ((sparc_cpu == PROCESSOR_NIAGARA4
  2285. + || sparc_cpu == PROCESSOR_M8)
  2286. + ? 128 : (sparc_cpu == PROCESSOR_NIAGARA7
  2287. + ? 256 : 512)));
  2288. +
  2290. + /* Disable save slot sharing for call-clobbered registers by default.
  2291. + The IRA sharing algorithm works on single registers only and this
  2292. + pessimizes for double floating-point registers. */
  2293. + if (!global_options_set.x_flag_ira_share_save_slots)
  2294. + flag_ira_share_save_slots = 0;
  2295. +
  2296. + /* Only enable REE by default in 64-bit mode where it helps to eliminate
  2297. + redundant 32-to-64-bit extensions. */
  2298. + if (!global_options_set.x_flag_ree && TARGET_ARCH32)
  2299. + flag_ree = 0;
  2300. +
  2301. + /* Do various machine dependent initializations. */
  2302. + sparc_init_modes ();
  2303. +
  2304. + /* Set up function hooks. */
  2305. + init_machine_status = sparc_init_machine_status;
  2306. +}
  2307. +
  2308. +/* Miscellaneous utilities. */
  2309. +
  2310. +/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
  2311. + or branch on register contents instructions. */
  2312. +
  2313. +int
  2314. +v9_regcmp_p (enum rtx_code code)
  2315. +{
  2316. + return (code == EQ || code == NE || code == GE || code == LT
  2317. + || code == LE || code == GT);
  2318. +}
  2319. +
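+ /* These six codes are exactly the signed comparisons against zero that the
+    V9 branch-on-register-contents instructions (brz, brlez, brlz, brnz,
+    brgz, brgez) and their movr counterparts can encode; unsigned codes such
+    as LTU or GEU have no register-contents form.  */
+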
  2320. +/* Nonzero if OP is a floating point constant which can
  2321. + be loaded into an integer register using a single
  2322. + sethi instruction. */
  2323. +
  2324. +int
  2325. +fp_sethi_p (rtx op)
  2326. +{
  2327. + if (GET_CODE (op) == CONST_DOUBLE)
  2328. + {
  2329. + long i;
  2330. +
  2331. + REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
  2332. + return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
  2333. + }
  2334. +
  2335. + return 0;
  2336. +}
  2337. +
  2338. +/* Nonzero if OP is a floating point constant which can
  2339. + be loaded into an integer register using a single
  2340. + mov instruction. */
  2341. +
  2342. +int
  2343. +fp_mov_p (rtx op)
  2344. +{
  2345. + if (GET_CODE (op) == CONST_DOUBLE)
  2346. + {
  2347. + long i;
  2348. +
  2349. + REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
  2350. + return SPARC_SIMM13_P (i);
  2351. + }
  2352. +
  2353. + return 0;
  2354. +}
  2355. +
  2356. +/* Nonzero if OP is a floating point constant which can
  2357. + be loaded into an integer register using a high/losum
  2358. + instruction sequence. */
  2359. +
  2360. +int
  2361. +fp_high_losum_p (rtx op)
  2362. +{
  2363. + /* The constraints calling this should only be in
  2364. + SFmode move insns, so any constant which cannot
  2365. + be moved using a single insn will do. */
  2366. + if (GET_CODE (op) == CONST_DOUBLE)
  2367. + {
  2368. + long i;
  2369. +
  2370. + REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
  2371. + return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
  2372. + }
  2373. +
  2374. + return 0;
  2375. +}
  2376. +
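+ /* A worked example for the three predicates above (editor's sketch): the
+    SFmode constant 1.0f has bit pattern 0x3f800000.  It does not fit in a
+    signed 13-bit immediate, but its low 10 bits are zero, so fp_sethi_p is
+    true while fp_mov_p and fp_high_losum_p are false; a single
+    "sethi %hi(0x3f800000), %reg" materializes it.  */
+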
  2377. +/* Return true if the address of LABEL can be loaded by means of the
  2378. + mov{si,di}_pic_label_ref patterns in PIC mode. */
  2379. +
  2380. +static bool
  2381. +can_use_mov_pic_label_ref (rtx label)
  2382. +{
  2383. + /* VxWorks does not impose a fixed gap between segments; the run-time
  2384. + gap can be different from the object-file gap. We therefore can't
  2385. + assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
  2386. + are absolutely sure that X is in the same segment as the GOT.
  2387. + Unfortunately, the flexibility of linker scripts means that we
  2388. + can't be sure of that in general, so assume that GOT-relative
  2389. + accesses are never valid on VxWorks. */
  2390. + if (TARGET_VXWORKS_RTP)
  2391. + return false;
  2392. +
  2393. + /* Similarly, if the label is non-local, it might end up being placed
  2394. + in a different section than the current one; now mov_pic_label_ref
  2395. + requires the label and the code to be in the same section. */
  2396. + if (LABEL_REF_NONLOCAL_P (label))
  2397. + return false;
  2398. +
  2399. + /* Finally, if we are reordering basic blocks and partitioning into hot
  2400. + and cold sections, this might happen for any label. */
  2401. + if (flag_reorder_blocks_and_partition)
  2402. + return false;
  2403. +
  2404. + return true;
  2405. +}
  2406. +
  2407. +/* Expand a move instruction. Return true if all work is done. */
  2408. +
  2409. +bool
  2410. +sparc_expand_move (machine_mode mode, rtx *operands)
  2411. +{
  2412. + /* Handle sets of MEM first. */
  2413. + if (GET_CODE (operands[0]) == MEM)
  2414. + {
  2415. + /* 0 is a register (or a pair of registers) on SPARC. */
  2416. + if (register_or_zero_operand (operands[1], mode))
  2417. + return false;
  2418. +
  2419. + if (!reload_in_progress)
  2420. + {
  2421. + operands[0] = validize_mem (operands[0]);
  2422. + operands[1] = force_reg (mode, operands[1]);
  2423. + }
  2424. + }
  2425. +
  2426. + /* Fix up TLS cases. */
  2427. + if (TARGET_HAVE_TLS
  2428. + && CONSTANT_P (operands[1])
  2429. + && sparc_tls_referenced_p (operands[1]))
  2430. + {
  2431. + operands[1] = sparc_legitimize_tls_address (operands[1]);
  2432. + return false;
  2433. + }
  2434. +
  2435. + /* Fix up PIC cases. */
  2436. + if (flag_pic && CONSTANT_P (operands[1]))
  2437. + {
  2438. + if (pic_address_needs_scratch (operands[1]))
  2439. + operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);
  2440. +
  2441. + /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases. */
  2442. + if ((GET_CODE (operands[1]) == LABEL_REF
  2443. + && can_use_mov_pic_label_ref (operands[1]))
  2444. + || (GET_CODE (operands[1]) == CONST
  2445. + && GET_CODE (XEXP (operands[1], 0)) == PLUS
  2446. + && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
  2447. + && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == CONST_INT
  2448. + && can_use_mov_pic_label_ref (XEXP (XEXP (operands[1], 0), 0))))
  2449. + {
  2450. + if (mode == SImode)
  2451. + {
  2452. + emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
  2453. + return true;
  2454. + }
  2455. +
  2456. + if (mode == DImode)
  2457. + {
  2458. + emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
  2459. + return true;
  2460. + }
  2461. + }
  2462. +
  2463. + if (symbolic_operand (operands[1], mode))
  2464. + {
  2465. + operands[1]
  2466. + = sparc_legitimize_pic_address (operands[1],
  2467. + reload_in_progress
  2468. + ? operands[0] : NULL_RTX);
  2469. + return false;
  2470. + }
  2471. + }
  2472. +
  2473. + /* If we are trying to toss an integer constant into FP registers,
  2474. + or loading an FP or vector constant, force it into memory. */
  2475. + if (CONSTANT_P (operands[1])
  2476. + && REG_P (operands[0])
  2477. + && (SPARC_FP_REG_P (REGNO (operands[0]))
  2478. + || SCALAR_FLOAT_MODE_P (mode)
  2479. + || VECTOR_MODE_P (mode)))
  2480. + {
  2481. + /* emit_group_store will send such bogosity to us when it is
  2482. + not storing directly into memory. So fix this up to avoid
  2483. + crashes in output_constant_pool. */
  2484. + if (operands[1] == const0_rtx)
  2485. + operands[1] = CONST0_RTX (mode);
  2486. +
  2487. + /* We can clear FP registers to zero or set them to all-ones if
  2488. + TARGET_VIS, and we can always do so for the other registers. */
  2489. + if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
  2490. + && (const_zero_operand (operands[1], mode)
  2491. + || const_all_ones_operand (operands[1], mode)))
  2492. + return false;
  2493. +
  2494. + if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
  2495. + /* We are able to build any SF constant in integer registers
  2496. + with at most 2 instructions. */
  2497. + && (mode == SFmode
  2498. + /* And any DF constant in integer registers if needed. */
  2499. + || (mode == DFmode && !can_create_pseudo_p ())))
  2500. + return false;
  2501. +
  2502. + operands[1] = force_const_mem (mode, operands[1]);
  2503. + if (!reload_in_progress)
  2504. + operands[1] = validize_mem (operands[1]);
  2505. + return false;
  2506. + }
  2507. +
  2508. + /* Accept non-constants and valid constants unmodified. */
  2509. + if (!CONSTANT_P (operands[1])
  2510. + || GET_CODE (operands[1]) == HIGH
  2511. + || input_operand (operands[1], mode))
  2512. + return false;
  2513. +
  2514. + switch (mode)
  2515. + {
  2516. + case E_QImode:
  2517. + /* All QImode constants require only one insn, so proceed. */
  2518. + break;
  2519. +
  2520. + case E_HImode:
  2521. + case E_SImode:
  2522. + sparc_emit_set_const32 (operands[0], operands[1]);
  2523. + return true;
  2524. +
  2525. + case E_DImode:
  2526. + /* input_operand should have filtered out 32-bit mode. */
  2527. + sparc_emit_set_const64 (operands[0], operands[1]);
  2528. + return true;
  2529. +
  2530. + case E_TImode:
  2531. + {
  2532. + rtx high, low;
  2533. + /* TImode isn't available in 32-bit mode. */
  2534. + split_double (operands[1], &high, &low);
  2535. + emit_insn (gen_movdi (operand_subword (operands[0], 0, 0, TImode),
  2536. + high));
  2537. + emit_insn (gen_movdi (operand_subword (operands[0], 1, 0, TImode),
  2538. + low));
  2539. + }
  2540. + return true;
  2541. +
  2542. + default:
  2543. + gcc_unreachable ();
  2544. + }
  2545. +
  2546. + return false;
  2547. +}
  2548. +
  2549. +/* Load OP1, a 32-bit constant, into OP0, a register.
  2550. + We know it cannot be done in one insn when we get
  2551. + here; the move expander guarantees this. */
  2552. +
  2553. +static void
  2554. +sparc_emit_set_const32 (rtx op0, rtx op1)
  2555. +{
  2556. + machine_mode mode = GET_MODE (op0);
  2557. + rtx temp = op0;
  2558. +
  2559. + if (can_create_pseudo_p ())
  2560. + temp = gen_reg_rtx (mode);
  2561. +
  2562. + if (GET_CODE (op1) == CONST_INT)
  2563. + {
  2564. + gcc_assert (!small_int_operand (op1, mode)
  2565. + && !const_high_operand (op1, mode));
  2566. +
  2567. + /* Emit them as real moves instead of a HIGH/LO_SUM,
  2568. + this way CSE can see everything and reuse intermediate
  2569. + values if it wants. */
  2570. + emit_insn (gen_rtx_SET (temp, GEN_INT (INTVAL (op1)
  2571. + & ~(HOST_WIDE_INT) 0x3ff)));
  2572. +
  2573. + emit_insn (gen_rtx_SET (op0,
  2574. + gen_rtx_IOR (mode, temp,
  2575. + GEN_INT (INTVAL (op1) & 0x3ff))));
  2576. + }
  2577. + else
  2578. + {
  2579. + /* A symbol, emit in the traditional way. */
  2580. + emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, op1)));
  2581. + emit_insn (gen_rtx_SET (op0, gen_rtx_LO_SUM (mode, temp, op1)));
  2582. + }
  2583. +}
  2584. +
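+ /* Example (editor's sketch): for op1 = 0x12345678 the CONST_INT path above
+    emits the equivalent of
+
+        sethi %hi(0x12345678), %tmp    ! %tmp = 0x12345400
+        or    %tmp, 0x278, %reg        ! 0x12345678 & 0x3ff == 0x278
+
+    but as two plain SETs, so CSE can reuse the intermediate value.  */
+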
  2585. +/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
  2586. + If TEMP is nonzero, we are forbidden to use any other scratch
  2587. + registers. Otherwise, we are allowed to generate them as needed.
  2588. +
  2589. + Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
  2590. + or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
  2591. +
  2592. +void
  2593. +sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
  2594. +{
  2595. + rtx cst, temp1, temp2, temp3, temp4, temp5;
  2596. + rtx ti_temp = 0;
  2597. +
  2598. + /* Deal with too large offsets. */
  2599. + if (GET_CODE (op1) == CONST
  2600. + && GET_CODE (XEXP (op1, 0)) == PLUS
  2601. + && CONST_INT_P (cst = XEXP (XEXP (op1, 0), 1))
  2602. + && trunc_int_for_mode (INTVAL (cst), SImode) != INTVAL (cst))
  2603. + {
  2604. + gcc_assert (!temp);
  2605. + temp1 = gen_reg_rtx (DImode);
  2606. + temp2 = gen_reg_rtx (DImode);
  2607. + sparc_emit_set_const64 (temp2, cst);
  2608. + sparc_emit_set_symbolic_const64 (temp1, XEXP (XEXP (op1, 0), 0),
  2609. + NULL_RTX);
  2610. + emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp1, temp2)));
  2611. + return;
  2612. + }
  2613. +
  2614. + if (temp && GET_MODE (temp) == TImode)
  2615. + {
  2616. + ti_temp = temp;
  2617. + temp = gen_rtx_REG (DImode, REGNO (temp));
  2618. + }
  2619. +
  2620. + /* SPARC-V9 code model support. */
  2621. + switch (sparc_code_model)
  2622. + {
  2623. + case CM_MEDLOW:
  2624. + /* The range spanned by all instructions in the object is less
  2625. + than 2^31 bytes (2GB) and the distance from any instruction
  2626. + to the location of the label _GLOBAL_OFFSET_TABLE_ is less
  2627. + than 2^31 bytes (2GB).
  2628. +
  2629. + The executable must be in the low 4TB of the virtual address
  2630. + space.
  2631. +
  2632. + sethi %hi(symbol), %temp1
  2633. + or %temp1, %lo(symbol), %reg */
  2634. + if (temp)
  2635. + temp1 = temp; /* op0 is allowed. */
  2636. + else
  2637. + temp1 = gen_reg_rtx (DImode);
  2638. +
  2639. + emit_insn (gen_rtx_SET (temp1, gen_rtx_HIGH (DImode, op1)));
  2640. + emit_insn (gen_rtx_SET (op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
  2641. + break;
  2642. +
  2643. + case CM_MEDMID:
  2644. + /* The range spanned by all instructions in the object is less
  2645. + than 2^31 bytes (2GB) and the distance from any instruction
  2646. + to the location of the label _GLOBAL_OFFSET_TABLE_ is less
  2647. + than 2^31 bytes (2GB).
  2648. +
  2649. + The executable must be in the low 16TB of the virtual address
  2650. + space.
  2651. +
  2652. + sethi %h44(symbol), %temp1
  2653. + or %temp1, %m44(symbol), %temp2
  2654. + sllx %temp2, 12, %temp3
  2655. + or %temp3, %l44(symbol), %reg */
  2656. + if (temp)
  2657. + {
  2658. + temp1 = op0;
  2659. + temp2 = op0;
  2660. + temp3 = temp; /* op0 is allowed. */
  2661. + }
  2662. + else
  2663. + {
  2664. + temp1 = gen_reg_rtx (DImode);
  2665. + temp2 = gen_reg_rtx (DImode);
  2666. + temp3 = gen_reg_rtx (DImode);
  2667. + }
  2668. +
  2669. + emit_insn (gen_seth44 (temp1, op1));
  2670. + emit_insn (gen_setm44 (temp2, temp1, op1));
  2671. + emit_insn (gen_rtx_SET (temp3,
  2672. + gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
  2673. + emit_insn (gen_setl44 (op0, temp3, op1));
  2674. + break;
  2675. +
  2676. + case CM_MEDANY:
  2677. + /* The range spanned by all instructions in the object is less
  2678. + than 2^31 bytes (2GB) and the distance from any instruction
  2679. + to the location of the label _GLOBAL_OFFSET_TABLE_ is less
  2680. + than 2^31 bytes (2GB).
  2681. +
  2682. + The executable can be placed anywhere in the virtual address
  2683. + space.
  2684. +
  2685. + sethi %hh(symbol), %temp1
  2686. + sethi %lm(symbol), %temp2
  2687. + or %temp1, %hm(symbol), %temp3
  2688. + sllx %temp3, 32, %temp4
  2689. + or %temp4, %temp2, %temp5
  2690. + or %temp5, %lo(symbol), %reg */
  2691. + if (temp)
  2692. + {
  2693. + /* It is possible that one of the registers we got for operands[2]
  2694. + might coincide with that of operands[0] (which is why we made
  2695. + it TImode). Pick the other one to use as our scratch. */
  2696. + if (rtx_equal_p (temp, op0))
  2697. + {
  2698. + gcc_assert (ti_temp);
  2699. + temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
  2700. + }
  2701. + temp1 = op0;
  2702. + temp2 = temp; /* op0 is _not_ allowed, see above. */
  2703. + temp3 = op0;
  2704. + temp4 = op0;
  2705. + temp5 = op0;
  2706. + }
  2707. + else
  2708. + {
  2709. + temp1 = gen_reg_rtx (DImode);
  2710. + temp2 = gen_reg_rtx (DImode);
  2711. + temp3 = gen_reg_rtx (DImode);
  2712. + temp4 = gen_reg_rtx (DImode);
  2713. + temp5 = gen_reg_rtx (DImode);
  2714. + }
  2715. +
  2716. + emit_insn (gen_sethh (temp1, op1));
  2717. + emit_insn (gen_setlm (temp2, op1));
  2718. + emit_insn (gen_sethm (temp3, temp1, op1));
  2719. + emit_insn (gen_rtx_SET (temp4,
  2720. + gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
  2721. + emit_insn (gen_rtx_SET (temp5, gen_rtx_PLUS (DImode, temp4, temp2)));
  2722. + emit_insn (gen_setlo (op0, temp5, op1));
  2723. + break;
  2724. +
  2725. + case CM_EMBMEDANY:
  2726. + /* Old old old backwards compatibility kruft here.
  2727. + Essentially it is MEDLOW with a fixed 64-bit
  2728. + virtual base added to all data segment addresses.
  2729. + Text-segment stuff is computed like MEDANY, we can't
  2730. + reuse the code above because the relocation knobs
  2731. + look different.
  2732. +
  2733. + Data segment: sethi %hi(symbol), %temp1
  2734. + add %temp1, EMBMEDANY_BASE_REG, %temp2
  2735. + or %temp2, %lo(symbol), %reg */
  2736. + if (data_segment_operand (op1, GET_MODE (op1)))
  2737. + {
  2738. + if (temp)
  2739. + {
  2740. + temp1 = temp; /* op0 is allowed. */
  2741. + temp2 = op0;
  2742. + }
  2743. + else
  2744. + {
  2745. + temp1 = gen_reg_rtx (DImode);
  2746. + temp2 = gen_reg_rtx (DImode);
  2747. + }
  2748. +
  2749. + emit_insn (gen_embmedany_sethi (temp1, op1));
  2750. + emit_insn (gen_embmedany_brsum (temp2, temp1));
  2751. + emit_insn (gen_embmedany_losum (op0, temp2, op1));
  2752. + }
  2753. +
  2754. + /* Text segment: sethi %uhi(symbol), %temp1
  2755. + sethi %hi(symbol), %temp2
  2756. + or %temp1, %ulo(symbol), %temp3
  2757. + sllx %temp3, 32, %temp4
  2758. + or %temp4, %temp2, %temp5
  2759. + or %temp5, %lo(symbol), %reg */
  2760. + else
  2761. + {
  2762. + if (temp)
  2763. + {
  2764. + /* It is possible that one of the registers we got for operands[2]
  2765. + might coincide with that of operands[0] (which is why we made
  2766. + it TImode). Pick the other one to use as our scratch. */
  2767. + if (rtx_equal_p (temp, op0))
  2768. + {
  2769. + gcc_assert (ti_temp);
  2770. + temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
  2771. + }
  2772. + temp1 = op0;
  2773. + temp2 = temp; /* op0 is _not_ allowed, see above. */
  2774. + temp3 = op0;
  2775. + temp4 = op0;
  2776. + temp5 = op0;
  2777. + }
  2778. + else
  2779. + {
  2780. + temp1 = gen_reg_rtx (DImode);
  2781. + temp2 = gen_reg_rtx (DImode);
  2782. + temp3 = gen_reg_rtx (DImode);
  2783. + temp4 = gen_reg_rtx (DImode);
  2784. + temp5 = gen_reg_rtx (DImode);
  2785. + }
  2786. +
  2787. + emit_insn (gen_embmedany_textuhi (temp1, op1));
  2788. + emit_insn (gen_embmedany_texthi (temp2, op1));
  2789. + emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
  2790. + emit_insn (gen_rtx_SET (temp4,
  2791. + gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
  2792. + emit_insn (gen_rtx_SET (temp5, gen_rtx_PLUS (DImode, temp4, temp2)));
  2793. + emit_insn (gen_embmedany_textlo (op0, temp5, op1));
  2794. + }
  2795. + break;
  2796. +
  2797. + default:
  2798. + gcc_unreachable ();
  2799. + }
  2800. +}
  2801. +
  2802. +/* These avoid problems when cross-compiling. If we do not
  2803. + go through all this hair, then the optimizer will see
  2804. + invalid REG_EQUAL notes or in some cases none at all. */
  2805. +static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
  2806. +static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
  2807. +static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
  2808. +static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
  2809. +
  2810. +/* The optimizer is not to assume anything about exactly
  2811. + which bits are set for a HIGH; they are unspecified.
  2812. + Unfortunately this leads to many missed optimizations
  2813. + during CSE. We mask out the non-HIGH bits and match
  2814. + a plain movdi to alleviate this problem. */
  2815. +static rtx
  2816. +gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
  2817. +{
  2818. + return gen_rtx_SET (dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
  2819. +}
  2820. +
  2821. +static rtx
  2822. +gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
  2823. +{
  2824. + return gen_rtx_SET (dest, GEN_INT (val));
  2825. +}
  2826. +
  2827. +static rtx
  2828. +gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
  2829. +{
  2830. + return gen_rtx_IOR (DImode, src, GEN_INT (val));
  2831. +}
  2832. +
  2833. +static rtx
  2834. +gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
  2835. +{
  2836. + return gen_rtx_XOR (DImode, src, GEN_INT (val));
  2837. +}
  2838. +
  2839. +/* Worker routines for 64-bit constant formation on arch64.
  2840. + A key goal in these emissions is to create as many temp
  2841. + REGs as possible. This makes it possible for half-built
  2842. + constants to be reused later when such values are similar
  2843. + to something required later on. Without doing this, the
  2844. + optimizer cannot see such opportunities. */
  2846. +
  2847. +static void sparc_emit_set_const64_quick1 (rtx, rtx,
  2848. + unsigned HOST_WIDE_INT, int);
  2849. +
  2850. +static void
  2851. +sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
  2852. + unsigned HOST_WIDE_INT low_bits, int is_neg)
  2853. +{
  2854. + unsigned HOST_WIDE_INT high_bits;
  2855. +
  2856. + if (is_neg)
  2857. + high_bits = (~low_bits) & 0xffffffff;
  2858. + else
  2859. + high_bits = low_bits;
  2860. +
  2861. + emit_insn (gen_safe_HIGH64 (temp, high_bits));
  2862. + if (!is_neg)
  2863. + {
  2864. + emit_insn (gen_rtx_SET (op0, gen_safe_OR64 (temp, (high_bits & 0x3ff))));
  2865. + }
  2866. + else
  2867. + {
  2868. + /* If we are XOR'ing with -1, then we should emit a one's complement
  2869. + instead. This way the combiner will notice logical operations
  2870. + such as ANDN later on and substitute. */
  2871. + if ((low_bits & 0x3ff) == 0x3ff)
  2872. + {
  2873. + emit_insn (gen_rtx_SET (op0, gen_rtx_NOT (DImode, temp)));
  2874. + }
  2875. + else
  2876. + {
  2877. + emit_insn (gen_rtx_SET (op0,
  2878. + gen_safe_XOR64 (temp,
  2879. + (-(HOST_WIDE_INT)0x400
  2880. + | (low_bits & 0x3ff)))));
  2881. + }
  2882. + }
  2883. +}
  2884. +
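+ /* Worked example for the is_neg case (editor's sketch): to load
+    0xffffffffdeadbeef, high_bits = ~0xdeadbeef & 0xffffffff = 0x21524110
+    and the emitted sequence is equivalent to
+
+        sethi %hi(0x21524110), %tmp    ! %tmp = 0x21524000
+        xor   %tmp, -0x111, %reg       ! -0x400 | (0xdeadbeef & 0x3ff)
+
+    The sign-extended XOR immediate flips the upper 32 bits to all-ones and
+    fills in the low bits, yielding 0xffffffffdeadbeef.  */
+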
  2885. +static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
  2886. + unsigned HOST_WIDE_INT, int);
  2887. +
  2888. +static void
  2889. +sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
  2890. + unsigned HOST_WIDE_INT high_bits,
  2891. + unsigned HOST_WIDE_INT low_immediate,
  2892. + int shift_count)
  2893. +{
  2894. + rtx temp2 = op0;
  2895. +
  2896. + if ((high_bits & 0xfffffc00) != 0)
  2897. + {
  2898. + emit_insn (gen_safe_HIGH64 (temp, high_bits));
  2899. + if ((high_bits & ~0xfffffc00) != 0)
  2900. + emit_insn (gen_rtx_SET (op0,
  2901. + gen_safe_OR64 (temp, (high_bits & 0x3ff))));
  2902. + else
  2903. + temp2 = temp;
  2904. + }
  2905. + else
  2906. + {
  2907. + emit_insn (gen_safe_SET64 (temp, high_bits));
  2908. + temp2 = temp;
  2909. + }
  2910. +
  2911. + /* Now shift it up into place. */
  2912. + emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, temp2,
  2913. + GEN_INT (shift_count))));
  2914. +
  2915. + /* If there is a low immediate part piece, finish up by
  2916. + putting that in as well. */
  2917. + if (low_immediate != 0)
  2918. + emit_insn (gen_rtx_SET (op0, gen_safe_OR64 (op0, low_immediate)));
  2919. +}
  2920. +
  2921. +static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
  2922. + unsigned HOST_WIDE_INT);
  2923. +
  2924. +/* Full 64-bit constant decomposition. Even though this is the
  2925. + 'worst' case, we still optimize a few things away. */
  2926. +static void
  2927. +sparc_emit_set_const64_longway (rtx op0, rtx temp,
  2928. + unsigned HOST_WIDE_INT high_bits,
  2929. + unsigned HOST_WIDE_INT low_bits)
  2930. +{
  2931. + rtx sub_temp = op0;
  2932. +
  2933. + if (can_create_pseudo_p ())
  2934. + sub_temp = gen_reg_rtx (DImode);
  2935. +
  2936. + if ((high_bits & 0xfffffc00) != 0)
  2937. + {
  2938. + emit_insn (gen_safe_HIGH64 (temp, high_bits));
  2939. + if ((high_bits & ~0xfffffc00) != 0)
  2940. + emit_insn (gen_rtx_SET (sub_temp,
  2941. + gen_safe_OR64 (temp, (high_bits & 0x3ff))));
  2942. + else
  2943. + sub_temp = temp;
  2944. + }
  2945. + else
  2946. + {
  2947. + emit_insn (gen_safe_SET64 (temp, high_bits));
  2948. + sub_temp = temp;
  2949. + }
  2950. +
  2951. + if (can_create_pseudo_p ())
  2952. + {
  2953. + rtx temp2 = gen_reg_rtx (DImode);
  2954. + rtx temp3 = gen_reg_rtx (DImode);
  2955. + rtx temp4 = gen_reg_rtx (DImode);
  2956. +
  2957. + emit_insn (gen_rtx_SET (temp4, gen_rtx_ASHIFT (DImode, sub_temp,
  2958. + GEN_INT (32))));
  2959. +
  2960. + emit_insn (gen_safe_HIGH64 (temp2, low_bits));
  2961. + if ((low_bits & ~0xfffffc00) != 0)
  2962. + {
  2963. + emit_insn (gen_rtx_SET (temp3,
  2964. + gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
  2965. + emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp4, temp3)));
  2966. + }
  2967. + else
  2968. + {
  2969. + emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp4, temp2)));
  2970. + }
  2971. + }
  2972. + else
  2973. + {
  2974. + rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
  2975. + rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
  2976. + rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
  2977. + int to_shift = 12;
  2978. +
  2979. + /* We are in the middle of reload, so this is really
  2980. + painful. However we do still make an attempt to
  2981. + avoid emitting truly stupid code. */
  2982. + if (low1 != const0_rtx)
  2983. + {
  2984. + emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
  2985. + GEN_INT (to_shift))));
  2986. + emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low1)));
  2987. + sub_temp = op0;
  2988. + to_shift = 12;
  2989. + }
  2990. + else
  2991. + {
  2992. + to_shift += 12;
  2993. + }
  2994. + if (low2 != const0_rtx)
  2995. + {
  2996. + emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
  2997. + GEN_INT (to_shift))));
  2998. + emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low2)));
  2999. + sub_temp = op0;
  3000. + to_shift = 8;
  3001. + }
  3002. + else
  3003. + {
  3004. + to_shift += 8;
  3005. + }
  3006. + emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
  3007. + GEN_INT (to_shift))));
  3008. + if (low3 != const0_rtx)
  3009. + emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low3)));
  3010. + /* phew... */
  3011. + }
  3012. +}
  3013. +
  3014. +/* Analyze a 64-bit constant for certain properties. */
  3015. +static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
  3016. + unsigned HOST_WIDE_INT,
  3017. + int *, int *, int *);
  3018. +
  3019. +static void
  3020. +analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
  3021. + unsigned HOST_WIDE_INT low_bits,
  3022. + int *hbsp, int *lbsp, int *abbasp)
  3023. +{
  3024. + int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  3025. + int i;
  3026. +
  3027. + lowest_bit_set = highest_bit_set = -1;
  3028. + i = 0;
  3029. + do
  3030. + {
  3031. + if ((lowest_bit_set == -1)
  3032. + && ((low_bits >> i) & 1))
  3033. + lowest_bit_set = i;
  3034. + if ((highest_bit_set == -1)
  3035. + && ((high_bits >> (32 - i - 1)) & 1))
  3036. + highest_bit_set = (64 - i - 1);
  3037. + }
  3038. + while (++i < 32
  3039. + && ((highest_bit_set == -1)
  3040. + || (lowest_bit_set == -1)));
  3041. + if (i == 32)
  3042. + {
  3043. + i = 0;
  3044. + do
  3045. + {
  3046. + if ((lowest_bit_set == -1)
  3047. + && ((high_bits >> i) & 1))
  3048. + lowest_bit_set = i + 32;
  3049. + if ((highest_bit_set == -1)
  3050. + && ((low_bits >> (32 - i - 1)) & 1))
  3051. + highest_bit_set = 32 - i - 1;
  3052. + }
  3053. + while (++i < 32
  3054. + && ((highest_bit_set == -1)
  3055. + || (lowest_bit_set == -1)));
  3056. + }
  3057. + /* If there are no bits set, this should have gone out
  3058. + as one instruction! */
  3059. + gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  3060. + all_bits_between_are_set = 1;
  3061. + for (i = lowest_bit_set; i <= highest_bit_set; i++)
  3062. + {
  3063. + if (i < 32)
  3064. + {
  3065. + if ((low_bits & (1 << i)) != 0)
  3066. + continue;
  3067. + }
  3068. + else
  3069. + {
  3070. + if ((high_bits & (1 << (i - 32))) != 0)
  3071. + continue;
  3072. + }
  3073. + all_bits_between_are_set = 0;
  3074. + break;
  3075. + }
  3076. + *hbsp = highest_bit_set;
  3077. + *lbsp = lowest_bit_set;
  3078. + *abbasp = all_bits_between_are_set;
  3079. +}
  3080. +
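+ /* Example (editor's sketch): for the constant 0x00000ff000000000 this
+    returns lowest_bit_set = 36, highest_bit_set = 43 and
+    all_bits_between_are_set = 1, which the emitters below turn into
+    "mov 0xff, %reg; sllx %reg, 36, %reg".  */
+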
  3081. +static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
  3082. +
  3083. +static int
  3084. +const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
  3085. + unsigned HOST_WIDE_INT low_bits)
  3086. +{
  3087. + int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
  3088. +
  3089. + if (high_bits == 0
  3090. + || high_bits == 0xffffffff)
  3091. + return 1;
  3092. +
  3093. + analyze_64bit_constant (high_bits, low_bits,
  3094. + &highest_bit_set, &lowest_bit_set,
  3095. + &all_bits_between_are_set);
  3096. +
  3097. + if ((highest_bit_set == 63
  3098. + || lowest_bit_set == 0)
  3099. + && all_bits_between_are_set != 0)
  3100. + return 1;
  3101. +
  3102. + if ((highest_bit_set - lowest_bit_set) < 21)
  3103. + return 1;
  3104. +
  3105. + return 0;
  3106. +}
  3107. +
  3108. +static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
  3109. + unsigned HOST_WIDE_INT,
  3110. + int, int);
  3111. +
  3112. +static unsigned HOST_WIDE_INT
  3113. +create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
  3114. + unsigned HOST_WIDE_INT low_bits,
  3115. + int lowest_bit_set, int shift)
  3116. +{
  3117. + HOST_WIDE_INT hi, lo;
  3118. +
  3119. + if (lowest_bit_set < 32)
  3120. + {
  3121. + lo = (low_bits >> lowest_bit_set) << shift;
  3122. + hi = ((high_bits << (32 - lowest_bit_set)) << shift);
  3123. + }
  3124. + else
  3125. + {
  3126. + lo = 0;
  3127. + hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
  3128. + }
  3129. + gcc_assert (! (hi & lo));
  3130. + return (hi | lo);
  3131. +}
  3132. +
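+ /* In other words, this shifts the cluster of set bits down so that its
+    lowest bit lands at position SHIFT.  Editor's example: high_bits =
+    0x00000ff0, low_bits = 0, lowest_bit_set = 36 and shift = 10 yield
+    0x3fc00, which a single sethi can materialize.  */
+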
  3133. +/* Here we are sure to be arch64 and this is an integer constant
  3134. + being loaded into a register. Emit the most efficient
  3135. + insn sequence possible. Detection of all the 1-insn cases
  3136. + has been done already. */
  3137. +static void
  3138. +sparc_emit_set_const64 (rtx op0, rtx op1)
  3139. +{
  3140. + unsigned HOST_WIDE_INT high_bits, low_bits;
  3141. + int lowest_bit_set, highest_bit_set;
  3142. + int all_bits_between_are_set;
  3143. + rtx temp = 0;
  3144. +
  3145. + /* Sanity check that we know what we are working with. */
  3146. + gcc_assert (TARGET_ARCH64
  3147. + && (GET_CODE (op0) == SUBREG
  3148. + || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
  3149. +
  3150. + if (! can_create_pseudo_p ())
  3151. + temp = op0;
  3152. +
  3153. + if (GET_CODE (op1) != CONST_INT)
  3154. + {
  3155. + sparc_emit_set_symbolic_const64 (op0, op1, temp);
  3156. + return;
  3157. + }
  3158. +
  3159. + if (! temp)
  3160. + temp = gen_reg_rtx (DImode);
  3161. +
  3162. + high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
  3163. + low_bits = (INTVAL (op1) & 0xffffffff);
  3164. +
  3165. + /* low_bits bits 0 --> 31
  3166. + high_bits bits 32 --> 63 */
  3167. +
  3168. + analyze_64bit_constant (high_bits, low_bits,
  3169. + &highest_bit_set, &lowest_bit_set,
  3170. + &all_bits_between_are_set);
  3171. +
  3172. + /* First try for a 2-insn sequence. */
  3173. +
  3174. + /* These situations are preferred because the optimizer can
  3175. + * do more things with them:
  3176. + * 1) mov -1, %reg
  3177. + * sllx %reg, shift, %reg
  3178. + * 2) mov -1, %reg
  3179. + * srlx %reg, shift, %reg
  3180. + * 3) mov some_small_const, %reg
  3181. + * sllx %reg, shift, %reg
  3182. + */
  3183. + if (((highest_bit_set == 63
  3184. + || lowest_bit_set == 0)
  3185. + && all_bits_between_are_set != 0)
  3186. + || ((highest_bit_set - lowest_bit_set) < 12))
  3187. + {
  3188. + HOST_WIDE_INT the_const = -1;
  3189. + int shift = lowest_bit_set;
  3190. +
  3191. + if ((highest_bit_set != 63
  3192. + && lowest_bit_set != 0)
  3193. + || all_bits_between_are_set == 0)
  3194. + {
  3195. + the_const =
  3196. + create_simple_focus_bits (high_bits, low_bits,
  3197. + lowest_bit_set, 0);
  3198. + }
  3199. + else if (lowest_bit_set == 0)
  3200. + shift = -(63 - highest_bit_set);
  3201. +
  3202. + gcc_assert (SPARC_SIMM13_P (the_const));
  3203. + gcc_assert (shift != 0);
  3204. +
  3205. + emit_insn (gen_safe_SET64 (temp, the_const));
  3206. + if (shift > 0)
  3207. + emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, temp,
  3208. + GEN_INT (shift))));
  3209. + else if (shift < 0)
  3210. + emit_insn (gen_rtx_SET (op0, gen_rtx_LSHIFTRT (DImode, temp,
  3211. + GEN_INT (-shift))));
  3212. + return;
  3213. + }
  3214. +
  3215. + /* Now a range of 22 or fewer bits set somewhere.
  3216. + * 1) sethi %hi(focus_bits), %reg
  3217. + * sllx %reg, shift, %reg
  3218. + * 2) sethi %hi(focus_bits), %reg
  3219. + * srlx %reg, shift, %reg
  3220. + */
  3221. + if ((highest_bit_set - lowest_bit_set) < 21)
  3222. + {
  3223. + unsigned HOST_WIDE_INT focus_bits =
  3224. + create_simple_focus_bits (high_bits, low_bits,
  3225. + lowest_bit_set, 10);
  3226. +
  3227. + gcc_assert (SPARC_SETHI_P (focus_bits));
  3228. + gcc_assert (lowest_bit_set != 10);
  3229. +
  3230. + emit_insn (gen_safe_HIGH64 (temp, focus_bits));
  3231. +
  3232. + /* If lowest_bit_set == 10 then a sethi alone could have done it. */
  3233. + if (lowest_bit_set < 10)
  3234. + emit_insn (gen_rtx_SET (op0,
  3235. + gen_rtx_LSHIFTRT (DImode, temp,
  3236. + GEN_INT (10 - lowest_bit_set))));
  3237. + else if (lowest_bit_set > 10)
  3238. + emit_insn (gen_rtx_SET (op0,
  3239. + gen_rtx_ASHIFT (DImode, temp,
  3240. + GEN_INT (lowest_bit_set - 10))));
  3241. + return;
  3242. + }
  3243. +
  3244. + /* 1) sethi %hi(low_bits), %reg
  3245. + * or %reg, %lo(low_bits), %reg
  3246. + * 2) sethi %hi(~low_bits), %reg
  3247. + * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
  3248. + */
  3249. + if (high_bits == 0
  3250. + || high_bits == 0xffffffff)
  3251. + {
  3252. + sparc_emit_set_const64_quick1 (op0, temp, low_bits,
  3253. + (high_bits == 0xffffffff));
  3254. + return;
  3255. + }
  3256. +
  3257. + /* Now, try 3-insn sequences. */
  3258. +
  3259. + /* 1) sethi %hi(high_bits), %reg
  3260. + * or %reg, %lo(high_bits), %reg
  3261. + * sllx %reg, 32, %reg
  3262. + */
  3263. + if (low_bits == 0)
  3264. + {
  3265. + sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
  3266. + return;
  3267. + }
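+ /* Editor's illustration: for 0x1234567800000000, high_bits is
+    0x12345678 and low_bits is 0, so this emits
+    sethi %hi(0x12345678), %reg ; or %reg, %lo(0x12345678), %reg ;
+    sllx %reg, 32, %reg.  */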
  3268. +
  3269. + /* We may be able to do something quick
  3270. + when the constant is negated, so try that. */
  3271. + if (const64_is_2insns ((~high_bits) & 0xffffffff,
  3272. + (~low_bits) & 0xfffffc00))
  3273. + {
  3274. + /* NOTE: The trailing bits get XOR'd so we need the
  3275. + non-negated bits, not the negated ones. */
  3276. + unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
  3277. +
  3278. + if ((((~high_bits) & 0xffffffff) == 0
  3279. + && ((~low_bits) & 0x80000000) == 0)
  3280. + || (((~high_bits) & 0xffffffff) == 0xffffffff
  3281. + && ((~low_bits) & 0x80000000) != 0))
  3282. + {
  3283. + unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
  3284. +
  3285. + if ((SPARC_SETHI_P (fast_int)
  3286. + && (~high_bits & 0xffffffff) == 0)
  3287. + || SPARC_SIMM13_P (fast_int))
  3288. + emit_insn (gen_safe_SET64 (temp, fast_int));
  3289. + else
  3290. + sparc_emit_set_const64 (temp, GEN_INT (fast_int));
  3291. + }
  3292. + else
  3293. + {
  3294. + rtx negated_const;
  3295. + negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
  3296. + (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
  3297. + sparc_emit_set_const64 (temp, negated_const);
  3298. + }
  3299. +
  3300. + /* If we are XOR'ing with -1, then we should emit a one's complement
  3301. + instead. This way the combiner will notice logical operations
  3302. + such as ANDN later on and substitute. */
  3303. + if (trailing_bits == 0x3ff)
  3304. + {
  3305. + emit_insn (gen_rtx_SET (op0, gen_rtx_NOT (DImode, temp)));
  3306. + }
  3307. + else
  3308. + {
  3309. + emit_insn (gen_rtx_SET (op0,
  3310. + gen_safe_XOR64 (temp,
  3311. + (-0x400 | trailing_bits))));
  3312. + }
  3313. + return;
  3314. + }
  3315. +
  3316. + /* 1) sethi %hi(xxx), %reg
  3317. + * or %reg, %lo(xxx), %reg
  3318. + * sllx %reg, yyy, %reg
  3319. + *
  3320. + * ??? This is just a generalized version of the low_bits==0
  3321. + * thing above, FIXME...
  3322. + */
  3323. + if ((highest_bit_set - lowest_bit_set) < 32)
  3324. + {
  3325. + unsigned HOST_WIDE_INT focus_bits =
  3326. + create_simple_focus_bits (high_bits, low_bits,
  3327. + lowest_bit_set, 0);
  3328. +
  3329. + /* We can't get here in this state. */
  3330. + gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
  3331. +
  3332. + /* So what we know is that the set bits straddle the
  3333. + middle of the 64-bit word. */
  3334. + sparc_emit_set_const64_quick2 (op0, temp,
  3335. + focus_bits, 0,
  3336. + lowest_bit_set);
  3337. + return;
  3338. + }
  3339. +
  3340. + /* 1) sethi %hi(high_bits), %reg
  3341. + * or %reg, %lo(high_bits), %reg
  3342. + * sllx %reg, 32, %reg
  3343. + * or %reg, low_bits, %reg
  3344. + */
  3345. + if (SPARC_SIMM13_P (low_bits) && ((int)low_bits > 0))
  3346. + {
  3347. + sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
  3348. + return;
  3349. + }
  3350. +
  3351. + /* The easiest way when all else fails, is full decomposition. */
  3352. + sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
  3353. +}
  3354. +
  3355. +/* Implement TARGET_FIXED_CONDITION_CODE_REGS. */
  3356. +
  3357. +static bool
  3358. +sparc_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
  3359. +{
  3360. + *p1 = SPARC_ICC_REG;
  3361. + *p2 = SPARC_FCC_REG;
  3362. + return true;
  3363. +}
  3364. +
  3365. +/* Implement TARGET_MIN_ARITHMETIC_PRECISION. */
  3366. +
  3367. +static unsigned int
  3368. +sparc_min_arithmetic_precision (void)
  3369. +{
  3370. + return 32;
  3371. +}
  3372. +
  3373. +/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
  3374. + return the mode to be used for the comparison. For floating-point,
  3375. + CCFP[E]mode is used. CCNZmode should be used when the first operand
  3376. + is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
  3377. + processing is needed. */
  3378. +
  3379. +machine_mode
  3380. +select_cc_mode (enum rtx_code op, rtx x, rtx y)
  3381. +{
  3382. + if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
  3383. + {
  3384. + switch (op)
  3385. + {
  3386. + case EQ:
  3387. + case NE:
  3388. + case UNORDERED:
  3389. + case ORDERED:
  3390. + case UNLT:
  3391. + case UNLE:
  3392. + case UNGT:
  3393. + case UNGE:
  3394. + case UNEQ:
  3395. + return CCFPmode;
  3396. +
  3397. + case LT:
  3398. + case LE:
  3399. + case GT:
  3400. + case GE:
  3401. + case LTGT:
  3402. + return CCFPEmode;
  3403. +
  3404. + default:
  3405. + gcc_unreachable ();
  3406. + }
  3407. + }
  3408. + else if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
  3409. + || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
  3410. + && y == const0_rtx)
  3411. + {
  3412. + if (TARGET_ARCH64 && GET_MODE (x) == DImode)
  3413. + return CCXNZmode;
  3414. + else
  3415. + return CCNZmode;
  3416. + }
  3417. + else
  3418. + {
  3419. + /* This is for the cmp<mode>_sne pattern. */
  3420. + if (GET_CODE (x) == NOT && y == constm1_rtx)
  3421. + {
  3422. + if (TARGET_ARCH64 && GET_MODE (x) == DImode)
  3423. + return CCXCmode;
  3424. + else
  3425. + return CCCmode;
  3426. + }
  3427. +
  3428. + /* This is for the [u]addvdi4_sp32 and [u]subvdi4_sp32 patterns. */
  3429. + if (!TARGET_ARCH64 && GET_MODE (x) == DImode)
  3430. + {
  3431. + if (GET_CODE (y) == UNSPEC
  3432. + && (XINT (y, 1) == UNSPEC_ADDV
  3433. + || XINT (y, 1) == UNSPEC_SUBV
  3434. + || XINT (y, 1) == UNSPEC_NEGV))
  3435. + return CCVmode;
  3436. + else
  3437. + return CCCmode;
  3438. + }
  3439. +
  3440. + if (TARGET_ARCH64 && GET_MODE (x) == DImode)
  3441. + return CCXmode;
  3442. + else
  3443. + return CCmode;
  3444. + }
  3445. +}
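+ /* Editor's illustration: comparing (plus:SI a b) against const0_rtx
+    returns CCNZmode, so only the N and Z flags are deemed valid and
+    the comparison can reuse the flags set by an addcc instruction
+    instead of a separate compare.  */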
  3446. +
  3447. +/* Emit the compare insn and return the CC reg for a CODE comparison
  3448. + with operands X and Y. */
  3449. +
  3450. +static rtx
  3451. +gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
  3452. +{
  3453. + machine_mode mode;
  3454. + rtx cc_reg;
  3455. +
  3456. + if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
  3457. + return x;
  3458. +
  3459. + mode = SELECT_CC_MODE (code, x, y);
  3460. +
  3461. + /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
  3462. + fcc regs (cse can't tell they're really call clobbered regs and will
  3463. + remove a duplicate comparison even if there is an intervening function
  3464. + call - it will then try to reload the cc reg via an int reg which is why
  3465. + we need the movcc patterns). It is possible to provide the movcc
  3466. + patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
  3467. + registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
  3468. + to tell cse that CCFPE mode registers (even pseudos) are call
  3469. + clobbered. */
  3470. +
  3471. + /* ??? This is an experiment. Rather than making changes to cse which may
  3472. + or may not be easy/clean, we do our own cse. This is possible because
  3473. + we will generate hard registers. Cse knows they're call clobbered (it
  3474. + doesn't know the same thing about pseudos). If we guess wrong, no big
  3475. + deal, but if we win, great! */
  3476. +
  3477. + if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
  3478. +#if 1 /* experiment */
  3479. + {
  3480. + int reg;
  3481. + /* We cycle through the registers to ensure they're all exercised. */
  3482. + static int next_fcc_reg = 0;
  3483. + /* Previous x,y for each fcc reg. */
  3484. + static rtx prev_args[4][2];
  3485. +
  3486. + /* Scan prev_args for x,y. */
  3487. + for (reg = 0; reg < 4; reg++)
  3488. + if (prev_args[reg][0] == x && prev_args[reg][1] == y)
  3489. + break;
  3490. + if (reg == 4)
  3491. + {
  3492. + reg = next_fcc_reg;
  3493. + prev_args[reg][0] = x;
  3494. + prev_args[reg][1] = y;
  3495. + next_fcc_reg = (next_fcc_reg + 1) & 3;
  3496. + }
  3497. + cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
  3498. + }
  3499. +#else
  3500. + cc_reg = gen_reg_rtx (mode);
  3501. +#endif /* ! experiment */
  3502. + else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
  3503. + cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
  3504. + else
  3505. + cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
  3506. +
  3507. + /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
  3508. + will only result in an unrecognizable insn so no point in asserting. */
  3509. + emit_insn (gen_rtx_SET (cc_reg, gen_rtx_COMPARE (mode, x, y)));
  3510. +
  3511. + return cc_reg;
  3512. +}
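+ /* Editor's illustration: on V9, an LT comparison of two DFmode values
+    selects CCFPEmode and emits roughly
+    (set (reg:CCFPE %fcc0) (compare:CCFPE x y)),
+    with the hand-rolled cse above cycling through %fcc0-%fcc3.  */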
  3513. +
  3514. +
  3515. +/* Emit the compare insn and return the CC reg for the comparison in CMP. */
  3516. +
  3517. +rtx
  3518. +gen_compare_reg (rtx cmp)
  3519. +{
  3520. + return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
  3521. +}
  3522. +
  3523. +/* This function is used for v9 only.
  3524. + DEST is the target of the Scc insn.
  3525. + CODE is the code for an Scc's comparison.
  3526. + X and Y are the values we compare.
  3527. +
  3528. + This function is needed to turn
  3529. +
  3530. + (set (reg:SI 110)
  3531. + (gt (reg:CCX 100 %icc)
  3532. + (const_int 0)))
  3533. + into
  3534. + (set (reg:SI 110)
  3535. + (gt:DI (reg:CCX 100 %icc)
  3536. + (const_int 0)))
  3537. +
  3538. + I.e., the instruction recognizer needs to see the mode of the comparison to
  3539. + find the right instruction. We could use "gt:DI" right in the
  3540. + define_expand, but leaving it out allows us to handle DI, SI, etc. */
  3541. +
  3542. +static int
  3543. +gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
  3544. +{
  3545. + if (! TARGET_ARCH64
  3546. + && (GET_MODE (x) == DImode
  3547. + || GET_MODE (dest) == DImode))
  3548. + return 0;
  3549. +
  3550. + /* Try to use the movrCC insns. */
  3551. + if (TARGET_ARCH64
  3552. + && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
  3553. + && y == const0_rtx
  3554. + && v9_regcmp_p (compare_code))
  3555. + {
  3556. + rtx op0 = x;
  3557. + rtx temp;
  3558. +
  3559. + /* Special case for op0 != 0. This can be done with one instruction if
  3560. + dest == x. */
  3561. +
  3562. + if (compare_code == NE
  3563. + && GET_MODE (dest) == DImode
  3564. + && rtx_equal_p (op0, dest))
  3565. + {
  3566. + emit_insn (gen_rtx_SET (dest,
  3567. + gen_rtx_IF_THEN_ELSE (DImode,
  3568. + gen_rtx_fmt_ee (compare_code, DImode,
  3569. + op0, const0_rtx),
  3570. + const1_rtx,
  3571. + dest)));
  3572. + return 1;
  3573. + }
  3574. +
  3575. + if (reg_overlap_mentioned_p (dest, op0))
  3576. + {
  3577. + /* Handle the case where dest == x.
  3578. + We "early clobber" the result. */
  3579. + op0 = gen_reg_rtx (GET_MODE (x));
  3580. + emit_move_insn (op0, x);
  3581. + }
  3582. +
  3583. + emit_insn (gen_rtx_SET (dest, const0_rtx));
  3584. + if (GET_MODE (op0) != DImode)
  3585. + {
  3586. + temp = gen_reg_rtx (DImode);
  3587. + convert_move (temp, op0, 0);
  3588. + }
  3589. + else
  3590. + temp = op0;
  3591. + emit_insn (gen_rtx_SET (dest,
  3592. + gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
  3593. + gen_rtx_fmt_ee (compare_code, DImode,
  3594. + temp, const0_rtx),
  3595. + const1_rtx,
  3596. + dest)));
  3597. + return 1;
  3598. + }
  3599. + else
  3600. + {
  3601. + x = gen_compare_reg_1 (compare_code, x, y);
  3602. + y = const0_rtx;
  3603. +
  3604. + emit_insn (gen_rtx_SET (dest, const0_rtx));
  3605. + emit_insn (gen_rtx_SET (dest,
  3606. + gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
  3607. + gen_rtx_fmt_ee (compare_code,
  3608. + GET_MODE (x), x, y),
  3609. + const1_rtx, dest)));
  3610. + return 1;
  3611. + }
  3612. +}
  3613. +
  3614. +
  3615. +/* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
  3616. + without jumps using the addx/subx instructions. */
  3617. +
  3618. +bool
  3619. +emit_scc_insn (rtx operands[])
  3620. +{
  3621. + rtx tem, x, y;
  3622. + enum rtx_code code;
  3623. + machine_mode mode;
  3624. +
  3625. + /* The quad-word fp compare library routines all return nonzero to indicate
  3626. + true, which is different from the equivalent libgcc routines, so we must
  3627. + handle them specially here. */
  3628. + if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
  3629. + {
  3630. + operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
  3631. + GET_CODE (operands[1]));
  3632. + operands[2] = XEXP (operands[1], 0);
  3633. + operands[3] = XEXP (operands[1], 1);
  3634. + }
  3635. +
  3636. + code = GET_CODE (operands[1]);
  3637. + x = operands[2];
  3638. + y = operands[3];
  3639. + mode = GET_MODE (x);
  3640. +
  3641. + /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
  3642. + more applications). The exception to this is "reg != 0" which can
  3643. + be done in one instruction on v9 (so we do it). */
  3644. + if ((code == EQ || code == NE) && (mode == SImode || mode == DImode))
  3645. + {
  3646. + if (y != const0_rtx)
  3647. + x = force_reg (mode, gen_rtx_XOR (mode, x, y));
  3648. +
  3649. + rtx pat = gen_rtx_SET (operands[0],
  3650. + gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
  3651. + x, const0_rtx));
  3652. +
  3653. + /* If we can use addx/subx or addxc, add a clobber for CC. */
  3654. + if (mode == SImode || (code == NE && TARGET_VIS3))
  3655. + {
  3656. + rtx clobber
  3657. + = gen_rtx_CLOBBER (VOIDmode,
  3658. + gen_rtx_REG (mode == SImode ? CCmode : CCXmode,
  3659. + SPARC_ICC_REG));
  3660. + pat = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, pat, clobber));
  3661. + }
  3662. +
  3663. + emit_insn (pat);
  3664. + return true;
  3665. + }
  3666. +
  3667. + /* We can do LTU in DImode (and GTU by swapping operands below) using the addxc instruction with VIS3, so skip the conditional-move sequence then. */
  3668. + if (TARGET_ARCH64
  3669. + && mode == DImode
  3670. + && !((code == LTU || code == GTU) && TARGET_VIS3)
  3671. + && gen_v9_scc (operands[0], code, x, y))
  3672. + return true;
  3673. +
  3674. + /* We can do LTU and GEU using the addx/subx instructions too. And
  3675. + for GTU/LEU, if both operands are registers swap them and fall
  3676. + back to the easy case. */
  3677. + if (code == GTU || code == LEU)
  3678. + {
  3679. + if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
  3680. + && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
  3681. + {
  3682. + tem = x;
  3683. + x = y;
  3684. + y = tem;
  3685. + code = swap_condition (code);
  3686. + }
  3687. + }
  3688. +
  3689. + if (code == LTU || code == GEU)
  3690. + {
  3691. + emit_insn (gen_rtx_SET (operands[0],
  3692. + gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
  3693. + gen_compare_reg_1 (code, x, y),
  3694. + const0_rtx)));
  3695. + return true;
  3696. + }
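+ /* Editor's note: the LTU case maps onto the carry flag; for SImode the
+    result is computed roughly as
+    subcc %x, %y, %g0 ; addx %g0, 0, %dest
+    so %dest becomes 1 exactly when x <u y.  */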
  3697. +
  3698. + /* All the possibilities to use addx/subx-based sequences have been
  3699. + exhausted; try for a 3-instruction sequence using v9 conditional
  3700. + moves. */
  3701. + if (TARGET_V9 && gen_v9_scc (operands[0], code, x, y))
  3702. + return true;
  3703. +
  3704. + /* Nope, do branches. */
  3705. + return false;
  3706. +}
  3707. +
  3708. +/* Emit a conditional jump insn for the v9 architecture using comparison code
  3709. + CODE and jump target LABEL.
  3710. + This function exists to take advantage of the v9 brxx insns. */
  3711. +
  3712. +static void
  3713. +emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
  3714. +{
  3715. + emit_jump_insn (gen_rtx_SET (pc_rtx,
  3716. + gen_rtx_IF_THEN_ELSE (VOIDmode,
  3717. + gen_rtx_fmt_ee (code, GET_MODE (op0),
  3718. + op0, const0_rtx),
  3719. + gen_rtx_LABEL_REF (VOIDmode, label),
  3720. + pc_rtx)));
  3721. +}
  3722. +
  3723. +/* Emit a conditional jump insn for the UA2011 architecture using
  3724. + comparison code CODE and jump target LABEL. This function exists
  3725. + to take advantage of the UA2011 Compare and Branch insns. */
  3726. +
  3727. +static void
  3728. +emit_cbcond_insn (enum rtx_code code, rtx op0, rtx op1, rtx label)
  3729. +{
  3730. + rtx if_then_else;
  3731. +
  3732. + if_then_else = gen_rtx_IF_THEN_ELSE (VOIDmode,
  3733. + gen_rtx_fmt_ee (code, GET_MODE (op0),
  3734. + op0, op1),
  3735. + gen_rtx_LABEL_REF (VOIDmode, label),
  3736. + pc_rtx);
  3737. +
  3738. + emit_jump_insn (gen_rtx_SET (pc_rtx, if_then_else));
  3739. +}
  3740. +
  3741. +void
  3742. +emit_conditional_branch_insn (rtx operands[])
  3743. +{
  3744. + /* The quad-word fp compare library routines all return nonzero to indicate
  3745. + true, which is different from the equivalent libgcc routines, so we must
  3746. + handle them specially here. */
  3747. + if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
  3748. + {
  3749. + operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
  3750. + GET_CODE (operands[0]));
  3751. + operands[1] = XEXP (operands[0], 0);
  3752. + operands[2] = XEXP (operands[0], 1);
  3753. + }
  3754. +
  3755. + /* If we can tell early on that the comparison is against a constant
  3756. + that won't fit in the 5-bit signed immediate field of a cbcond,
  3757. + use one of the other v9 conditional branch sequences. */
  3758. + if (TARGET_CBCOND
  3759. + && GET_CODE (operands[1]) == REG
  3760. + && (GET_MODE (operands[1]) == SImode
  3761. + || (TARGET_ARCH64 && GET_MODE (operands[1]) == DImode))
  3762. + && (GET_CODE (operands[2]) != CONST_INT
  3763. + || SPARC_SIMM5_P (INTVAL (operands[2]))))
  3764. + {
  3765. + emit_cbcond_insn (GET_CODE (operands[0]), operands[1], operands[2], operands[3]);
  3766. + return;
  3767. + }
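+ /* Editor's note: SPARC_SIMM5_P accepts only -16..15, so e.g. a branch
+    on "x == 100" skips cbcond and falls through to the sequences
+    below.  */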
  3768. +
  3769. + if (TARGET_ARCH64 && operands[2] == const0_rtx
  3770. + && GET_CODE (operands[1]) == REG
  3771. + && GET_MODE (operands[1]) == DImode)
  3772. + {
  3773. + emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
  3774. + return;
  3775. + }
  3776. +
  3777. + operands[1] = gen_compare_reg (operands[0]);
  3778. + operands[2] = const0_rtx;
  3779. + operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
  3780. + operands[1], operands[2]);
  3781. + emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
  3782. + operands[3]));
  3783. +}
  3784. +
  3785. +
  3786. +/* Generate a DFmode part of a hard TFmode register.
  3787. + REG is the TFmode hard register, LOW is 1 for the
  3788. + low 64 bits of the register and 0 otherwise.
  3789. + */
  3790. +rtx
  3791. +gen_df_reg (rtx reg, int low)
  3792. +{
  3793. + int regno = REGNO (reg);
  3794. +
  3795. + if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
  3796. + regno += (TARGET_ARCH64 && SPARC_INT_REG_P (regno)) ? 1 : 2;
  3797. + return gen_rtx_REG (DFmode, regno);
  3798. +}
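+ /* Editor's illustration: for a TFmode value in %f0-%f3 on big-endian
+    SPARC, gen_df_reg returns %f0 for LOW == 0 and %f2 for LOW == 1.  */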
  3799. +
  3800. +/* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
  3801. + Unlike normal calls, TFmode operands are passed by reference. It is
  3802. + assumed that no more than 3 operands are required. */
  3803. +
  3804. +static void
  3805. +emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
  3806. +{
  3807. + rtx ret_slot = NULL, arg[3], func_sym;
  3808. + int i;
  3809. +
  3810. + /* We only expect to be called for conversions, unary, and binary ops. */
  3811. + gcc_assert (nargs == 2 || nargs == 3);
  3812. +
  3813. + for (i = 0; i < nargs; ++i)
  3814. + {
  3815. + rtx this_arg = operands[i];
  3816. + rtx this_slot;
  3817. +
  3818. + /* TFmode arguments and return values are passed by reference. */
  3819. + if (GET_MODE (this_arg) == TFmode)
  3820. + {
  3821. + int force_stack_temp;
  3822. +
  3823. + force_stack_temp = 0;
  3824. + if (TARGET_BUGGY_QP_LIB && i == 0)
  3825. + force_stack_temp = 1;
  3826. +
  3827. + if (GET_CODE (this_arg) == MEM
  3828. + && ! force_stack_temp)
  3829. + {
  3830. + tree expr = MEM_EXPR (this_arg);
  3831. + if (expr)
  3832. + mark_addressable (expr);
  3833. + this_arg = XEXP (this_arg, 0);
  3834. + }
  3835. + else if (CONSTANT_P (this_arg)
  3836. + && ! force_stack_temp)
  3837. + {
  3838. + this_slot = force_const_mem (TFmode, this_arg);
  3839. + this_arg = XEXP (this_slot, 0);
  3840. + }
  3841. + else
  3842. + {
  3843. + this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode));
  3844. +
  3845. + /* Operand 0 is the return value. We'll copy it out later. */
  3846. + if (i > 0)
  3847. + emit_move_insn (this_slot, this_arg);
  3848. + else
  3849. + ret_slot = this_slot;
  3850. +
  3851. + this_arg = XEXP (this_slot, 0);
  3852. + }
  3853. + }
  3854. +
  3855. + arg[i] = this_arg;
  3856. + }
  3857. +
  3858. + func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
  3859. +
  3860. + if (GET_MODE (operands[0]) == TFmode)
  3861. + {
  3862. + if (nargs == 2)
  3863. + emit_library_call (func_sym, LCT_NORMAL, VOIDmode,
  3864. + arg[0], GET_MODE (arg[0]),
  3865. + arg[1], GET_MODE (arg[1]));
  3866. + else
  3867. + emit_library_call (func_sym, LCT_NORMAL, VOIDmode,
  3868. + arg[0], GET_MODE (arg[0]),
  3869. + arg[1], GET_MODE (arg[1]),
  3870. + arg[2], GET_MODE (arg[2]));
  3871. +
  3872. + if (ret_slot)
  3873. + emit_move_insn (operands[0], ret_slot);
  3874. + }
  3875. + else
  3876. + {
  3877. + rtx ret;
  3878. +
  3879. + gcc_assert (nargs == 2);
  3880. +
  3881. + ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
  3882. + GET_MODE (operands[0]),
  3883. + arg[1], GET_MODE (arg[1]));
  3884. +
  3885. + if (ret != operands[0])
  3886. + emit_move_insn (operands[0], ret);
  3887. + }
  3888. +}
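+ /* Editor's note: this matches the SPARC V9 quad-float ABI, in which
+    e.g. _Qp_add takes pointers, as in _Qp_add (&result, &a, &b); the
+    TFmode operands above are therefore replaced by their addresses
+    before the libcall is emitted.  */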
  3889. +
  3890. +/* Expand soft-float TFmode calls to SPARC ABI routines. */
  3891. +
  3892. +static void
  3893. +emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
  3894. +{
  3895. + const char *func;
  3896. +
  3897. + switch (code)
  3898. + {
  3899. + case PLUS:
  3900. + func = "_Qp_add";
  3901. + break;
  3902. + case MINUS:
  3903. + func = "_Qp_sub";
  3904. + break;
  3905. + case MULT:
  3906. + func = "_Qp_mul";
  3907. + break;
  3908. + case DIV:
  3909. + func = "_Qp_div";
  3910. + break;
  3911. + default:
  3912. + gcc_unreachable ();
  3913. + }
  3914. +
  3915. + emit_soft_tfmode_libcall (func, 3, operands);
  3916. +}
  3917. +
  3918. +static void
  3919. +emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
  3920. +{
  3921. + const char *func;
  3922. +
  3923. + gcc_assert (code == SQRT);
  3924. + func = "_Qp_sqrt";
  3925. +
  3926. + emit_soft_tfmode_libcall (func, 2, operands);
  3927. +}
  3928. +
  3929. +static void
  3930. +emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
  3931. +{
  3932. + const char *func;
  3933. +
  3934. + switch (code)
  3935. + {
  3936. + case FLOAT_EXTEND:
  3937. + switch (GET_MODE (operands[1]))
  3938. + {
  3939. + case E_SFmode:
  3940. + func = "_Qp_stoq";
  3941. + break;
  3942. + case E_DFmode:
  3943. + func = "_Qp_dtoq";
  3944. + break;
  3945. + default:
  3946. + gcc_unreachable ();
  3947. + }
  3948. + break;
  3949. +
  3950. + case FLOAT_TRUNCATE:
  3951. + switch (GET_MODE (operands[0]))
  3952. + {
  3953. + case E_SFmode:
  3954. + func = "_Qp_qtos";
  3955. + break;
  3956. + case E_DFmode:
  3957. + func = "_Qp_qtod";
  3958. + break;
  3959. + default:
  3960. + gcc_unreachable ();
  3961. + }
  3962. + break;
  3963. +
  3964. + case FLOAT:
  3965. + switch (GET_MODE (operands[1]))
  3966. + {
  3967. + case E_SImode:
  3968. + func = "_Qp_itoq";
  3969. + if (TARGET_ARCH64)
  3970. + operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
  3971. + break;
  3972. + case E_DImode:
  3973. + func = "_Qp_xtoq";
  3974. + break;
  3975. + default:
  3976. + gcc_unreachable ();
  3977. + }
  3978. + break;
  3979. +
  3980. + case UNSIGNED_FLOAT:
  3981. + switch (GET_MODE (operands[1]))
  3982. + {
  3983. + case E_SImode:
  3984. + func = "_Qp_uitoq";
  3985. + if (TARGET_ARCH64)
  3986. + operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
  3987. + break;
  3988. + case E_DImode:
  3989. + func = "_Qp_uxtoq";
  3990. + break;
  3991. + default:
  3992. + gcc_unreachable ();
  3993. + }
  3994. + break;
  3995. +
  3996. + case FIX:
  3997. + switch (GET_MODE (operands[0]))
  3998. + {
  3999. + case E_SImode:
  4000. + func = "_Qp_qtoi";
  4001. + break;
  4002. + case E_DImode:
  4003. + func = "_Qp_qtox";
  4004. + break;
  4005. + default:
  4006. + gcc_unreachable ();
  4007. + }
  4008. + break;
  4009. +
  4010. + case UNSIGNED_FIX:
  4011. + switch (GET_MODE (operands[0]))
  4012. + {
  4013. + case E_SImode:
  4014. + func = "_Qp_qtoui";
  4015. + break;
  4016. + case E_DImode:
  4017. + func = "_Qp_qtoux";
  4018. + break;
  4019. + default:
  4020. + gcc_unreachable ();
  4021. + }
  4022. + break;
  4023. +
  4024. + default:
  4025. + gcc_unreachable ();
  4026. + }
  4027. +
  4028. + emit_soft_tfmode_libcall (func, 2, operands);
  4029. +}
  4030. +
  4031. +/* Expand a hard-float TFmode operation. All arguments must be in
  4032. + registers. */
  4033. +
  4034. +static void
  4035. +emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
  4036. +{
  4037. + rtx op, dest;
  4038. +
  4039. + if (GET_RTX_CLASS (code) == RTX_UNARY)
  4040. + {
  4041. + operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
  4042. + op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
  4043. + }
  4044. + else
  4045. + {
  4046. + operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
  4047. + operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
  4048. + op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
  4049. + operands[1], operands[2]);
  4050. + }
  4051. +
  4052. + if (register_operand (operands[0], VOIDmode))
  4053. + dest = operands[0];
  4054. + else
  4055. + dest = gen_reg_rtx (GET_MODE (operands[0]));
  4056. +
  4057. + emit_insn (gen_rtx_SET (dest, op));
  4058. +
  4059. + if (dest != operands[0])
  4060. + emit_move_insn (operands[0], dest);
  4061. +}
  4062. +
  4063. +void
  4064. +emit_tfmode_binop (enum rtx_code code, rtx *operands)
  4065. +{
  4066. + if (TARGET_HARD_QUAD)
  4067. + emit_hard_tfmode_operation (code, operands);
  4068. + else
  4069. + emit_soft_tfmode_binop (code, operands);
  4070. +}
  4071. +
  4072. +void
  4073. +emit_tfmode_unop (enum rtx_code code, rtx *operands)
  4074. +{
  4075. + if (TARGET_HARD_QUAD)
  4076. + emit_hard_tfmode_operation (code, operands);
  4077. + else
  4078. + emit_soft_tfmode_unop (code, operands);
  4079. +}
  4080. +
  4081. +void
  4082. +emit_tfmode_cvt (enum rtx_code code, rtx *operands)
  4083. +{
  4084. + if (TARGET_HARD_QUAD)
  4085. + emit_hard_tfmode_operation (code, operands);
  4086. + else
  4087. + emit_soft_tfmode_cvt (code, operands);
  4088. +}
  4089. +
  4090. +/* Return nonzero if a branch/jump/call instruction will be emitting
  4091. + a nop into its delay slot. */
  4092. +
  4093. +int
  4094. +empty_delay_slot (rtx_insn *insn)
  4095. +{
  4096. + rtx seq;
  4097. +
  4098. + /* If no previous instruction (should not happen), return true. */
  4099. + if (PREV_INSN (insn) == NULL)
  4100. + return 1;
  4101. +
  4102. + seq = NEXT_INSN (PREV_INSN (insn));
  4103. + if (GET_CODE (PATTERN (seq)) == SEQUENCE)
  4104. + return 0;
  4105. +
  4106. + return 1;
  4107. +}
  4108. +
  4109. +/* Return nonzero if we should emit a nop after a cbcond instruction.
  4110. + The cbcond instruction does not have a delay slot; however, there is
  4111. + a severe performance penalty if a control transfer appears right
  4112. + after a cbcond. Therefore we emit a nop when we detect this
  4113. + situation. */
  4114. +
  4115. +int
  4116. +emit_cbcond_nop (rtx_insn *insn)
  4117. +{
  4118. + rtx next = next_active_insn (insn);
  4119. +
  4120. + if (!next)
  4121. + return 1;
  4122. +
  4123. + if (NONJUMP_INSN_P (next)
  4124. + && GET_CODE (PATTERN (next)) == SEQUENCE)
  4125. + next = XVECEXP (PATTERN (next), 0, 0);
  4126. + else if (CALL_P (next)
  4127. + && GET_CODE (PATTERN (next)) == PARALLEL)
  4128. + {
  4129. + rtx delay = XVECEXP (PATTERN (next), 0, 1);
  4130. +
  4131. + if (GET_CODE (delay) == RETURN)
  4132. + {
  4133. + /* It's a sibling call. Do not emit the nop if we're going
  4134. + to emit something other than the jump itself as the first
  4135. + instruction of the sibcall sequence. */
  4136. + if (sparc_leaf_function_p || TARGET_FLAT)
  4137. + return 0;
  4138. + }
  4139. + }
  4140. +
  4141. + if (NONJUMP_INSN_P (next))
  4142. + return 0;
  4143. +
  4144. + return 1;
  4145. +}
  4146. +
  4147. +/* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
  4148. + instruction. RETURN_P is true if the v9 variant 'return' is to be
  4149. + considered in the test too.
  4150. +
  4151. + TRIAL must be a SET whose destination is a REG appropriate for the
  4152. + 'restore' instruction or, if RETURN_P is true, for the 'return'
  4153. + instruction. */
  4154. +
  4155. +static int
  4156. +eligible_for_restore_insn (rtx trial, bool return_p)
  4157. +{
  4158. + rtx pat = PATTERN (trial);
  4159. + rtx src = SET_SRC (pat);
  4160. + bool src_is_freg = false;
  4161. + rtx src_reg;
  4162. +
  4163. + /* Since we can now do moves between float and integer registers when
  4164. + VIS3 is enabled, we have to catch this case. We can allow such
  4165. + moves when doing a 'return', however. */
  4166. + src_reg = src;
  4167. + if (GET_CODE (src_reg) == SUBREG)
  4168. + src_reg = SUBREG_REG (src_reg);
  4169. + if (GET_CODE (src_reg) == REG
  4170. + && SPARC_FP_REG_P (REGNO (src_reg)))
  4171. + src_is_freg = true;
  4172. +
  4173. + /* The 'restore src,%g0,dest' pattern for word mode and below. */
  4174. + if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
  4175. + && arith_operand (src, GET_MODE (src))
  4176. + && ! src_is_freg)
  4177. + {
  4178. + if (TARGET_ARCH64)
  4179. + return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
  4180. + else
  4181. + return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
  4182. + }
  4183. +
  4184. + /* The 'restore src,%g0,dest' pattern for double-word mode. */
  4185. + else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
  4186. + && arith_double_operand (src, GET_MODE (src))
  4187. + && ! src_is_freg)
  4188. + return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
  4189. +
  4190. + /* The 'restore src,%g0,dest' pattern for float if no FPU. */
  4191. + else if (! TARGET_FPU && register_operand (src, SFmode))
  4192. + return 1;
  4193. +
  4194. + /* The 'restore src,%g0,dest' pattern for double if no FPU. */
  4195. + else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
  4196. + return 1;
  4197. +
  4198. + /* If we have the 'return' instruction, anything that does not use
  4199. + local or output registers and can go into a delay slot wins. */
  4200. + else if (return_p && TARGET_V9 && !epilogue_renumber (&pat, 1))
  4201. + return 1;
  4202. +
  4203. + /* The 'restore src1,src2,dest' pattern for SImode. */
  4204. + else if (GET_CODE (src) == PLUS
  4205. + && register_operand (XEXP (src, 0), SImode)
  4206. + && arith_operand (XEXP (src, 1), SImode))
  4207. + return 1;
  4208. +
  4209. + /* The 'restore src1,src2,dest' pattern for DImode. */
  4210. + else if (GET_CODE (src) == PLUS
  4211. + && register_operand (XEXP (src, 0), DImode)
  4212. + && arith_double_operand (XEXP (src, 1), DImode))
  4213. + return 1;
  4214. +
  4215. + /* The 'restore src1,%lo(src2),dest' pattern. */
  4216. + else if (GET_CODE (src) == LO_SUM
  4217. + && ! TARGET_CM_MEDMID
  4218. + && ((register_operand (XEXP (src, 0), SImode)
  4219. + && immediate_operand (XEXP (src, 1), SImode))
  4220. + || (TARGET_ARCH64
  4221. + && register_operand (XEXP (src, 0), DImode)
  4222. + && immediate_operand (XEXP (src, 1), DImode))))
  4223. + return 1;
  4224. +
  4225. + /* The 'restore src,src,dest' pattern. */
  4226. + else if (GET_CODE (src) == ASHIFT
  4227. + && (register_operand (XEXP (src, 0), SImode)
  4228. + || register_operand (XEXP (src, 0), DImode))
  4229. + && XEXP (src, 1) == const1_rtx)
  4230. + return 1;
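+ /* Editor's note: 'restore' performs an add into the new register
+    window, so the ASHIFT-by-1 case above works because x << 1 == x + x
+    and can be emitted as "restore %src, %src, %dest".  */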
  4231. +
  4232. + return 0;
  4233. +}
  4234. +
  4235. +/* Return nonzero if TRIAL can go into the function return's delay slot. */
  4236. +
  4237. +int
  4238. +eligible_for_return_delay (rtx_insn *trial)
  4239. +{
  4240. + int regno;
  4241. + rtx pat;
  4242. +
  4243. + /* If the function uses __builtin_eh_return, the eh_return machinery
  4244. + occupies the delay slot. */
  4245. + if (crtl->calls_eh_return)
  4246. + return 0;
  4247. +
  4248. + if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
  4249. + return 0;
  4250. +
  4251. + /* In the case of a leaf or flat function, anything can go into the slot. */
  4252. + if (sparc_leaf_function_p || TARGET_FLAT)
  4253. + return 1;
  4254. +
  4255. + if (!NONJUMP_INSN_P (trial))
  4256. + return 0;
  4257. +
  4258. + pat = PATTERN (trial);
  4259. + if (GET_CODE (pat) == PARALLEL)
  4260. + {
  4261. + int i;
  4262. +
  4263. + if (! TARGET_V9)
  4264. + return 0;
  4265. + for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
  4266. + {
  4267. + rtx expr = XVECEXP (pat, 0, i);
  4268. + if (GET_CODE (expr) != SET)
  4269. + return 0;
  4270. + if (GET_CODE (SET_DEST (expr)) != REG)
  4271. + return 0;
  4272. + regno = REGNO (SET_DEST (expr));
  4273. + if (regno >= 8 && regno < 24)
  4274. + return 0;
  4275. + }
  4276. + return !epilogue_renumber (&pat, 1);
  4277. + }
  4278. +
  4279. + if (GET_CODE (pat) != SET)
  4280. + return 0;
  4281. +
  4282. + if (GET_CODE (SET_DEST (pat)) != REG)
  4283. + return 0;
  4284. +
  4285. + regno = REGNO (SET_DEST (pat));
  4286. +
  4287. + /* Otherwise, only operations which can be done in tandem with
  4288. + a `restore' or `return' insn can go into the delay slot. */
  4289. + if (regno >= 8 && regno < 24)
  4290. + return 0;
  4291. +
  4292. + /* If this instruction sets up a floating-point register and we have a return
  4293. + instruction, it can probably go in. But restore will not work
  4294. + with FP_REGS. */
  4295. + if (! SPARC_INT_REG_P (regno))
  4296. + return TARGET_V9 && !epilogue_renumber (&pat, 1);
  4297. +
  4298. + return eligible_for_restore_insn (trial, true);
  4299. +}
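+ /* Editor's note: regnos 8-23 are the %o and %l registers, which are
+    renamed or discarded by 'restore', hence the exclusions above.  */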
  4300. +
  4301. +/* Return nonzero if TRIAL can go into the sibling call's delay slot. */
  4302. +
  4303. +int
  4304. +eligible_for_sibcall_delay (rtx_insn *trial)
  4305. +{
  4306. + rtx pat;
  4307. +
  4308. + if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
  4309. + return 0;
  4310. +
  4311. + if (!NONJUMP_INSN_P (trial))
  4312. + return 0;
  4313. +
  4314. + pat = PATTERN (trial);
  4315. +
  4316. + if (sparc_leaf_function_p || TARGET_FLAT)
  4317. + {
  4318. + /* If the tail call is done using the call instruction,
  4319. + we have to restore %o7 in the delay slot. */
  4320. + if (LEAF_SIBCALL_SLOT_RESERVED_P)
  4321. + return 0;
  4322. +
  4323. + /* %g1 is used to build the function address. */
  4324. + if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
  4325. + return 0;
  4326. +
  4327. + return 1;
  4328. + }
  4329. +
  4330. + if (GET_CODE (pat) != SET)
  4331. + return 0;
  4332. +
  4333. + /* Otherwise, only operations which can be done in tandem with
  4334. + a `restore' insn can go into the delay slot. */
  4335. + if (GET_CODE (SET_DEST (pat)) != REG
  4336. + || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
  4337. + || ! SPARC_INT_REG_P (REGNO (SET_DEST (pat))))
  4338. + return 0;
  4339. +
  4340. + /* If it mentions %o7, it can't go in, because sibcall will clobber it
  4341. + in most cases. */
  4342. + if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
  4343. + return 0;
  4344. +
  4345. + return eligible_for_restore_insn (trial, false);
  4346. +}
  4347. +
  4348. +/* Determine if it's legal to put X into the constant pool. This
  4349. + is not possible if X contains the address of a symbol that is
  4350. + not constant (TLS) or not known at final link time (PIC). */
  4351. +
  4352. +static bool
  4353. +sparc_cannot_force_const_mem (machine_mode mode, rtx x)
  4354. +{
  4355. + switch (GET_CODE (x))
  4356. + {
  4357. + case CONST_INT:
  4358. + case CONST_WIDE_INT:
  4359. + case CONST_DOUBLE:
  4360. + case CONST_VECTOR:
  4361. + /* Accept all non-symbolic constants. */
  4362. + return false;
  4363. +
  4364. + case LABEL_REF:
  4365. + /* Labels are OK iff we are non-PIC. */
  4366. + return flag_pic != 0;
  4367. +
  4368. + case SYMBOL_REF:
  4369. + /* 'Naked' TLS symbol references are never OK,
  4370. + non-TLS symbols are OK iff we are non-PIC. */
  4371. + if (SYMBOL_REF_TLS_MODEL (x))
  4372. + return true;
  4373. + else
  4374. + return flag_pic != 0;
  4375. +
  4376. + case CONST:
  4377. + return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
  4378. + case PLUS:
  4379. + case MINUS:
  4380. + return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
  4381. + || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
  4382. + case UNSPEC:
  4383. + return true;
  4384. + default:
  4385. + gcc_unreachable ();
  4386. + }
  4387. +}
  4388. +
  4389. +/* Global Offset Table support. */
  4390. +static GTY(()) rtx got_symbol_rtx = NULL_RTX;
  4391. +static GTY(()) rtx got_register_rtx = NULL_RTX;
  4392. +static GTY(()) rtx got_helper_rtx = NULL_RTX;
  4393. +
  4394. +static GTY(()) bool got_helper_needed = false;
  4395. +
  4396. +/* Return the SYMBOL_REF for the Global Offset Table. */
  4397. +
  4398. +static rtx
  4399. +sparc_got (void)
  4400. +{
  4401. + if (!got_symbol_rtx)
  4402. + got_symbol_rtx = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
  4403. +
  4404. + return got_symbol_rtx;
  4405. +}
  4406. +
  4407. +/* Wrapper around the load_pcrel_sym{si,di} patterns. */
  4408. +
  4409. +static rtx
  4410. +gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2)
  4411. +{
  4412. + int orig_flag_pic = flag_pic;
  4413. + rtx insn;
  4414. +
  4415. + /* The load_pcrel_sym{si,di} patterns require absolute addressing. */
  4416. + flag_pic = 0;
  4417. + if (TARGET_ARCH64)
  4418. + insn = gen_load_pcrel_symdi (op0, op1, op2, GEN_INT (REGNO (op0)));
  4419. + else
  4420. + insn = gen_load_pcrel_symsi (op0, op1, op2, GEN_INT (REGNO (op0)));
  4421. + flag_pic = orig_flag_pic;
  4422. +
  4423. + return insn;
  4424. +}
  4425. +
  4426. +/* Output the load_pcrel_sym{si,di} patterns. */
  4427. +
  4428. +const char *
  4429. +output_load_pcrel_sym (rtx *operands)
  4430. +{
  4431. + if (flag_delayed_branch)
  4432. + {
  4433. + output_asm_insn ("sethi\t%%hi(%a1-4), %0", operands);
  4434. + output_asm_insn ("call\t%a2", operands);
  4435. + output_asm_insn (" add\t%0, %%lo(%a1+4), %0", operands);
  4436. + }
  4437. + else
  4438. + {
  4439. + output_asm_insn ("sethi\t%%hi(%a1-8), %0", operands);
  4440. + output_asm_insn ("add\t%0, %%lo(%a1-4), %0", operands);
  4441. + output_asm_insn ("call\t%a2", operands);
  4442. + output_asm_insn (" nop", NULL);
  4443. + }
  4444. +
  4445. + if (operands[2] == got_helper_rtx)
  4446. + got_helper_needed = true;
  4447. +
  4448. + return "";
  4449. +}
  4450. +
  4451. +#ifdef HAVE_GAS_HIDDEN
  4452. +# define USE_HIDDEN_LINKONCE 1
  4453. +#else
  4454. +# define USE_HIDDEN_LINKONCE 0
  4455. +#endif
  4456. +
  4457. +/* Emit code to load the GOT register. */
  4458. +
  4459. +void
  4460. +load_got_register (void)
  4461. +{
  4462. + rtx insn;
  4463. +
  4464. + if (TARGET_VXWORKS_RTP)
  4465. + {
  4466. + if (!got_register_rtx)
  4467. + got_register_rtx = pic_offset_table_rtx;
  4468. +
  4469. + insn = gen_vxworks_load_got ();
  4470. + }
  4471. + else
  4472. + {
  4473. + if (!got_register_rtx)
  4474. + got_register_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
  4475. +
  4476. + /* The GOT symbol is subject to a PC-relative relocation so we need a
  4477. + helper function to add the PC value and thus get the final value. */
  4478. + if (!got_helper_rtx)
  4479. + {
  4480. + char name[32];
  4481. +
  4482. + /* Skip the leading '%' as that cannot be used in a symbol name. */
  4483. + if (USE_HIDDEN_LINKONCE)
  4484. + sprintf (name, "__sparc_get_pc_thunk.%s",
  4485. + reg_names[REGNO (got_register_rtx)] + 1);
  4486. + else
  4487. + ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC",
  4488. + REGNO (got_register_rtx));
  4489. +
  4490. + got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
  4491. + }
  4492. +
  4493. + insn
  4494. + = gen_load_pcrel_sym (got_register_rtx, sparc_got (), got_helper_rtx);
  4495. + }
  4496. +
  4497. + emit_insn (insn);
  4498. +}
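+ /* Editor's note: the helper is a tiny thunk along the lines of
+    "jmp %o7+8 ; add %o7, %reg, %reg" (a sketch; the actual body is
+    emitted elsewhere), which adds the call site's address in %o7 to
+    the partial PC-relative value built by output_load_pcrel_sym.  */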
  4499. +
  4500. +/* Ensure that we are not using patterns that are not OK with PIC. */
  4501. +
  4502. +int
  4503. +check_pic (int i)
  4504. +{
  4505. + rtx op;
  4506. +
  4507. + switch (flag_pic)
  4508. + {
  4509. + case 1:
  4510. + op = recog_data.operand[i];
  4511. + gcc_assert (GET_CODE (op) != SYMBOL_REF
  4512. + && (GET_CODE (op) != CONST
  4513. + || (GET_CODE (XEXP (op, 0)) == MINUS
  4514. + && XEXP (XEXP (op, 0), 0) == sparc_got ()
  4515. + && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
  4516. + /* fallthrough */
  4517. + case 2:
  4518. + default:
  4519. + return 1;
  4520. + }
  4521. +}
  4522. +
  4523. +/* Return true if X is an address which needs a temporary register when
  4524. + reloaded while generating PIC code. */
  4525. +
  4526. +int
  4527. +pic_address_needs_scratch (rtx x)
  4528. +{
  4529. + /* An address which is a symbol plus a non-SMALL_INT offset needs a temp reg. */
  4530. + if (GET_CODE (x) == CONST
  4531. + && GET_CODE (XEXP (x, 0)) == PLUS
  4532. + && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
  4533. + && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
  4534. + && !SMALL_INT (XEXP (XEXP (x, 0), 1)))
  4535. + return 1;
  4536. +
  4537. + return 0;
  4538. +}
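+ /* Editor's illustration: "sym + 0x2000" needs a scratch register
+    because 0x2000 does not fit in the 13-bit signed immediate range
+    tested by SMALL_INT, whereas "sym + 8" does.  */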
  4539. +
  4540. +/* Determine if a given RTX is a valid constant. We already know this
  4541. + satisfies CONSTANT_P. */
  4542. +
  4543. +static bool
  4544. +sparc_legitimate_constant_p (machine_mode mode, rtx x)
  4545. +{
  4546. + switch (GET_CODE (x))
  4547. + {
  4548. + case CONST:
  4549. + case SYMBOL_REF:
  4550. + if (sparc_tls_referenced_p (x))
  4551. + return false;
  4552. + break;
  4553. +
  4554. + case CONST_DOUBLE:
  4555. + /* Floating-point constants are generally not OK.
  4556. + The only exceptions are 0.0 and all-ones in VIS. */
  4557. + if (TARGET_VIS
  4558. + && SCALAR_FLOAT_MODE_P (mode)
  4559. + && (const_zero_operand (x, mode)
  4560. + || const_all_ones_operand (x, mode)))
  4561. + return true;
  4562. +
  4563. + return false;
  4564. +
  4565. + case CONST_VECTOR:
  4566. + /* Vector constants are generally not OK.
  4567. + The only exceptions are 0 and -1 in VIS. */
  4568. + if (TARGET_VIS
  4569. + && (const_zero_operand (x, mode)
  4570. + || const_all_ones_operand (x, mode)))
  4571. + return true;
  4572. +
  4573. + return false;
  4574. +
  4575. + default:
  4576. + break;
  4577. + }
  4578. +
  4579. + return true;
  4580. +}
  4581. +
  4582. +/* Determine if a given RTX is a valid constant address. */
  4583. +
  4584. +bool
  4585. +constant_address_p (rtx x)
  4586. +{
  4587. + switch (GET_CODE (x))
  4588. + {
  4589. + case LABEL_REF:
  4590. + case CONST_INT:
  4591. + case HIGH:
  4592. + return true;
  4593. +
  4594. + case CONST:
  4595. + if (flag_pic && pic_address_needs_scratch (x))
  4596. + return false;
  4597. + return sparc_legitimate_constant_p (Pmode, x);
  4598. +
  4599. + case SYMBOL_REF:
  4600. + return !flag_pic && sparc_legitimate_constant_p (Pmode, x);
  4601. +
  4602. + default:
  4603. + return false;
  4604. + }
  4605. +}
  4606. +
  4607. +/* Nonzero if the constant value X is a legitimate general operand
  4608. + when generating PIC code. It is given that flag_pic is on and
  4609. + that X satisfies CONSTANT_P. */
  4610. +
  4611. +bool
  4612. +legitimate_pic_operand_p (rtx x)
  4613. +{
  4614. + if (pic_address_needs_scratch (x))
  4615. + return false;
  4616. + if (sparc_tls_referenced_p (x))
  4617. + return false;
  4618. + return true;
  4619. +}
  4620. +
  4621. +/* Return true if X is a representation of the PIC register. */
  4622. +
  4623. +static bool
  4624. +sparc_pic_register_p (rtx x)
  4625. +{
  4626. + if (!REG_P (x) || !pic_offset_table_rtx)
  4627. + return false;
  4628. +
  4629. + if (x == pic_offset_table_rtx)
  4630. + return true;
  4631. +
  4632. + if (!HARD_REGISTER_P (pic_offset_table_rtx)
  4633. + && (HARD_REGISTER_P (x) || lra_in_progress || reload_in_progress)
  4634. + && ORIGINAL_REGNO (x) == REGNO (pic_offset_table_rtx))
  4635. + return true;
  4636. +
  4637. + return false;
  4638. +}
  4639. +
  4640. +#define RTX_OK_FOR_OFFSET_P(X, MODE) \
  4641. + (CONST_INT_P (X) \
  4642. + && INTVAL (X) >= -0x1000 \
  4643. + && INTVAL (X) <= (0x1000 - GET_MODE_SIZE (MODE)))
  4644. +
  4645. +#define RTX_OK_FOR_OLO10_P(X, MODE) \
  4646. + (CONST_INT_P (X) \
  4647. + && INTVAL (X) >= -0x1000 \
  4648. + && INTVAL (X) <= (0xc00 - GET_MODE_SIZE (MODE)))
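+ /* Editor's note: both macros enforce the 13-bit signed immediate range
+    -0x1000..0xfff; subtracting GET_MODE_SIZE keeps the last word of a
+    multi-word access in range, and the OLO10 variant caps the offset at
+    0xc00 so that adding a %lo() value (at most 0x3ff) still fits.  */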
  4649. +
  4650. +/* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
  4651. +
  4652. + On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
  4653. + ordinarily. This changes a bit when generating PIC. */
  4654. +
  4655. +static bool
  4656. +sparc_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
  4657. +{
  4658. + rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
  4659. +
  4660. + if (REG_P (addr) || GET_CODE (addr) == SUBREG)
  4661. + rs1 = addr;
  4662. + else if (GET_CODE (addr) == PLUS)
  4663. + {
  4664. + rs1 = XEXP (addr, 0);
  4665. + rs2 = XEXP (addr, 1);
  4666. +
  4667. + /* Canonicalize: REG comes first; if there are no regs,
  4668. + LO_SUM comes first. */
  4669. + if (!REG_P (rs1)
  4670. + && GET_CODE (rs1) != SUBREG
  4671. + && (REG_P (rs2)
  4672. + || GET_CODE (rs2) == SUBREG
  4673. + || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
  4674. + {
  4675. + rs1 = XEXP (addr, 1);
  4676. + rs2 = XEXP (addr, 0);
  4677. + }
  4678. +
  4679. + if ((flag_pic == 1
  4680. + && sparc_pic_register_p (rs1)
  4681. + && !REG_P (rs2)
  4682. + && GET_CODE (rs2) != SUBREG
  4683. + && GET_CODE (rs2) != LO_SUM
  4684. + && GET_CODE (rs2) != MEM
  4685. + && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
  4686. + && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
  4687. + && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
  4688. + || ((REG_P (rs1)
  4689. + || GET_CODE (rs1) == SUBREG)
  4690. + && RTX_OK_FOR_OFFSET_P (rs2, mode)))
  4691. + {
  4692. + imm1 = rs2;
  4693. + rs2 = NULL;
  4694. + }
  4695. + else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
  4696. + && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
  4697. + {
  4698. + /* We prohibit REG + REG for TFmode when there are no quad move insns
  4699. + and we consequently need to split. We do this because REG+REG
  4700. + is not an offsettable address. If we get the situation in reload
  4701. + where source and destination of a movtf pattern are both MEMs with
  4702. + REG+REG address, then only one of them gets converted to an
  4703. + offsettable address. */
  4704. + if (mode == TFmode
  4705. + && ! (TARGET_ARCH64 && TARGET_HARD_QUAD))
  4706. + return 0;
  4707. +
  4708. + /* Likewise for TImode, but in all cases. */
  4709. + if (mode == TImode)
  4710. + return 0;
  4711. +
  4712. + /* We prohibit REG + REG on ARCH32 if not optimizing for
  4713. + DFmode/DImode because then mem_min_alignment is likely to be zero
  4714. + after reload and the forced split would lack a matching splitter
  4715. + pattern. */
  4716. + if (TARGET_ARCH32 && !optimize
  4717. + && (mode == DFmode || mode == DImode))
  4718. + return 0;
  4719. + }
  4720. + else if (USE_AS_OFFSETABLE_LO10
  4721. + && GET_CODE (rs1) == LO_SUM
  4722. + && TARGET_ARCH64
  4723. + && ! TARGET_CM_MEDMID
  4724. + && RTX_OK_FOR_OLO10_P (rs2, mode))
  4725. + {
  4726. + rs2 = NULL;
  4727. + imm1 = XEXP (rs1, 1);
  4728. + rs1 = XEXP (rs1, 0);
  4729. + if (!CONSTANT_P (imm1)
  4730. + || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
  4731. + return 0;
  4732. + }
  4733. + }
  4734. + else if (GET_CODE (addr) == LO_SUM)
  4735. + {
  4736. + rs1 = XEXP (addr, 0);
  4737. + imm1 = XEXP (addr, 1);
  4738. +
  4739. + if (!CONSTANT_P (imm1)
  4740. + || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
  4741. + return 0;
  4742. +
  4743. + /* We can't allow TFmode in 32-bit mode, because an offset greater
  4744. + than the alignment (8) may cause the LO_SUM to overflow. */
  4745. + if (mode == TFmode && TARGET_ARCH32)
  4746. + return 0;
  4747. +
  4748. + /* During reload, accept the HIGH+LO_SUM construct generated by
  4749. + sparc_legitimize_reload_address. */
  4750. + if (reload_in_progress
  4751. + && GET_CODE (rs1) == HIGH
  4752. + && XEXP (rs1, 0) == imm1)
  4753. + return 1;
  4754. + }
  4755. + else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
  4756. + return 1;
  4757. + else
  4758. + return 0;
  4759. +
  4760. + if (GET_CODE (rs1) == SUBREG)
  4761. + rs1 = SUBREG_REG (rs1);
  4762. + if (!REG_P (rs1))
  4763. + return 0;
  4764. +
  4765. + if (rs2)
  4766. + {
  4767. + if (GET_CODE (rs2) == SUBREG)
  4768. + rs2 = SUBREG_REG (rs2);
  4769. + if (!REG_P (rs2))
  4770. + return 0;
  4771. + }
  4772. +
  4773. + if (strict)
  4774. + {
  4775. + if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
  4776. + || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
  4777. + return 0;
  4778. + }
  4779. + else
  4780. + {
  4781. + if ((! SPARC_INT_REG_P (REGNO (rs1))
  4782. + && REGNO (rs1) != FRAME_POINTER_REGNUM
  4783. + && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
  4784. + || (rs2
  4785. + && (! SPARC_INT_REG_P (REGNO (rs2))
  4786. + && REGNO (rs2) != FRAME_POINTER_REGNUM
  4787. + && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
  4788. + return 0;
  4789. + }
  4790. + return 1;
  4791. +}
  4792. +
  4793. +/* Return the SYMBOL_REF for the tls_get_addr function. */
  4794. +
  4795. +static GTY(()) rtx sparc_tls_symbol = NULL_RTX;
  4796. +
  4797. +static rtx
  4798. +sparc_tls_get_addr (void)
  4799. +{
  4800. + if (!sparc_tls_symbol)
  4801. + sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
  4802. +
  4803. + return sparc_tls_symbol;
  4804. +}
  4805. +
  4806. +/* Return the Global Offset Table to be used in TLS mode. */
  4807. +
  4808. +static rtx
  4809. +sparc_tls_got (void)
  4810. +{
  4811. + /* In PIC mode, this is just the PIC offset table. */
  4812. + if (flag_pic)
  4813. + {
  4814. + crtl->uses_pic_offset_table = 1;
  4815. + return pic_offset_table_rtx;
  4816. + }
  4817. +
  4818. + /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
  4819. + the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
  4820. + if (TARGET_SUN_TLS && TARGET_ARCH32)
  4821. + {
  4822. + load_got_register ();
  4823. + return got_register_rtx;
  4824. + }
  4825. +
  4826. + /* In all other cases, we load a new pseudo with the GOT symbol. */
  4827. + return copy_to_reg (sparc_got ());
  4828. +}
  4829. +
  4830. +/* Return true if X contains a thread-local symbol. */
  4831. +
  4832. +static bool
  4833. +sparc_tls_referenced_p (rtx x)
  4834. +{
  4835. + if (!TARGET_HAVE_TLS)
  4836. + return false;
  4837. +
  4838. + if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
  4839. + x = XEXP (XEXP (x, 0), 0);
  4840. +
  4841. + if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
  4842. + return true;
  4843. +
  4844. + /* That's all we handle in sparc_legitimize_tls_address for now. */
  4845. + return false;
  4846. +}
  4847. +
  4848. +/* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
  4849. + this (thread-local) address. */
  4850. +
  4851. +static rtx
  4852. +sparc_legitimize_tls_address (rtx addr)
  4853. +{
  4854. + rtx temp1, temp2, temp3, ret, o0, got;
  4855. + rtx_insn *insn;
  4856. +
  4857. + gcc_assert (can_create_pseudo_p ());
  4858. +
  4859. + if (GET_CODE (addr) == SYMBOL_REF)
  4860. + /* Although the various sethi/or sequences generate SImode values, many of
  4861. + them can be transformed by the linker when relaxing and, if relaxing to
  4862. + local-exec, will become a sethi/xor pair, which is signed and therefore
  4863. + a full DImode value in 64-bit mode. Thus we must use Pmode, lest these
  4864. + values be spilled onto the stack in 64-bit mode. */
  4865. + switch (SYMBOL_REF_TLS_MODEL (addr))
  4866. + {
  4867. + case TLS_MODEL_GLOBAL_DYNAMIC:
  4868. + start_sequence ();
  4869. + temp1 = gen_reg_rtx (Pmode);
  4870. + temp2 = gen_reg_rtx (Pmode);
  4871. + ret = gen_reg_rtx (Pmode);
  4872. + o0 = gen_rtx_REG (Pmode, 8);
  4873. + got = sparc_tls_got ();
  4874. + if (TARGET_ARCH32)
  4875. + {
  4876. + emit_insn (gen_tgd_hi22si (temp1, addr));
  4877. + emit_insn (gen_tgd_lo10si (temp2, temp1, addr));
  4878. + emit_insn (gen_tgd_addsi (o0, got, temp2, addr));
  4879. + insn = emit_call_insn (gen_tgd_callsi (o0, sparc_tls_get_addr (),
  4880. + addr, const1_rtx));
  4881. + }
  4882. + else
  4883. + {
  4884. + emit_insn (gen_tgd_hi22di (temp1, addr));
  4885. + emit_insn (gen_tgd_lo10di (temp2, temp1, addr));
  4886. + emit_insn (gen_tgd_adddi (o0, got, temp2, addr));
  4887. + insn = emit_call_insn (gen_tgd_calldi (o0, sparc_tls_get_addr (),
  4888. + addr, const1_rtx));
  4889. + }
  4890. + use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
  4891. + RTL_CONST_CALL_P (insn) = 1;
  4892. + insn = get_insns ();
  4893. + end_sequence ();
  4894. + emit_libcall_block (insn, ret, o0, addr);
  4895. + break;
  4896. +
  4897. + case TLS_MODEL_LOCAL_DYNAMIC:
  4898. + start_sequence ();
  4899. + temp1 = gen_reg_rtx (Pmode);
  4900. + temp2 = gen_reg_rtx (Pmode);
  4901. + temp3 = gen_reg_rtx (Pmode);
  4902. + ret = gen_reg_rtx (Pmode);
  4903. + o0 = gen_rtx_REG (Pmode, 8);
  4904. + got = sparc_tls_got ();
  4905. + if (TARGET_ARCH32)
  4906. + {
  4907. + emit_insn (gen_tldm_hi22si (temp1));
  4908. + emit_insn (gen_tldm_lo10si (temp2, temp1));
  4909. + emit_insn (gen_tldm_addsi (o0, got, temp2));
  4910. + insn = emit_call_insn (gen_tldm_callsi (o0, sparc_tls_get_addr (),
  4911. + const1_rtx));
  4912. + }
  4913. + else
  4914. + {
  4915. + emit_insn (gen_tldm_hi22di (temp1));
  4916. + emit_insn (gen_tldm_lo10di (temp2, temp1));
  4917. + emit_insn (gen_tldm_adddi (o0, got, temp2));
  4918. + insn = emit_call_insn (gen_tldm_calldi (o0, sparc_tls_get_addr (),
  4919. + const1_rtx));
  4920. + }
  4921. + use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
  4922. + RTL_CONST_CALL_P (insn) = 1;
  4923. + insn = get_insns ();
  4924. + end_sequence ();
  4925. + /* Attach a unique REG_EQUAL, to allow the RTL optimizers to
  4926. + share the LD_BASE result with other LD model accesses. */
  4927. + emit_libcall_block (insn, temp3, o0,
  4928. + gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
  4929. + UNSPEC_TLSLD_BASE));
  4930. + temp1 = gen_reg_rtx (Pmode);
  4931. + temp2 = gen_reg_rtx (Pmode);
  4932. + if (TARGET_ARCH32)
  4933. + {
  4934. + emit_insn (gen_tldo_hix22si (temp1, addr));
  4935. + emit_insn (gen_tldo_lox10si (temp2, temp1, addr));
  4936. + emit_insn (gen_tldo_addsi (ret, temp3, temp2, addr));
  4937. + }
  4938. + else
  4939. + {
  4940. + emit_insn (gen_tldo_hix22di (temp1, addr));
  4941. + emit_insn (gen_tldo_lox10di (temp2, temp1, addr));
  4942. + emit_insn (gen_tldo_adddi (ret, temp3, temp2, addr));
  4943. + }
  4944. + break;
  4945. +
  4946. + case TLS_MODEL_INITIAL_EXEC:
  4947. + temp1 = gen_reg_rtx (Pmode);
  4948. + temp2 = gen_reg_rtx (Pmode);
  4949. + temp3 = gen_reg_rtx (Pmode);
  4950. + got = sparc_tls_got ();
  4951. + if (TARGET_ARCH32)
  4952. + {
  4953. + emit_insn (gen_tie_hi22si (temp1, addr));
  4954. + emit_insn (gen_tie_lo10si (temp2, temp1, addr));
  4955. + emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
  4956. + }
  4957. + else
  4958. + {
  4959. + emit_insn (gen_tie_hi22di (temp1, addr));
  4960. + emit_insn (gen_tie_lo10di (temp2, temp1, addr));
  4961. + emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
  4962. + }
  4963. + if (TARGET_SUN_TLS)
  4964. + {
  4965. + ret = gen_reg_rtx (Pmode);
  4966. + if (TARGET_ARCH32)
  4967. + emit_insn (gen_tie_addsi (ret, gen_rtx_REG (Pmode, 7),
  4968. + temp3, addr));
  4969. + else
  4970. + emit_insn (gen_tie_adddi (ret, gen_rtx_REG (Pmode, 7),
  4971. + temp3, addr));
  4972. + }
  4973. + else
  4974. + ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
  4975. + break;
  4976. +
  4977. + case TLS_MODEL_LOCAL_EXEC:
  4978. + temp1 = gen_reg_rtx (Pmode);
  4979. + temp2 = gen_reg_rtx (Pmode);
  4980. + if (TARGET_ARCH32)
  4981. + {
  4982. + emit_insn (gen_tle_hix22si (temp1, addr));
  4983. + emit_insn (gen_tle_lox10si (temp2, temp1, addr));
  4984. + }
  4985. + else
  4986. + {
  4987. + emit_insn (gen_tle_hix22di (temp1, addr));
  4988. + emit_insn (gen_tle_lox10di (temp2, temp1, addr));
  4989. + }
  4990. + ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
  4991. + break;
  4992. +
  4993. + default:
  4994. + gcc_unreachable ();
  4995. + }
  4996. +
  4997. + else if (GET_CODE (addr) == CONST)
  4998. + {
  4999. + rtx base, offset;
  5000. +
  5001. + gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
  5002. +
  5003. + base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
  5004. + offset = XEXP (XEXP (addr, 0), 1);
  5005. +
  5006. + base = force_operand (base, NULL_RTX);
  5007. + if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
  5008. + offset = force_reg (Pmode, offset);
  5009. + ret = gen_rtx_PLUS (Pmode, base, offset);
  5010. + }
  5011. +
  5012. + else
  5013. + gcc_unreachable (); /* for now ... */
  5014. +
  5015. + return ret;
  5016. +}
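
Illustration (a sketch, not part of the patch): which of the four TLS cases above fires is decided by SYMBOL_REF_TLS_MODEL, i.e. by how the thread-local variable is declared and whether -fpic/-fPIC is in effect. The model names in the comments below are typical mappings under the stated flags, not guarantees.

    #include <stdio.h>

    __thread int tls_global;        /* -fPIC in a DSO: typically global-dynamic */
    static __thread int tls_local;  /* -fPIC in a DSO: typically local-dynamic  */
                                    /* in a non-PIC executable, both usually
                                       decay to initial-/local-exec             */
    int
    main (void)
    {
      tls_local = 1;
      tls_global = tls_local + 41;
      printf ("%d\n", tls_global);  /* prints 42 */
      return 0;
    }
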
  5017. +
  5018. +/* Legitimize PIC addresses. If the address is already position-independent,
  5019. + we return ORIG. Newly generated position-independent addresses go into a
  5020. + reg. This is REG if nonzero, otherwise we allocate register(s) as
  5021. + necessary. */
  5022. +
  5023. +static rtx
  5024. +sparc_legitimize_pic_address (rtx orig, rtx reg)
  5025. +{
  5026. + if (GET_CODE (orig) == SYMBOL_REF
  5027. + /* See the comment in sparc_expand_move. */
  5028. + || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
  5029. + {
  5030. + bool gotdata_op = false;
  5031. + rtx pic_ref, address;
  5032. + rtx_insn *insn;
  5033. +
  5034. + if (!reg)
  5035. + {
  5036. + gcc_assert (can_create_pseudo_p ());
  5037. + reg = gen_reg_rtx (Pmode);
  5038. + }
  5039. +
  5040. + if (flag_pic == 2)
  5041. + {
  5042. + /* If not during reload, allocate another temp reg here for loading
  5043. + in the address, so that these instructions can be optimized
  5044. + properly. */
  5045. + rtx temp_reg = can_create_pseudo_p () ? gen_reg_rtx (Pmode) : reg;
  5046. +
  5047. + /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
  5048. + won't get confused into thinking that these two instructions
  5049. + are loading in the true address of the symbol. If in the
  5050. + future a PIC rtx exists, that should be used instead. */
  5051. + if (TARGET_ARCH64)
  5052. + {
  5053. + emit_insn (gen_movdi_high_pic (temp_reg, orig));
  5054. + emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
  5055. + }
  5056. + else
  5057. + {
  5058. + emit_insn (gen_movsi_high_pic (temp_reg, orig));
  5059. + emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
  5060. + }
  5061. +
  5062. + address = temp_reg;
  5063. + gotdata_op = true;
  5064. + }
  5065. + else
  5066. + address = orig;
  5067. +
  5068. + crtl->uses_pic_offset_table = 1;
  5069. + if (gotdata_op)
  5070. + {
  5071. + if (TARGET_ARCH64)
  5072. + insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
  5073. + pic_offset_table_rtx,
  5074. + address, orig));
  5075. + else
  5076. + insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
  5077. + pic_offset_table_rtx,
  5078. + address, orig));
  5079. + }
  5080. + else
  5081. + {
  5082. + pic_ref
  5083. + = gen_const_mem (Pmode,
  5084. + gen_rtx_PLUS (Pmode,
  5085. + pic_offset_table_rtx, address));
  5086. + insn = emit_move_insn (reg, pic_ref);
  5087. + }
  5088. +
  5089. + /* Put a REG_EQUAL note on this insn, so that it can be optimized
   5090. + by the loop optimizer. */
  5091. + set_unique_reg_note (insn, REG_EQUAL, orig);
  5092. + return reg;
  5093. + }
  5094. + else if (GET_CODE (orig) == CONST)
  5095. + {
  5096. + rtx base, offset;
  5097. +
  5098. + if (GET_CODE (XEXP (orig, 0)) == PLUS
  5099. + && sparc_pic_register_p (XEXP (XEXP (orig, 0), 0)))
  5100. + return orig;
  5101. +
  5102. + if (!reg)
  5103. + {
  5104. + gcc_assert (can_create_pseudo_p ());
  5105. + reg = gen_reg_rtx (Pmode);
  5106. + }
  5107. +
  5108. + gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
  5109. + base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
  5110. + offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
  5111. + base == reg ? NULL_RTX : reg);
  5112. +
  5113. + if (GET_CODE (offset) == CONST_INT)
  5114. + {
  5115. + if (SMALL_INT (offset))
  5116. + return plus_constant (Pmode, base, INTVAL (offset));
  5117. + else if (can_create_pseudo_p ())
  5118. + offset = force_reg (Pmode, offset);
  5119. + else
  5120. + /* If we reach here, then something is seriously wrong. */
  5121. + gcc_unreachable ();
  5122. + }
  5123. + return gen_rtx_PLUS (Pmode, base, offset);
  5124. + }
  5125. + else if (GET_CODE (orig) == LABEL_REF)
  5126. + /* ??? We ought to be checking that the register is live instead, in case
  5127. + it is eliminated. */
  5128. + crtl->uses_pic_offset_table = 1;
  5129. +
  5130. + return orig;
  5131. +}
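
As a usage-level illustration (hypothetical names `counter` and `bump`, not from the patch): this is the kind of access the function above legitimizes.

    int counter;  /* an ordinary global with default visibility */

    /* With -fPIC, the SYMBOL_REF for 'counter' is not a legitimate
       address by itself; the function above rewrites the access into a
       GOT load through the PIC register, via the gotdata patterns when
       flag_pic == 2.  */
    int
    bump (void)
    {
      return ++counter;
    }
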
  5132. +
  5133. +/* Try machine-dependent ways of modifying an illegitimate address X
  5134. + to be legitimate. If we find one, return the new, valid address.
  5135. +
  5136. + OLDX is the address as it was before break_out_memory_refs was called.
  5137. + In some cases it is useful to look at this to decide what needs to be done.
  5138. +
  5139. + MODE is the mode of the operand pointed to by X.
  5140. +
  5141. + On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
  5142. +
  5143. +static rtx
  5144. +sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
  5145. + machine_mode mode)
  5146. +{
  5147. + rtx orig_x = x;
  5148. +
  5149. + if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
  5150. + x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
  5151. + force_operand (XEXP (x, 0), NULL_RTX));
  5152. + if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
  5153. + x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
  5154. + force_operand (XEXP (x, 1), NULL_RTX));
  5155. + if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
  5156. + x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
  5157. + XEXP (x, 1));
  5158. + if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
  5159. + x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
  5160. + force_operand (XEXP (x, 1), NULL_RTX));
  5161. +
  5162. + if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
  5163. + return x;
  5164. +
  5165. + if (sparc_tls_referenced_p (x))
  5166. + x = sparc_legitimize_tls_address (x);
  5167. + else if (flag_pic)
  5168. + x = sparc_legitimize_pic_address (x, NULL_RTX);
  5169. + else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
  5170. + x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
  5171. + copy_to_mode_reg (Pmode, XEXP (x, 1)));
  5172. + else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
  5173. + x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
  5174. + copy_to_mode_reg (Pmode, XEXP (x, 0)));
  5175. + else if (GET_CODE (x) == SYMBOL_REF
  5176. + || GET_CODE (x) == CONST
  5177. + || GET_CODE (x) == LABEL_REF)
  5178. + x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
  5179. +
  5180. + return x;
  5181. +}
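
Hypothetical example (sketch only): an indexed access whose address has the REG+(X*Y) shape rewritten above. SPARC offers only reg+reg and reg+simm13 addressing, so the scaled index is forced into a register and the PLUS of two registers becomes the address.

    /* p[i] computes the address p + i*4; the i*4 term is what the MULT
       cases in sparc_legitimize_address push into a separate register.  */
    int
    pick (int *p, long i)
    {
      return p[i];
    }
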
  5182. +
  5183. +/* Delegitimize an address that was legitimized by the above function. */
  5184. +
  5185. +static rtx
  5186. +sparc_delegitimize_address (rtx x)
  5187. +{
  5188. + x = delegitimize_mem_from_attrs (x);
  5189. +
  5190. + if (GET_CODE (x) == LO_SUM)
  5191. + x = XEXP (x, 1);
  5192. +
  5193. + if (GET_CODE (x) == UNSPEC)
  5194. + switch (XINT (x, 1))
  5195. + {
  5196. + case UNSPEC_MOVE_PIC:
  5197. + case UNSPEC_TLSLE:
  5198. + x = XVECEXP (x, 0, 0);
  5199. + gcc_assert (GET_CODE (x) == SYMBOL_REF);
  5200. + break;
  5201. + case UNSPEC_MOVE_GOTDATA:
  5202. + x = XVECEXP (x, 0, 2);
  5203. + gcc_assert (GET_CODE (x) == SYMBOL_REF);
  5204. + break;
  5205. + default:
  5206. + break;
  5207. + }
  5208. +
  5209. + /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
  5210. + if (GET_CODE (x) == MINUS
  5211. + && (XEXP (x, 0) == got_register_rtx
  5212. + || sparc_pic_register_p (XEXP (x, 0))))
  5213. + {
  5214. + rtx y = XEXP (x, 1);
  5215. +
  5216. + if (GET_CODE (y) == LO_SUM)
  5217. + y = XEXP (y, 1);
  5218. +
  5219. + if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_MOVE_PIC_LABEL)
  5220. + {
  5221. + x = XVECEXP (y, 0, 0);
  5222. + gcc_assert (GET_CODE (x) == LABEL_REF
  5223. + || (GET_CODE (x) == CONST
  5224. + && GET_CODE (XEXP (x, 0)) == PLUS
  5225. + && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
  5226. + && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT));
  5227. + }
  5228. + }
  5229. +
  5230. + return x;
  5231. +}
  5232. +
  5233. +/* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
  5234. + replace the input X, or the original X if no replacement is called for.
  5235. + The output parameter *WIN is 1 if the calling macro should goto WIN,
  5236. + 0 if it should not.
  5237. +
  5238. + For SPARC, we wish to handle addresses by splitting them into
  5239. + HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
  5240. + This cuts the number of extra insns by one.
  5241. +
  5242. + Do nothing when generating PIC code and the address is a symbolic
  5243. + operand or requires a scratch register. */
  5244. +
  5245. +rtx
  5246. +sparc_legitimize_reload_address (rtx x, machine_mode mode,
  5247. + int opnum, int type,
  5248. + int ind_levels ATTRIBUTE_UNUSED, int *win)
  5249. +{
  5250. + /* Decompose SImode constants into HIGH+LO_SUM. */
  5251. + if (CONSTANT_P (x)
  5252. + && (mode != TFmode || TARGET_ARCH64)
  5253. + && GET_MODE (x) == SImode
  5254. + && GET_CODE (x) != LO_SUM
  5255. + && GET_CODE (x) != HIGH
  5256. + && sparc_code_model <= CM_MEDLOW
  5257. + && !(flag_pic
  5258. + && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
  5259. + {
  5260. + x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
  5261. + push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
  5262. + BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
  5263. + opnum, (enum reload_type)type);
  5264. + *win = 1;
  5265. + return x;
  5266. + }
  5267. +
  5268. + /* We have to recognize what we have already generated above. */
  5269. + if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
  5270. + {
  5271. + push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
  5272. + BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
  5273. + opnum, (enum reload_type)type);
  5274. + *win = 1;
  5275. + return x;
  5276. + }
  5277. +
  5278. + *win = 0;
  5279. + return x;
  5280. +}
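
A standalone sketch (not GCC code) of the arithmetic behind the HIGH/LO_SUM pair built above: it models the SPARC sethi/%lo idiom, which splits a 32-bit value into an upper 22-bit part and a lower 10-bit part.

    #include <stdio.h>
    #include <stdint.h>

    int
    main (void)
    {
      uint32_t addr = 0x12345678u;
      uint32_t hi = addr & ~0x3ffu;  /* what sethi %hi(addr), %g1 leaves in %g1 */
      uint32_t lo = addr & 0x3ffu;   /* the %lo(addr) immediate                 */
      printf ("hi=0x%08x lo=0x%03x sum=0x%08x\n", hi, lo, hi + lo);
      return hi + lo == addr ? 0 : 1;
    }
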
  5281. +
  5282. +/* Return true if ADDR (a legitimate address expression)
  5283. + has an effect that depends on the machine mode it is used for.
  5284. +
  5285. + In PIC mode,
  5286. +
  5287. + (mem:HI [%l7+a])
  5288. +
  5289. + is not equivalent to
  5290. +
  5291. + (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
  5292. +
  5293. + because [%l7+a+1] is interpreted as the address of (a+1). */
  5294. +
  5295. +
  5296. +static bool
  5297. +sparc_mode_dependent_address_p (const_rtx addr,
  5298. + addr_space_t as ATTRIBUTE_UNUSED)
  5299. +{
  5300. + if (GET_CODE (addr) == PLUS
  5301. + && sparc_pic_register_p (XEXP (addr, 0))
  5302. + && symbolic_operand (XEXP (addr, 1), VOIDmode))
  5303. + return true;
  5304. +
  5305. + return false;
  5306. +}
  5307. +
  5308. +/* Emit a call instruction with the pattern given by PAT. ADDR is the
  5309. + address of the call target. */
  5310. +
  5311. +void
  5312. +sparc_emit_call_insn (rtx pat, rtx addr)
  5313. +{
  5314. + rtx_insn *insn;
  5315. +
  5316. + insn = emit_call_insn (pat);
  5317. +
  5318. + /* The PIC register is live on entry to VxWorks PIC PLT entries. */
  5319. + if (TARGET_VXWORKS_RTP
  5320. + && flag_pic
  5321. + && GET_CODE (addr) == SYMBOL_REF
  5322. + && (SYMBOL_REF_DECL (addr)
  5323. + ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
  5324. + : !SYMBOL_REF_LOCAL_P (addr)))
  5325. + {
  5326. + use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
  5327. + crtl->uses_pic_offset_table = 1;
  5328. + }
  5329. +}
  5330. +
  5331. +/* Return 1 if RTX is a MEM which is known to be aligned to at
  5332. + least a DESIRED byte boundary. */
  5333. +
  5334. +int
  5335. +mem_min_alignment (rtx mem, int desired)
  5336. +{
  5337. + rtx addr, base, offset;
  5338. +
  5339. + /* If it's not a MEM we can't accept it. */
  5340. + if (GET_CODE (mem) != MEM)
  5341. + return 0;
  5342. +
  5343. + /* Obviously... */
  5344. + if (!TARGET_UNALIGNED_DOUBLES
  5345. + && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
  5346. + return 1;
  5347. +
  5348. + /* ??? The rest of the function predates MEM_ALIGN so
  5349. + there is probably a bit of redundancy. */
  5350. + addr = XEXP (mem, 0);
  5351. + base = offset = NULL_RTX;
  5352. + if (GET_CODE (addr) == PLUS)
  5353. + {
  5354. + if (GET_CODE (XEXP (addr, 0)) == REG)
  5355. + {
  5356. + base = XEXP (addr, 0);
  5357. +
  5358. + /* What we are saying here is that if the base
  5359. + REG is aligned properly, the compiler will make
  5360. + sure any REG based index upon it will be so
  5361. + as well. */
  5362. + if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
  5363. + offset = XEXP (addr, 1);
  5364. + else
  5365. + offset = const0_rtx;
  5366. + }
  5367. + }
  5368. + else if (GET_CODE (addr) == REG)
  5369. + {
  5370. + base = addr;
  5371. + offset = const0_rtx;
  5372. + }
  5373. +
  5374. + if (base != NULL_RTX)
  5375. + {
  5376. + int regno = REGNO (base);
  5377. +
  5378. + if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
  5379. + {
  5380. + /* Check if the compiler has recorded some information
  5381. + about the alignment of the base REG. If reload has
  5382. + completed, we already matched with proper alignments.
  5383. + If not running global_alloc, reload might give us
   5384. + an unaligned pointer to the local stack though. */
  5385. + if (((cfun != 0
  5386. + && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
  5387. + || (optimize && reload_completed))
  5388. + && (INTVAL (offset) & (desired - 1)) == 0)
  5389. + return 1;
  5390. + }
  5391. + else
  5392. + {
  5393. + if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
  5394. + return 1;
  5395. + }
  5396. + }
  5397. + else if (! TARGET_UNALIGNED_DOUBLES
  5398. + || CONSTANT_P (addr)
  5399. + || GET_CODE (addr) == LO_SUM)
  5400. + {
  5401. + /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
  5402. + is true, in which case we can only assume that an access is aligned if
  5403. + it is to a constant address, or the address involves a LO_SUM. */
  5404. + return 1;
  5405. + }
  5406. +
  5407. + /* An obviously unaligned address. */
  5408. + return 0;
  5409. +}
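
A standalone sketch of the two alignment tests above. DESIRED is a power of two, so "offset & (desired - 1)" extracts the misalignment; for %sp/%fp-based addresses the stack bias is subtracted first (shown with the 64-bit SPARC bias of 2047).

    #include <stdio.h>

    int
    main (void)
    {
      long bias = 2047;
      printf ("%d\n", (16 & (8 - 1)) == 0);            /* 1: 16 is 8-aligned   */
      printf ("%d\n", (12 & (8 - 1)) == 0);            /* 0: 12 is not         */
      printf ("%d\n", ((2055 - bias) & (8 - 1)) == 0); /* 1: biased offset = 8 */
      return 0;
    }
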
  5410. +
  5411. +
  5412. +/* Vectors to keep interesting information about registers where it can easily
   5413. + be found. We used to use the actual mode value as the bit number, but there
  5414. + are more than 32 modes now. Instead we use two tables: one indexed by
  5415. + hard register number, and one indexed by mode. */
  5416. +
  5417. +/* The purpose of sparc_mode_class is to shrink the range of modes so that
  5418. + they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
  5419. + mapped into one sparc_mode_class mode. */
  5420. +
  5421. +enum sparc_mode_class {
  5422. + H_MODE, S_MODE, D_MODE, T_MODE, O_MODE,
  5423. + SF_MODE, DF_MODE, TF_MODE, OF_MODE,
  5424. + CC_MODE, CCFP_MODE
  5425. +};
  5426. +
  5427. +/* Modes for single-word and smaller quantities. */
  5428. +#define S_MODES \
  5429. + ((1 << (int) H_MODE) | (1 << (int) S_MODE) | (1 << (int) SF_MODE))
  5430. +
  5431. +/* Modes for double-word and smaller quantities. */
  5432. +#define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
  5433. +
  5434. +/* Modes for quad-word and smaller quantities. */
  5435. +#define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
  5436. +
  5437. +/* Modes for 8-word and smaller quantities. */
  5438. +#define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
  5439. +
  5440. +/* Modes for single-float quantities. */
  5441. +#define SF_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
  5442. +
  5443. +/* Modes for double-float and smaller quantities. */
  5444. +#define DF_MODES (SF_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
  5445. +
  5446. +/* Modes for quad-float and smaller quantities. */
  5447. +#define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
  5448. +
  5449. +/* Modes for quad-float pairs and smaller quantities. */
  5450. +#define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
  5451. +
  5452. +/* Modes for double-float only quantities. */
  5453. +#define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
  5454. +
  5455. +/* Modes for quad-float and double-float only quantities. */
  5456. +#define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
  5457. +
  5458. +/* Modes for quad-float pairs and double-float only quantities. */
  5459. +#define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
  5460. +
  5461. +/* Modes for condition codes. */
  5462. +#define CC_MODES (1 << (int) CC_MODE)
  5463. +#define CCFP_MODES (1 << (int) CCFP_MODE)
  5464. +
  5465. +/* Value is 1 if register/mode pair is acceptable on sparc.
  5466. +
  5467. + The funny mixture of D and T modes is because integer operations
  5468. + do not specially operate on tetra quantities, so non-quad-aligned
  5469. + registers can hold quadword quantities (except %o4 and %i4 because
  5470. + they cross fixed registers).
  5471. +
  5472. + ??? Note that, despite the settings, non-double-aligned parameter
  5473. + registers can hold double-word quantities in 32-bit mode. */
  5474. +
  5475. +/* This points to either the 32-bit or the 64-bit version. */
  5476. +static const int *hard_regno_mode_classes;
  5477. +
  5478. +static const int hard_32bit_mode_classes[] = {
  5479. + S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
  5480. + T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
  5481. + T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
  5482. + T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
  5483. +
  5484. + OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  5485. + OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  5486. + OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  5487. + OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
  5488. +
  5489. + /* FP regs f32 to f63. Only the even numbered registers actually exist,
  5490. + and none can hold SFmode/SImode values. */
  5491. + OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  5492. + OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  5493. + OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  5494. + OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  5495. +
  5496. + /* %fcc[0123] */
  5497. + CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
  5498. +
  5499. + /* %icc, %sfp, %gsr */
  5500. + CC_MODES, 0, D_MODES
  5501. +};
  5502. +
  5503. +static const int hard_64bit_mode_classes[] = {
  5504. + D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
  5505. + O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
  5506. + T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
  5507. + O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
  5508. +
  5509. + OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  5510. + OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  5511. + OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  5512. + OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
  5513. +
  5514. + /* FP regs f32 to f63. Only the even numbered registers actually exist,
  5515. + and none can hold SFmode/SImode values. */
  5516. + OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  5517. + OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  5518. + OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  5519. + OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  5520. +
  5521. + /* %fcc[0123] */
  5522. + CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
  5523. +
  5524. + /* %icc, %sfp, %gsr */
  5525. + CC_MODES, 0, D_MODES
  5526. +};
  5527. +
  5528. +static int sparc_mode_class [NUM_MACHINE_MODES];
  5529. +
  5530. +enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
  5531. +
  5532. +static void
  5533. +sparc_init_modes (void)
  5534. +{
  5535. + int i;
  5536. +
  5537. + for (i = 0; i < NUM_MACHINE_MODES; i++)
  5538. + {
  5539. + machine_mode m = (machine_mode) i;
  5540. + unsigned int size = GET_MODE_SIZE (m);
  5541. +
  5542. + switch (GET_MODE_CLASS (m))
  5543. + {
  5544. + case MODE_INT:
  5545. + case MODE_PARTIAL_INT:
  5546. + case MODE_COMPLEX_INT:
  5547. + if (size < 4)
  5548. + sparc_mode_class[i] = 1 << (int) H_MODE;
  5549. + else if (size == 4)
  5550. + sparc_mode_class[i] = 1 << (int) S_MODE;
  5551. + else if (size == 8)
  5552. + sparc_mode_class[i] = 1 << (int) D_MODE;
  5553. + else if (size == 16)
  5554. + sparc_mode_class[i] = 1 << (int) T_MODE;
  5555. + else if (size == 32)
  5556. + sparc_mode_class[i] = 1 << (int) O_MODE;
  5557. + else
  5558. + sparc_mode_class[i] = 0;
  5559. + break;
  5560. + case MODE_VECTOR_INT:
  5561. + if (size == 4)
  5562. + sparc_mode_class[i] = 1 << (int) SF_MODE;
  5563. + else if (size == 8)
  5564. + sparc_mode_class[i] = 1 << (int) DF_MODE;
  5565. + else
  5566. + sparc_mode_class[i] = 0;
  5567. + break;
  5568. + case MODE_FLOAT:
  5569. + case MODE_COMPLEX_FLOAT:
  5570. + if (size == 4)
  5571. + sparc_mode_class[i] = 1 << (int) SF_MODE;
  5572. + else if (size == 8)
  5573. + sparc_mode_class[i] = 1 << (int) DF_MODE;
  5574. + else if (size == 16)
  5575. + sparc_mode_class[i] = 1 << (int) TF_MODE;
  5576. + else if (size == 32)
  5577. + sparc_mode_class[i] = 1 << (int) OF_MODE;
  5578. + else
  5579. + sparc_mode_class[i] = 0;
  5580. + break;
  5581. + case MODE_CC:
  5582. + if (m == CCFPmode || m == CCFPEmode)
  5583. + sparc_mode_class[i] = 1 << (int) CCFP_MODE;
  5584. + else
  5585. + sparc_mode_class[i] = 1 << (int) CC_MODE;
  5586. + break;
  5587. + default:
  5588. + sparc_mode_class[i] = 0;
  5589. + break;
  5590. + }
  5591. + }
  5592. +
  5593. + if (TARGET_ARCH64)
  5594. + hard_regno_mode_classes = hard_64bit_mode_classes;
  5595. + else
  5596. + hard_regno_mode_classes = hard_32bit_mode_classes;
  5597. +
  5598. + /* Initialize the array used by REGNO_REG_CLASS. */
  5599. + for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
  5600. + {
  5601. + if (i < 16 && TARGET_V8PLUS)
  5602. + sparc_regno_reg_class[i] = I64_REGS;
  5603. + else if (i < 32 || i == FRAME_POINTER_REGNUM)
  5604. + sparc_regno_reg_class[i] = GENERAL_REGS;
  5605. + else if (i < 64)
  5606. + sparc_regno_reg_class[i] = FP_REGS;
  5607. + else if (i < 96)
  5608. + sparc_regno_reg_class[i] = EXTRA_FP_REGS;
  5609. + else if (i < 100)
  5610. + sparc_regno_reg_class[i] = FPCC_REGS;
  5611. + else
  5612. + sparc_regno_reg_class[i] = NO_REGS;
  5613. + }
  5614. +}
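
A standalone sketch of how the two tables built above are meant to be combined by their consumer (sparc_hard_regno_mode_ok, which is outside this hunk): a register accepts a mode iff the register's class mask has the bit of the mode's sparc_mode_class set. The class set here is abridged for brevity.

    #include <stdio.h>

    enum mode_class { H_MODE, S_MODE, D_MODE };
    #define S_MODES ((1 << H_MODE) | (1 << S_MODE))
    #define D_MODES (S_MODES | (1 << D_MODE))

    int
    main (void)
    {
      printf ("%d\n", (S_MODES & (1 << S_MODE)) != 0); /* 1: word fits        */
      printf ("%d\n", (S_MODES & (1 << D_MODE)) != 0); /* 0: double does not  */
      printf ("%d\n", (D_MODES & (1 << D_MODE)) != 0); /* 1                   */
      return 0;
    }
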
  5615. +
  5616. +/* Return whether REGNO, a global or FP register, must be saved/restored. */
  5617. +
  5618. +static inline bool
  5619. +save_global_or_fp_reg_p (unsigned int regno,
  5620. + int leaf_function ATTRIBUTE_UNUSED)
  5621. +{
  5622. + return !call_used_or_fixed_reg_p (regno) && df_regs_ever_live_p (regno);
  5623. +}
  5624. +
  5625. +/* Return whether the return address register (%i7) is needed. */
  5626. +
  5627. +static inline bool
  5628. +return_addr_reg_needed_p (int leaf_function)
  5629. +{
  5630. + /* If it is live, for example because of __builtin_return_address (0). */
  5631. + if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
  5632. + return true;
  5633. +
  5634. + /* Otherwise, it is needed as save register if %o7 is clobbered. */
  5635. + if (!leaf_function
  5636. + /* Loading the GOT register clobbers %o7. */
  5637. + || crtl->uses_pic_offset_table
  5638. + || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM))
  5639. + return true;
  5640. +
  5641. + return false;
  5642. +}
  5643. +
  5644. +/* Return whether REGNO, a local or in register, must be saved/restored. */
  5645. +
  5646. +static bool
  5647. +save_local_or_in_reg_p (unsigned int regno, int leaf_function)
  5648. +{
  5649. + /* General case: call-saved registers live at some point. */
  5650. + if (!call_used_or_fixed_reg_p (regno) && df_regs_ever_live_p (regno))
  5651. + return true;
  5652. +
  5653. + /* Frame pointer register (%fp) if needed. */
  5654. + if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
  5655. + return true;
  5656. +
  5657. + /* Return address register (%i7) if needed. */
  5658. + if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
  5659. + return true;
  5660. +
  5661. + /* GOT register (%l7) if needed. */
  5662. + if (got_register_rtx && regno == REGNO (got_register_rtx))
  5663. + return true;
  5664. +
  5665. + /* If the function accesses prior frames, the frame pointer and the return
  5666. + address of the previous frame must be saved on the stack. */
  5667. + if (crtl->accesses_prior_frames
  5668. + && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
  5669. + return true;
  5670. +
  5671. + return false;
  5672. +}
  5673. +
  5674. +/* Compute the frame size required by the function. This function is called
  5675. + during the reload pass and also by sparc_expand_prologue. */
  5676. +
  5677. +static HOST_WIDE_INT
  5678. +sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
  5679. +{
  5680. + HOST_WIDE_INT frame_size, apparent_frame_size;
  5681. + int args_size, n_global_fp_regs = 0;
  5682. + bool save_local_in_regs_p = false;
  5683. + unsigned int i;
  5684. +
  5685. + /* If the function allocates dynamic stack space, the dynamic offset is
  5686. + computed early and contains REG_PARM_STACK_SPACE, so we need to cope. */
  5687. + if (leaf_function && !cfun->calls_alloca)
  5688. + args_size = 0;
  5689. + else
  5690. + args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);
  5691. +
  5692. + /* Calculate space needed for global registers. */
  5693. + if (TARGET_ARCH64)
  5694. + {
  5695. + for (i = 0; i < 8; i++)
  5696. + if (save_global_or_fp_reg_p (i, 0))
  5697. + n_global_fp_regs += 2;
  5698. + }
  5699. + else
  5700. + {
  5701. + for (i = 0; i < 8; i += 2)
  5702. + if (save_global_or_fp_reg_p (i, 0)
  5703. + || save_global_or_fp_reg_p (i + 1, 0))
  5704. + n_global_fp_regs += 2;
  5705. + }
  5706. +
  5707. + /* In the flat window model, find out which local and in registers need to
  5708. + be saved. We don't reserve space in the current frame for them as they
  5709. + will be spilled into the register window save area of the caller's frame.
  5710. + However, as soon as we use this register window save area, we must create
  5711. + that of the current frame to make it the live one. */
  5712. + if (TARGET_FLAT)
  5713. + for (i = 16; i < 32; i++)
  5714. + if (save_local_or_in_reg_p (i, leaf_function))
  5715. + {
  5716. + save_local_in_regs_p = true;
  5717. + break;
  5718. + }
  5719. +
  5720. + /* Calculate space needed for FP registers. */
  5721. + for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
  5722. + if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
  5723. + n_global_fp_regs += 2;
  5724. +
  5725. + if (size == 0
  5726. + && n_global_fp_regs == 0
  5727. + && args_size == 0
  5728. + && !save_local_in_regs_p)
  5729. + frame_size = apparent_frame_size = 0;
  5730. + else
  5731. + {
  5732. + /* Start from the apparent frame size. */
  5733. + apparent_frame_size = ROUND_UP (size, 8) + n_global_fp_regs * 4;
  5734. +
  5735. + /* We need to add the size of the outgoing argument area. */
  5736. + frame_size = apparent_frame_size + ROUND_UP (args_size, 8);
  5737. +
  5738. + /* And that of the register window save area. */
  5739. + frame_size += FIRST_PARM_OFFSET (cfun->decl);
  5740. +
  5741. + /* Finally, bump to the appropriate alignment. */
  5742. + frame_size = SPARC_STACK_ALIGN (frame_size);
  5743. + }
  5744. +
  5745. + /* Set up values for use in prologue and epilogue. */
  5746. + sparc_frame_size = frame_size;
  5747. + sparc_apparent_frame_size = apparent_frame_size;
  5748. + sparc_n_global_fp_regs = n_global_fp_regs;
  5749. + sparc_save_local_in_regs_p = save_local_in_regs_p;
  5750. +
  5751. + return frame_size;
  5752. +}
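
A worked example of the arithmetic above, under assumed 32-bit parameters: 8-byte rounding and alignment, with 64 bytes standing in for FIRST_PARM_OFFSET (the register window save area); the exact values are illustrative only.

    #include <stdio.h>

    #define ROUND_UP(x, a) (((x) + (a) - 1) & ~((long) (a) - 1))

    int
    main (void)
    {
      long size = 20;            /* local variables                 */
      int n_global_fp_regs = 2;  /* one call-saved FP register pair */
      long args_size = 24;       /* outgoing arguments              */

      long apparent = ROUND_UP (size, 8) + n_global_fp_regs * 4; /* 32  */
      long frame = apparent + ROUND_UP (args_size, 8) + 64;      /* 120 */
      frame = ROUND_UP (frame, 8);                               /* 120 */
      printf ("apparent=%ld frame=%ld\n", apparent, frame);
      return 0;
    }
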
  5753. +
  5754. +/* Implement the macro INITIAL_ELIMINATION_OFFSET, return the OFFSET. */
  5755. +
  5756. +int
  5757. +sparc_initial_elimination_offset (int to)
  5758. +{
  5759. + int offset;
  5760. +
  5761. + if (to == STACK_POINTER_REGNUM)
  5762. + offset = sparc_compute_frame_size (get_frame_size (), crtl->is_leaf);
  5763. + else
  5764. + offset = 0;
  5765. +
  5766. + offset += SPARC_STACK_BIAS;
  5767. + return offset;
  5768. +}
  5769. +
  5770. +/* Output any necessary .register pseudo-ops. */
  5771. +
  5772. +void
  5773. +sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
  5774. +{
  5775. + int i;
  5776. +
  5777. + if (TARGET_ARCH32)
  5778. + return;
  5779. +
  5780. + /* Check if %g[2367] were used without
  5781. + .register being printed for them already. */
  5782. + for (i = 2; i < 8; i++)
  5783. + {
  5784. + if (df_regs_ever_live_p (i)
  5785. + && ! sparc_hard_reg_printed [i])
  5786. + {
  5787. + sparc_hard_reg_printed [i] = 1;
  5788. + /* %g7 is used as TLS base register, use #ignore
  5789. + for it instead of #scratch. */
  5790. + fprintf (file, "\t.register\t%%g%d, #%s\n", i,
  5791. + i == 7 ? "ignore" : "scratch");
  5792. + }
  5793. + if (i == 3) i = 5;
  5794. + }
  5795. +}
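
A standalone sketch of the loop above: the "if (i == 3) i = 5;" trick makes it visit exactly %g2, %g3, %g6 and %g7, matching the %g[2367] comment.

    #include <stdio.h>

    int
    main (void)
    {
      for (int i = 2; i < 8; i++)
        {
          printf ("%%g%d\n", i);  /* prints %g2 %g3 %g6 %g7 */
          if (i == 3)
            i = 5;
        }
      return 0;
    }
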
  5796. +
  5797. +#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
  5798. +
  5799. +#if PROBE_INTERVAL > 4096
  5800. +#error Cannot use indexed addressing mode for stack probing
  5801. +#endif
  5802. +
  5803. +/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
  5804. + inclusive. These are offsets from the current stack pointer.
  5805. +
  5806. + Note that we don't use the REG+REG addressing mode for the probes because
  5807. + of the stack bias in 64-bit mode. And it doesn't really buy us anything
   5808. + as the offsets are small, so the advantages of having a single code path win here. */
  5809. +
  5810. +static void
  5811. +sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
  5812. +{
  5813. + rtx g1 = gen_rtx_REG (Pmode, 1);
  5814. +
  5815. + /* See if we have a constant small number of probes to generate. If so,
  5816. + that's the easy case. */
  5817. + if (size <= PROBE_INTERVAL)
  5818. + {
  5819. + emit_move_insn (g1, GEN_INT (first));
  5820. + emit_insn (gen_rtx_SET (g1,
  5821. + gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
  5822. + emit_stack_probe (plus_constant (Pmode, g1, -size));
  5823. + }
  5824. +
  5825. + /* The run-time loop is made up of 9 insns in the generic case while the
  5826. + compile-time loop is made up of 4+2*(n-2) insns for n # of intervals. */
  5827. + else if (size <= 4 * PROBE_INTERVAL)
  5828. + {
  5829. + HOST_WIDE_INT i;
  5830. +
  5831. + emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
  5832. + emit_insn (gen_rtx_SET (g1,
  5833. + gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
  5834. + emit_stack_probe (g1);
  5835. +
  5836. + /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
  5837. + it exceeds SIZE. If only two probes are needed, this will not
  5838. + generate any code. Then probe at FIRST + SIZE. */
  5839. + for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
  5840. + {
  5841. + emit_insn (gen_rtx_SET (g1,
  5842. + plus_constant (Pmode, g1, -PROBE_INTERVAL)));
  5843. + emit_stack_probe (g1);
  5844. + }
  5845. +
  5846. + emit_stack_probe (plus_constant (Pmode, g1,
  5847. + (i - PROBE_INTERVAL) - size));
  5848. + }
  5849. +
  5850. + /* Otherwise, do the same as above, but in a loop. Note that we must be
  5851. + extra careful with variables wrapping around because we might be at
  5852. + the very top (or the very bottom) of the address space and we have
  5853. + to be able to handle this case properly; in particular, we use an
  5854. + equality test for the loop condition. */
  5855. + else
  5856. + {
  5857. + HOST_WIDE_INT rounded_size;
  5858. + rtx g4 = gen_rtx_REG (Pmode, 4);
  5859. +
  5860. + emit_move_insn (g1, GEN_INT (first));
  5861. +
  5862. +
  5863. + /* Step 1: round SIZE to the previous multiple of the interval. */
  5864. +
  5865. + rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
  5866. + emit_move_insn (g4, GEN_INT (rounded_size));
  5867. +
  5868. +
  5869. + /* Step 2: compute initial and final value of the loop counter. */
  5870. +
  5871. + /* TEST_ADDR = SP + FIRST. */
  5872. + emit_insn (gen_rtx_SET (g1,
  5873. + gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
  5874. +
  5875. + /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
  5876. + emit_insn (gen_rtx_SET (g4, gen_rtx_MINUS (Pmode, g1, g4)));
  5877. +
  5878. +
  5879. + /* Step 3: the loop
  5880. +
  5881. + while (TEST_ADDR != LAST_ADDR)
  5882. + {
  5883. + TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
  5884. + probe at TEST_ADDR
  5885. + }
  5886. +
  5887. + probes at FIRST + N * PROBE_INTERVAL for values of N from 1
  5888. + until it is equal to ROUNDED_SIZE. */
  5889. +
  5890. + if (TARGET_ARCH64)
  5891. + emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
  5892. + else
  5893. + emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
  5894. +
  5895. +
  5896. + /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
  5897. + that SIZE is equal to ROUNDED_SIZE. */
  5898. +
  5899. + if (size != rounded_size)
  5900. + emit_stack_probe (plus_constant (Pmode, g4, rounded_size - size));
  5901. + }
  5902. +
  5903. + /* Make sure nothing is scheduled before we are done. */
  5904. + emit_insn (gen_blockage ());
  5905. +}
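
A standalone sketch of the probe offsets the mid-size case above generates, for assumed FIRST = 4096 and SIZE = 10000 (offsets are below the incoming stack pointer).

    #include <stdio.h>

    #define PROBE_INTERVAL 4096

    int
    main (void)
    {
      long first = 4096, size = 10000, i;

      printf ("probe at sp - %ld\n", first + PROBE_INTERVAL);  /* 8192  */
      for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
        printf ("probe at sp - %ld\n", first + i);             /* 12288 */
      printf ("probe at sp - %ld\n", first + size);            /* 14096 */
      return 0;
    }
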
  5906. +
  5907. +/* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
  5908. + absolute addresses. */
  5909. +
  5910. +const char *
  5911. +output_probe_stack_range (rtx reg1, rtx reg2)
  5912. +{
  5913. + static int labelno = 0;
  5914. + char loop_lab[32];
  5915. + rtx xops[2];
  5916. +
  5917. + ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
  5918. +
  5919. + /* Loop. */
  5920. + ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
  5921. +
  5922. + /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
  5923. + xops[0] = reg1;
  5924. + xops[1] = GEN_INT (-PROBE_INTERVAL);
  5925. + output_asm_insn ("add\t%0, %1, %0", xops);
  5926. +
  5927. + /* Test if TEST_ADDR == LAST_ADDR. */
  5928. + xops[1] = reg2;
  5929. + output_asm_insn ("cmp\t%0, %1", xops);
  5930. +
  5931. + /* Probe at TEST_ADDR and branch. */
  5932. + if (TARGET_ARCH64)
  5933. + fputs ("\tbne,pt\t%xcc,", asm_out_file);
  5934. + else
  5935. + fputs ("\tbne\t", asm_out_file);
  5936. + assemble_name_raw (asm_out_file, loop_lab);
  5937. + fputc ('\n', asm_out_file);
  5938. + xops[1] = GEN_INT (SPARC_STACK_BIAS);
  5939. + output_asm_insn (" st\t%%g0, [%0+%1]", xops);
  5940. +
  5941. + return "";
  5942. +}
  5943. +
  5944. +/* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
  5945. + needed. LOW is supposed to be double-word aligned for 32-bit registers.
  5946. + SAVE_P decides whether a register must be saved/restored. ACTION_TRUE
  5947. + is the action to be performed if SAVE_P returns true and ACTION_FALSE
  5948. + the action to be performed if it returns false. Return the new offset. */
  5949. +
  5950. +typedef bool (*sorr_pred_t) (unsigned int, int);
  5951. +typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
  5952. +
  5953. +static int
  5954. +emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
  5955. + int offset, int leaf_function, sorr_pred_t save_p,
  5956. + sorr_act_t action_true, sorr_act_t action_false)
  5957. +{
  5958. + unsigned int i;
  5959. + rtx mem;
  5960. + rtx_insn *insn;
  5961. +
  5962. + if (TARGET_ARCH64 && high <= 32)
  5963. + {
  5964. + int fp_offset = -1;
  5965. +
  5966. + for (i = low; i < high; i++)
  5967. + {
  5968. + if (save_p (i, leaf_function))
  5969. + {
  5970. + mem = gen_frame_mem (DImode, plus_constant (Pmode,
  5971. + base, offset));
  5972. + if (action_true == SORR_SAVE)
  5973. + {
  5974. + insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
  5975. + RTX_FRAME_RELATED_P (insn) = 1;
  5976. + }
  5977. + else /* action_true == SORR_RESTORE */
  5978. + {
  5979. + /* The frame pointer must be restored last since its old
  5980. + value may be used as base address for the frame. This
  5981. + is problematic in 64-bit mode only because of the lack
   5982. + of a double-word load instruction. */
  5983. + if (i == HARD_FRAME_POINTER_REGNUM)
  5984. + fp_offset = offset;
  5985. + else
  5986. + emit_move_insn (gen_rtx_REG (DImode, i), mem);
  5987. + }
  5988. + offset += 8;
  5989. + }
  5990. + else if (action_false == SORR_ADVANCE)
  5991. + offset += 8;
  5992. + }
  5993. +
  5994. + if (fp_offset >= 0)
  5995. + {
  5996. + mem = gen_frame_mem (DImode, plus_constant (Pmode, base, fp_offset));
  5997. + emit_move_insn (hard_frame_pointer_rtx, mem);
  5998. + }
  5999. + }
  6000. + else
  6001. + {
  6002. + for (i = low; i < high; i += 2)
  6003. + {
  6004. + bool reg0 = save_p (i, leaf_function);
  6005. + bool reg1 = save_p (i + 1, leaf_function);
  6006. + machine_mode mode;
  6007. + int regno;
  6008. +
  6009. + if (reg0 && reg1)
  6010. + {
  6011. + mode = SPARC_INT_REG_P (i) ? E_DImode : E_DFmode;
  6012. + regno = i;
  6013. + }
  6014. + else if (reg0)
  6015. + {
  6016. + mode = SPARC_INT_REG_P (i) ? E_SImode : E_SFmode;
  6017. + regno = i;
  6018. + }
  6019. + else if (reg1)
  6020. + {
  6021. + mode = SPARC_INT_REG_P (i) ? E_SImode : E_SFmode;
  6022. + regno = i + 1;
  6023. + offset += 4;
  6024. + }
  6025. + else
  6026. + {
  6027. + if (action_false == SORR_ADVANCE)
  6028. + offset += 8;
  6029. + continue;
  6030. + }
  6031. +
  6032. + mem = gen_frame_mem (mode, plus_constant (Pmode, base, offset));
  6033. + if (action_true == SORR_SAVE)
  6034. + {
  6035. + insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
  6036. + RTX_FRAME_RELATED_P (insn) = 1;
  6037. + if (mode == DImode)
  6038. + {
  6039. + rtx set1, set2;
  6040. + mem = gen_frame_mem (SImode, plus_constant (Pmode, base,
  6041. + offset));
  6042. + set1 = gen_rtx_SET (mem, gen_rtx_REG (SImode, regno));
  6043. + RTX_FRAME_RELATED_P (set1) = 1;
  6044. + mem
  6045. + = gen_frame_mem (SImode, plus_constant (Pmode, base,
  6046. + offset + 4));
  6047. + set2 = gen_rtx_SET (mem, gen_rtx_REG (SImode, regno + 1));
  6048. + RTX_FRAME_RELATED_P (set2) = 1;
  6049. + add_reg_note (insn, REG_FRAME_RELATED_EXPR,
  6050. + gen_rtx_PARALLEL (VOIDmode,
  6051. + gen_rtvec (2, set1, set2)));
  6052. + }
  6053. + }
  6054. + else /* action_true == SORR_RESTORE */
  6055. + emit_move_insn (gen_rtx_REG (mode, regno), mem);
  6056. +
  6057. + /* Bump and round down to double word
  6058. + in case we already bumped by 4. */
  6059. + offset = ROUND_DOWN (offset + 8, 8);
  6060. + }
  6061. + }
  6062. +
  6063. + return offset;
  6064. +}
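
A standalone sketch of the final offset bump in the pairwise loop above: when only the odd register of a pair was saved, OFFSET has already been advanced by 4, and rounding down after adding 8 lands on the next double-word slot instead of drifting by 4 per pair.

    #include <stdio.h>

    #define ROUND_DOWN(x, a) ((x) & ~((a) - 1))

    int
    main (void)
    {
      int offset = 64;
      offset += 4;                          /* odd register saved at 68 */
      offset = ROUND_DOWN (offset + 8, 8);  /* (68 + 8) & ~7 = 72       */
      printf ("%d\n", offset);
      return 0;
    }
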
  6065. +
  6066. +/* Emit code to adjust BASE to OFFSET. Return the new base. */
  6067. +
  6068. +static rtx
  6069. +emit_adjust_base_to_offset (rtx base, int offset)
  6070. +{
  6071. + /* ??? This might be optimized a little as %g1 might already have a
  6072. + value close enough that a single add insn will do. */
  6073. + /* ??? Although, all of this is probably only a temporary fix because
  6074. + if %g1 can hold a function result, then sparc_expand_epilogue will
  6075. + lose (the result will be clobbered). */
  6076. + rtx new_base = gen_rtx_REG (Pmode, 1);
  6077. + emit_move_insn (new_base, GEN_INT (offset));
  6078. + emit_insn (gen_rtx_SET (new_base, gen_rtx_PLUS (Pmode, base, new_base)));
  6079. + return new_base;
  6080. +}
  6081. +
  6082. +/* Emit code to save/restore call-saved global and FP registers. */
  6083. +
  6084. +static void
  6085. +emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
  6086. +{
  6087. + if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
  6088. + {
  6089. + base = emit_adjust_base_to_offset (base, offset);
  6090. + offset = 0;
  6091. + }
  6092. +
  6093. + offset
  6094. + = emit_save_or_restore_regs (0, 8, base, offset, 0,
  6095. + save_global_or_fp_reg_p, action, SORR_NONE);
  6096. + emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
  6097. + save_global_or_fp_reg_p, action, SORR_NONE);
  6098. +}
  6099. +
  6100. +/* Emit code to save/restore call-saved local and in registers. */
  6101. +
  6102. +static void
  6103. +emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
  6104. +{
  6105. + if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
  6106. + {
  6107. + base = emit_adjust_base_to_offset (base, offset);
  6108. + offset = 0;
  6109. + }
  6110. +
  6111. + emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
  6112. + save_local_or_in_reg_p, action, SORR_ADVANCE);
  6113. +}
  6114. +
  6115. +/* Emit a window_save insn. */
  6116. +
  6117. +static rtx_insn *
  6118. +emit_window_save (rtx increment)
  6119. +{
  6120. + rtx_insn *insn = emit_insn (gen_window_save (increment));
  6121. + RTX_FRAME_RELATED_P (insn) = 1;
  6122. +
  6123. + /* The incoming return address (%o7) is saved in %i7. */
  6124. + add_reg_note (insn, REG_CFA_REGISTER,
  6125. + gen_rtx_SET (gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
  6126. + gen_rtx_REG (Pmode,
  6127. + INCOMING_RETURN_ADDR_REGNUM)));
  6128. +
  6129. + /* The window save event. */
  6130. + add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);
  6131. +
  6132. + /* The CFA is %fp, the hard frame pointer. */
  6133. + add_reg_note (insn, REG_CFA_DEF_CFA,
  6134. + plus_constant (Pmode, hard_frame_pointer_rtx,
  6135. + INCOMING_FRAME_SP_OFFSET));
  6136. +
  6137. + return insn;
  6138. +}
  6139. +
  6140. +/* Generate an increment for the stack pointer. */
  6141. +
  6142. +static rtx
  6143. +gen_stack_pointer_inc (rtx increment)
  6144. +{
  6145. + return gen_rtx_SET (stack_pointer_rtx,
  6146. + gen_rtx_PLUS (Pmode,
  6147. + stack_pointer_rtx,
  6148. + increment));
  6149. +}
  6150. +
  6151. +/* Expand the function prologue. The prologue is responsible for reserving
  6152. + storage for the frame, saving the call-saved registers and loading the
  6153. + GOT register if needed. */
  6154. +
  6155. +void
  6156. +sparc_expand_prologue (void)
  6157. +{
  6158. + HOST_WIDE_INT size;
  6159. + rtx_insn *insn;
  6160. +
  6161. + /* Compute a snapshot of crtl->uses_only_leaf_regs. Relying
  6162. + on the final value of the flag means deferring the prologue/epilogue
  6163. + expansion until just before the second scheduling pass, which is too
  6164. + late to emit multiple epilogues or return insns.
  6165. +
  6166. + Of course we are making the assumption that the value of the flag
  6167. + will not change between now and its final value. Of the three parts
  6168. + of the formula, only the last one can reasonably vary. Let's take a
   6169. + closer look, after assuming that the first two are set to true
  6170. + (otherwise the last value is effectively silenced).
  6171. +
  6172. + If only_leaf_regs_used returns false, the global predicate will also
  6173. + be false so the actual frame size calculated below will be positive.
  6174. + As a consequence, the save_register_window insn will be emitted in
  6175. + the instruction stream; now this insn explicitly references %fp
  6176. + which is not a leaf register so only_leaf_regs_used will always
  6177. + return false subsequently.
  6178. +
  6179. + If only_leaf_regs_used returns true, we hope that the subsequent
  6180. + optimization passes won't cause non-leaf registers to pop up. For
  6181. + example, the regrename pass has special provisions to not rename to
  6182. + non-leaf registers in a leaf function. */
  6183. + sparc_leaf_function_p
  6184. + = optimize > 0 && crtl->is_leaf && only_leaf_regs_used ();
  6185. +
  6186. + size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
  6187. +
  6188. + if (flag_stack_usage_info)
  6189. + current_function_static_stack_size = size;
  6190. +
  6191. + if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
  6192. + || flag_stack_clash_protection)
  6193. + {
  6194. + if (crtl->is_leaf && !cfun->calls_alloca)
  6195. + {
  6196. + if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
  6197. + sparc_emit_probe_stack_range (get_stack_check_protect (),
  6198. + size - get_stack_check_protect ());
  6199. + }
  6200. + else if (size > 0)
  6201. + sparc_emit_probe_stack_range (get_stack_check_protect (), size);
  6202. + }
  6203. +
  6204. + if (size == 0)
  6205. + ; /* do nothing. */
  6206. + else if (sparc_leaf_function_p)
  6207. + {
  6208. + rtx size_int_rtx = GEN_INT (-size);
  6209. +
  6210. + if (size <= 4096)
  6211. + insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
  6212. + else if (size <= 8192)
  6213. + {
  6214. + insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
  6215. + RTX_FRAME_RELATED_P (insn) = 1;
  6216. +
  6217. + /* %sp is still the CFA register. */
  6218. + insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
  6219. + }
  6220. + else
  6221. + {
  6222. + rtx size_rtx = gen_rtx_REG (Pmode, 1);
  6223. + emit_move_insn (size_rtx, size_int_rtx);
  6224. + insn = emit_insn (gen_stack_pointer_inc (size_rtx));
  6225. + add_reg_note (insn, REG_FRAME_RELATED_EXPR,
  6226. + gen_stack_pointer_inc (size_int_rtx));
  6227. + }
  6228. +
  6229. + RTX_FRAME_RELATED_P (insn) = 1;
  6230. + }
  6231. + else
  6232. + {
  6233. + rtx size_int_rtx = GEN_INT (-size);
  6234. +
  6235. + if (size <= 4096)
  6236. + emit_window_save (size_int_rtx);
  6237. + else if (size <= 8192)
  6238. + {
  6239. + emit_window_save (GEN_INT (-4096));
  6240. +
  6241. + /* %sp is not the CFA register anymore. */
  6242. + emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
  6243. +
  6244. + /* Make sure no %fp-based store is issued until after the frame is
  6245. + established. The offset between the frame pointer and the stack
  6246. + pointer is calculated relative to the value of the stack pointer
  6247. + at the end of the function prologue, and moving instructions that
  6248. + access the stack via the frame pointer between the instructions
  6249. + that decrement the stack pointer could result in accessing the
  6250. + register window save area, which is volatile. */
  6251. + emit_insn (gen_frame_blockage ());
  6252. + }
  6253. + else
  6254. + {
  6255. + rtx size_rtx = gen_rtx_REG (Pmode, 1);
  6256. + emit_move_insn (size_rtx, size_int_rtx);
  6257. + emit_window_save (size_rtx);
  6258. + }
  6259. + }
  6260. +
  6261. + if (sparc_leaf_function_p)
  6262. + {
  6263. + sparc_frame_base_reg = stack_pointer_rtx;
  6264. + sparc_frame_base_offset = size + SPARC_STACK_BIAS;
  6265. + }
  6266. + else
  6267. + {
  6268. + sparc_frame_base_reg = hard_frame_pointer_rtx;
  6269. + sparc_frame_base_offset = SPARC_STACK_BIAS;
  6270. + }
  6271. +
  6272. + if (sparc_n_global_fp_regs > 0)
  6273. + emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
  6274. + sparc_frame_base_offset
  6275. + - sparc_apparent_frame_size,
  6276. + SORR_SAVE);
  6277. +
  6278. + /* Advertise that the data calculated just above are now valid. */
  6279. + sparc_prologue_data_valid_p = true;
  6280. +}
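
A standalone sketch of why the prologue above splits on 4096 and 8192: SPARC add/save immediates are 13-bit signed (simm13, range -4096..4095), so one stack-pointer increment covers sizes up to 4096, two cover up to 8192, and anything larger goes through scratch register %g1.

    #include <stdio.h>

    static int
    fits_simm13 (long v)
    {
      return v >= -4096 && v <= 4095;
    }

    int
    main (void)
    {
      long size = 6000;

      if (fits_simm13 (-size))
        printf ("add %%sp, %ld, %%sp\n", -size);
      else if (size <= 8192)
        printf ("add %%sp, -4096, %%sp ; add %%sp, %ld, %%sp\n", 4096 - size);
      else
        printf ("set %ld, %%g1 ; add %%sp, %%g1, %%sp\n", -size);
      return 0;
    }
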
  6281. +
  6282. +/* Expand the function prologue. The prologue is responsible for reserving
  6283. + storage for the frame, saving the call-saved registers and loading the
  6284. + GOT register if needed. */
  6285. +
  6286. +void
  6287. +sparc_flat_expand_prologue (void)
  6288. +{
  6289. + HOST_WIDE_INT size;
  6290. + rtx_insn *insn;
  6291. +
  6292. + sparc_leaf_function_p = optimize > 0 && crtl->is_leaf;
  6293. +
  6294. + size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
  6295. +
  6296. + if (flag_stack_usage_info)
  6297. + current_function_static_stack_size = size;
  6298. +
  6299. + if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
  6300. + || flag_stack_clash_protection)
  6301. + {
  6302. + if (crtl->is_leaf && !cfun->calls_alloca)
  6303. + {
  6304. + if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
  6305. + sparc_emit_probe_stack_range (get_stack_check_protect (),
  6306. + size - get_stack_check_protect ());
  6307. + }
  6308. + else if (size > 0)
  6309. + sparc_emit_probe_stack_range (get_stack_check_protect (), size);
  6310. + }
  6311. +
  6312. + if (sparc_save_local_in_regs_p)
  6313. + emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
  6314. + SORR_SAVE);
  6315. +
  6316. + if (size == 0)
  6317. + ; /* do nothing. */
  6318. + else
  6319. + {
  6320. + rtx size_int_rtx, size_rtx;
  6321. +
  6322. + size_rtx = size_int_rtx = GEN_INT (-size);
  6323. +
  6324. + /* We establish the frame (i.e. decrement the stack pointer) first, even
  6325. + if we use a frame pointer, because we cannot clobber any call-saved
  6326. + registers, including the frame pointer, if we haven't created a new
  6327. + register save area, for the sake of compatibility with the ABI. */
  6328. + if (size <= 4096)
  6329. + insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
  6330. + else if (size <= 8192 && !frame_pointer_needed)
  6331. + {
  6332. + insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
  6333. + RTX_FRAME_RELATED_P (insn) = 1;
  6334. + insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
  6335. + }
  6336. + else
  6337. + {
  6338. + size_rtx = gen_rtx_REG (Pmode, 1);
  6339. + emit_move_insn (size_rtx, size_int_rtx);
  6340. + insn = emit_insn (gen_stack_pointer_inc (size_rtx));
  6341. + add_reg_note (insn, REG_CFA_ADJUST_CFA,
  6342. + gen_stack_pointer_inc (size_int_rtx));
  6343. + }
  6344. + RTX_FRAME_RELATED_P (insn) = 1;
  6345. +
  6346. + /* Ensure nothing is scheduled until after the frame is established. */
  6347. + emit_insn (gen_blockage ());
  6348. +
  6349. + if (frame_pointer_needed)
  6350. + {
  6351. + insn = emit_insn (gen_rtx_SET (hard_frame_pointer_rtx,
  6352. + gen_rtx_MINUS (Pmode,
  6353. + stack_pointer_rtx,
  6354. + size_rtx)));
  6355. + RTX_FRAME_RELATED_P (insn) = 1;
  6356. +
  6357. + add_reg_note (insn, REG_CFA_ADJUST_CFA,
  6358. + gen_rtx_SET (hard_frame_pointer_rtx,
  6359. + plus_constant (Pmode, stack_pointer_rtx,
  6360. + size)));
  6361. + }
  6362. +
  6363. + if (return_addr_reg_needed_p (sparc_leaf_function_p))
  6364. + {
  6365. + rtx o7 = gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM);
  6366. + rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
  6367. +
  6368. + insn = emit_move_insn (i7, o7);
  6369. + RTX_FRAME_RELATED_P (insn) = 1;
  6370. +
  6371. + add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (i7, o7));
  6372. +
  6373. + /* Prevent this instruction from ever being considered dead,
  6374. + even if this function has no epilogue. */
  6375. + emit_use (i7);
  6376. + }
  6377. + }
  6378. +
  6379. + if (frame_pointer_needed)
  6380. + {
  6381. + sparc_frame_base_reg = hard_frame_pointer_rtx;
  6382. + sparc_frame_base_offset = SPARC_STACK_BIAS;
  6383. + }
  6384. + else
  6385. + {
  6386. + sparc_frame_base_reg = stack_pointer_rtx;
  6387. + sparc_frame_base_offset = size + SPARC_STACK_BIAS;
  6388. + }
  6389. +
  6390. + if (sparc_n_global_fp_regs > 0)
  6391. + emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
  6392. + sparc_frame_base_offset
  6393. + - sparc_apparent_frame_size,
  6394. + SORR_SAVE);
  6395. +
  6396. + /* Advertise that the data calculated just above are now valid. */
  6397. + sparc_prologue_data_valid_p = true;
  6398. +}
  6399. +
  6400. +/* This function generates the assembly code for function entry, which boils
  6401. + down to emitting the necessary .register directives. */
  6402. +
  6403. +static void
  6404. +sparc_asm_function_prologue (FILE *file)
  6405. +{
  6406. + /* Check that the assumption we made in sparc_expand_prologue is valid. */
  6407. + if (!TARGET_FLAT)
  6408. + gcc_assert (sparc_leaf_function_p == crtl->uses_only_leaf_regs);
  6409. +
  6410. + sparc_output_scratch_registers (file);
  6411. +}
  6412. +
  6413. +/* Expand the function epilogue, either normal or part of a sibcall.
  6414. + We emit all the instructions except the return or the call. */
  6415. +
  6416. +void
  6417. +sparc_expand_epilogue (bool for_eh)
  6418. +{
  6419. + HOST_WIDE_INT size = sparc_frame_size;
  6420. +
  6421. + if (cfun->calls_alloca)
  6422. + emit_insn (gen_frame_blockage ());
  6423. +
  6424. + if (sparc_n_global_fp_regs > 0)
  6425. + emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
  6426. + sparc_frame_base_offset
  6427. + - sparc_apparent_frame_size,
  6428. + SORR_RESTORE);
  6429. +
  6430. + if (size == 0 || for_eh)
  6431. + ; /* do nothing. */
  6432. + else if (sparc_leaf_function_p)
  6433. + {
  6434. + if (size <= 4096)
  6435. + emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
  6436. + else if (size <= 8192)
  6437. + {
  6438. + emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
  6439. + emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
  6440. + }
  6441. + else
  6442. + {
  6443. + rtx reg = gen_rtx_REG (Pmode, 1);
  6444. + emit_move_insn (reg, GEN_INT (size));
  6445. + emit_insn (gen_stack_pointer_inc (reg));
  6446. + }
  6447. + }
  6448. +}
  6449. +
  6450. +/* Expand the function epilogue, either normal or part of a sibcall.
  6451. + We emit all the instructions except the return or the call. */
  6452. +
  6453. +void
  6454. +sparc_flat_expand_epilogue (bool for_eh)
  6455. +{
  6456. + HOST_WIDE_INT size = sparc_frame_size;
  6457. +
  6458. + if (sparc_n_global_fp_regs > 0)
  6459. + emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
  6460. + sparc_frame_base_offset
  6461. + - sparc_apparent_frame_size,
  6462. + SORR_RESTORE);
  6463. +
  6464. + /* If we have a frame pointer, we'll need both to restore it before the
  6465. + frame is destroyed and use its current value in destroying the frame.
  6466. + Since we don't have an atomic way to do that in the flat window model,
  6467. + we save the current value into a temporary register (%g1). */
  6468. + if (frame_pointer_needed && !for_eh)
  6469. + emit_move_insn (gen_rtx_REG (Pmode, 1), hard_frame_pointer_rtx);
  6470. +
  6471. + if (return_addr_reg_needed_p (sparc_leaf_function_p))
  6472. + emit_move_insn (gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM),
  6473. + gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM));
  6474. +
  6475. + if (sparc_save_local_in_regs_p)
  6476. + emit_save_or_restore_local_in_regs (sparc_frame_base_reg,
  6477. + sparc_frame_base_offset,
  6478. + SORR_RESTORE);
  6479. +
  6480. + if (size == 0 || for_eh)
  6481. + ; /* do nothing. */
  6482. + else if (frame_pointer_needed)
  6483. + {
  6484. + /* Make sure the frame is destroyed after everything else is done. */
  6485. + emit_insn (gen_blockage ());
  6486. +
  6487. + emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
  6488. + }
  6489. + else
  6490. + {
  6491. + /* Likewise. */
  6492. + emit_insn (gen_blockage ());
  6493. +
  6494. + if (size <= 4096)
  6495. + emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
  6496. + else if (size <= 8192)
  6497. + {
  6498. + emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
  6499. + emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
  6500. + }
  6501. + else
  6502. + {
  6503. + rtx reg = gen_rtx_REG (Pmode, 1);
  6504. + emit_move_insn (reg, GEN_INT (size));
  6505. + emit_insn (gen_stack_pointer_inc (reg));
  6506. + }
  6507. + }
  6508. +}
  6509. +
  6510. +/* Return true if it is appropriate to emit `return' instructions in the
  6511. + body of a function. */
  6512. +
  6513. +bool
  6514. +sparc_can_use_return_insn_p (void)
  6515. +{
  6516. + return sparc_prologue_data_valid_p
  6517. + && sparc_n_global_fp_regs == 0
  6518. + && (TARGET_FLAT
  6519. + ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
  6520. + : (sparc_frame_size == 0 || !sparc_leaf_function_p));
  6521. +}
  6522. +
  6523. +/* This function generates the assembly code for function exit. */
  6524. +
  6525. +static void
  6526. +sparc_asm_function_epilogue (FILE *file)
  6527. +{
  6528. + /* If the last two instructions of a function are "call foo; dslot;"
  6529. + the return address might point to the first instruction in the next
  6530. + function and we have to output a dummy nop for the sake of sane
  6531. + backtraces in such cases. This is pointless for sibling calls since
  6532. + the return address is explicitly adjusted. */
  6533. +
  6534. + rtx_insn *insn = get_last_insn ();
  6535. +
  6536. + rtx last_real_insn = prev_real_insn (insn);
  6537. + if (last_real_insn
  6538. + && NONJUMP_INSN_P (last_real_insn)
  6539. + && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
  6540. + last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
  6541. +
  6542. + if (last_real_insn
  6543. + && CALL_P (last_real_insn)
  6544. + && !SIBLING_CALL_P (last_real_insn))
  6545. + fputs ("\tnop\n", file);
  6546. +
  6547. + sparc_output_deferred_case_vectors ();
  6548. +}
  6549. +
  6550. +/* Output a 'restore' instruction. */
  6551. +
  6552. +static void
  6553. +output_restore (rtx pat)
  6554. +{
  6555. + rtx operands[3];
  6556. +
  6557. + if (! pat)
  6558. + {
  6559. + fputs ("\t restore\n", asm_out_file);
  6560. + return;
  6561. + }
  6562. +
  6563. + gcc_assert (GET_CODE (pat) == SET);
  6564. +
  6565. + operands[0] = SET_DEST (pat);
  6566. + pat = SET_SRC (pat);
  6567. +
  6568. + switch (GET_CODE (pat))
  6569. + {
  6570. + case PLUS:
  6571. + operands[1] = XEXP (pat, 0);
  6572. + operands[2] = XEXP (pat, 1);
  6573. + output_asm_insn (" restore %r1, %2, %Y0", operands);
  6574. + break;
  6575. + case LO_SUM:
  6576. + operands[1] = XEXP (pat, 0);
  6577. + operands[2] = XEXP (pat, 1);
  6578. + output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
  6579. + break;
  6580. + case ASHIFT:
  6581. + operands[1] = XEXP (pat, 0);
  6582. + gcc_assert (XEXP (pat, 1) == const1_rtx);
  6583. + output_asm_insn (" restore %r1, %r1, %Y0", operands);
  6584. + break;
  6585. + default:
  6586. + operands[1] = pat;
  6587. + output_asm_insn (" restore %%g0, %1, %Y0", operands);
  6588. + break;
  6589. + }
  6590. +}
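+
+ /* Editor's note: for illustration, the cases above emit (register names
+    are only examples):
+      no pattern                  ->  restore
+      (set rd (plus rs1 5))       ->  restore rs1, 5, rd
+      (set rd (lo_sum rs1 sym))   ->  restore rs1, %lo(sym), rd
+      (set rd (ashift rs1 1))     ->  restore rs1, rs1, rd   (x << 1 == x + x)
+      (set rd rs)                 ->  restore %g0, rs, rd  */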
  6591. +
  6592. +/* Output a return. */
  6593. +
  6594. +const char *
  6595. +output_return (rtx_insn *insn)
  6596. +{
  6597. + if (crtl->calls_eh_return)
  6598. + {
  6599. + /* If the function uses __builtin_eh_return, the eh_return
  6600. + machinery occupies the delay slot. */
  6601. + gcc_assert (!final_sequence);
  6602. +
  6603. + if (flag_delayed_branch)
  6604. + {
  6605. + if (!TARGET_FLAT && TARGET_V9)
  6606. + fputs ("\treturn\t%i7+8\n", asm_out_file);
  6607. + else
  6608. + {
  6609. + if (!TARGET_FLAT)
  6610. + fputs ("\trestore\n", asm_out_file);
  6611. +
  6612. + fputs ("\tjmp\t%o7+8\n", asm_out_file);
  6613. + }
  6614. +
  6615. + fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
  6616. + }
  6617. + else
  6618. + {
  6619. + if (!TARGET_FLAT)
  6620. + fputs ("\trestore\n", asm_out_file);
  6621. +
  6622. + fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
  6623. + fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file);
  6624. + }
  6625. + }
  6626. + else if (sparc_leaf_function_p || TARGET_FLAT)
  6627. + {
  6628. + /* This is a leaf or flat function so we don't have to bother restoring
  6629. + the register window, which frees us from dealing with the convoluted
  6630. + semantics of restore/return. We simply output the jump to the
  6631. + return address and the insn in the delay slot (if any). */
  6632. +
  6633. + return "jmp\t%%o7+%)%#";
  6634. + }
  6635. + else
  6636. + {
  6637. + /* This is a regular function so we have to restore the register window.
  6638. + We may have a pending insn for the delay slot, which will be either
  6639. + combined with the 'restore' instruction or put in the delay slot of
  6640. + the 'return' instruction. */
  6641. +
  6642. + if (final_sequence)
  6643. + {
  6644. + rtx_insn *delay;
  6645. + rtx pat;
  6646. +
  6647. + delay = NEXT_INSN (insn);
  6648. + gcc_assert (delay);
  6649. +
  6650. + pat = PATTERN (delay);
  6651. +
  6652. + if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
  6653. + {
  6654. + epilogue_renumber (&pat, 0);
  6655. + return "return\t%%i7+%)%#";
  6656. + }
  6657. + else
  6658. + {
  6659. + output_asm_insn ("jmp\t%%i7+%)", NULL);
  6660. +
  6661. + /* We're going to output the insn in the delay slot manually.
  6662. + Make sure to output its source location first. */
  6663. + PATTERN (delay) = gen_blockage ();
  6664. + INSN_CODE (delay) = -1;
  6665. + final_scan_insn (delay, asm_out_file, optimize, 0, NULL);
  6666. + INSN_LOCATION (delay) = UNKNOWN_LOCATION;
  6667. +
  6668. + output_restore (pat);
  6669. + }
  6670. + }
  6671. + else
  6672. + {
  6673. + /* The delay slot is empty. */
  6674. + if (TARGET_V9)
  6675. + return "return\t%%i7+%)\n\t nop";
  6676. + else if (flag_delayed_branch)
  6677. + return "jmp\t%%i7+%)\n\t restore";
  6678. + else
  6679. + return "restore\n\tjmp\t%%o7+%)\n\t nop";
  6680. + }
  6681. + }
  6682. +
  6683. + return "";
  6684. +}
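+
+ /* Editor's note: concretely, the strings above expand to sequences such as
+      leaf or flat:    jmp %o7+8      <delay insn or nop>
+      V9 non-leaf:     return %i7+8   <delay insn or nop>
+      V8 non-leaf:     jmp %i7+8      restore
+    where %) prints the return offset: normally 8, or 12 for a 32-bit
+    function returning a structure, to skip the caller's unimp word (see
+    sparc_struct_value_rtx below). */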
  6685. +
  6686. +/* Output a sibling call. */
  6687. +
  6688. +const char *
  6689. +output_sibcall (rtx_insn *insn, rtx call_operand)
  6690. +{
  6691. + rtx operands[1];
  6692. +
  6693. + gcc_assert (flag_delayed_branch);
  6694. +
  6695. + operands[0] = call_operand;
  6696. +
  6697. + if (sparc_leaf_function_p || TARGET_FLAT)
  6698. + {
  6699. + /* This is a leaf or flat function so we don't have to bother restoring
  6700. + the register window. We simply output the jump to the function and
  6701. + the insn in the delay slot (if any). */
  6702. +
  6703. + gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
  6704. +
  6705. + if (final_sequence)
  6706. + output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
  6707. + operands);
  6708. + else
  6709. + /* Use 'or' with rs2 %%g0 instead of 'mov', so that as/ld can optimize
  6710. + it into a branch if possible. */
  6711. + output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
  6712. + operands);
  6713. + }
  6714. + else
  6715. + {
  6716. + /* This is a regular function so we have to restore the register window.
  6717. + We may have a pending insn for the delay slot, which will be combined
  6718. + with the 'restore' instruction. */
  6719. +
  6720. + output_asm_insn ("call\t%a0, 0", operands);
  6721. +
  6722. + if (final_sequence)
  6723. + {
  6724. + rtx_insn *delay;
  6725. + rtx pat;
  6726. +
  6727. + delay = NEXT_INSN (insn);
  6728. + gcc_assert (delay);
  6729. +
  6730. + pat = PATTERN (delay);
  6731. +
  6732. + /* We're going to output the insn in the delay slot manually.
  6733. + Make sure to output its source location first. */
  6734. + PATTERN (delay) = gen_blockage ();
  6735. + INSN_CODE (delay) = -1;
  6736. + final_scan_insn (delay, asm_out_file, optimize, 0, NULL);
  6737. + INSN_LOCATION (delay) = UNKNOWN_LOCATION;
  6738. +
  6739. + output_restore (pat);
  6740. + }
  6741. + else
  6742. + output_restore (NULL_RTX);
  6743. + }
  6744. +
  6745. + return "";
  6746. +}
  6747. +
  6748. +/* Functions for handling argument passing.
  6749. +
  6750. + For 32-bit, the first 6 args are normally in registers and the rest are
  6751. + pushed. Any arg that starts within the first 6 words is at least
  6752. + partially passed in a register unless its data type forbids.
  6753. +
  6754. + For 64-bit, the argument registers are laid out as an array of 16 elements
  6755. + and arguments are added sequentially. The first 6 int args and up to the
  6756. + first 16 fp args (depending on size) are passed in regs.
  6757. +
  6758. +    Slot   Stack    Integral  Float   Float in structure  Double  Long Double
  6759. +    ----   -----    --------  -----   ------------------  ------  -----------
  6760. +     15  [SP+248]             %f31        %f30,%f31        %d30
  6761. +     14  [SP+240]             %f29        %f28,%f29        %d28       %q28
  6762. +     13  [SP+232]             %f27        %f26,%f27        %d26
  6763. +     12  [SP+224]             %f25        %f24,%f25        %d24       %q24
  6764. +     11  [SP+216]             %f23        %f22,%f23        %d22
  6765. +     10  [SP+208]             %f21        %f20,%f21        %d20       %q20
  6766. +      9  [SP+200]             %f19        %f18,%f19        %d18
  6767. +      8  [SP+192]             %f17        %f16,%f17        %d16       %q16
  6768. +      7  [SP+184]             %f15        %f14,%f15        %d14
  6769. +      6  [SP+176]             %f13        %f12,%f13        %d12       %q12
  6770. +      5  [SP+168]    %o5      %f11        %f10,%f11        %d10
  6771. +      4  [SP+160]    %o4      %f9         %f8,%f9          %d8        %q8
  6772. +      3  [SP+152]    %o3      %f7         %f6,%f7          %d6
  6773. +      2  [SP+144]    %o2      %f5         %f4,%f5          %d4        %q4
  6774. +      1  [SP+136]    %o1      %f3         %f2,%f3          %d2
  6775. +      0  [SP+128]    %o0      %f1         %f0,%f1          %d0        %q0
  6776. +
  6777. + Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
  6778. +
  6779. + Integral arguments are always passed as 64-bit quantities appropriately
  6780. + extended.
  6781. +
  6782. + Passing of floating point values is handled as follows.
  6783. + If a prototype is in scope:
  6784. +   If the value is in a named argument (i.e. not a stdarg function or a
  6785. +   value not part of the `...') then the value is passed in the appropriate
  6786. +   fp reg.
  6787. +   If the value is part of the `...' and is passed in one of the first 6
  6788. +   slots then the value is passed in the appropriate int reg.
  6789. +   If the value is part of the `...' and is not passed in one of the first 6
  6790. +   slots then the value is passed in memory.
  6791. + If a prototype is not in scope:
  6792. +   If the value is one of the first 6 arguments the value is passed in the
  6793. +   appropriate integer reg and the appropriate fp reg.
  6794. +   If the value is not one of the first 6 arguments the value is passed in
  6795. +   the appropriate fp reg and in memory.
  6796. +
  6797. +
  6798. + Summary of the calling conventions implemented by GCC on the SPARC:
  6799. +
  6800. +   32-bit ABI:
  6801. +                               size     argument    return value
  6802. +
  6803. +     small integer             <4       int. reg.    int. reg.
  6804. +     word                       4       int. reg.    int. reg.
  6805. +     double word                8       int. reg.    int. reg.
  6806. +
  6807. +     _Complex small integer    <8       int. reg.    int. reg.
  6808. +     _Complex word              8       int. reg.    int. reg.
  6809. +     _Complex double word      16       memory       int. reg.
  6810. +
  6811. +     vector integer           <=8       int. reg.    FP reg.
  6812. +     vector integer            >8       memory       memory
  6813. +
  6814. +     float                      4       int. reg.    FP reg.
  6815. +     double                     8       int. reg.    FP reg.
  6816. +     long double               16       memory       memory
  6817. +
  6818. +     _Complex float             8       memory       FP reg.
  6819. +     _Complex double           16       memory       FP reg.
  6820. +     _Complex long double      32       memory       FP reg.
  6821. +
  6822. +     vector float              any      memory       memory
  6823. +
  6824. +     aggregate                 any      memory       memory
  6825. +
  6826. +
  6827. +
  6828. +   64-bit ABI:
  6829. +                               size     argument    return value
  6830. +
  6831. +     small integer             <8       int. reg.    int. reg.
  6832. +     word                       8       int. reg.    int. reg.
  6833. +     double word               16       int. reg.    int. reg.
  6834. +
  6835. +     _Complex small integer   <16       int. reg.    int. reg.
  6836. +     _Complex word             16       int. reg.    int. reg.
  6837. +     _Complex double word      32       memory       int. reg.
  6838. +
  6839. +     vector integer          <=16       FP reg.      FP reg.
  6840. +     vector integer      16<s<=32       memory       FP reg.
  6841. +     vector integer           >32       memory       memory
  6842. +
  6843. +     float                      4       FP reg.      FP reg.
  6844. +     double                     8       FP reg.      FP reg.
  6845. +     long double               16       FP reg.      FP reg.
  6846. +
  6847. +     _Complex float             8       FP reg.      FP reg.
  6848. +     _Complex double           16       FP reg.      FP reg.
  6849. +     _Complex long double      32       memory       FP reg.
  6850. +
  6851. +     vector float            <=16       FP reg.      FP reg.
  6852. +     vector float        16<s<=32       memory       FP reg.
  6853. +     vector float             >32       memory       memory
  6854. +
  6855. +     aggregate               <=16       reg.         reg.
  6856. +     aggregate           16<s<=32       memory       reg.
  6857. +     aggregate                >32       memory       memory
  6858. +
  6859. +
  6860. +
  6861. +Note #1: complex floating-point types follow the extended SPARC ABIs as
  6862. +implemented by the Sun compiler.
  6863. +
  6864. +Note #2: integer vector types follow the scalar floating-point types
  6865. +conventions to match what is implemented by the Sun VIS SDK.
  6866. +
  6867. +Note #3: floating-point vector types follow the aggregate types
  6868. +conventions. */
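+
+ /* Editor's note: a worked reading of the 64-bit slot table above, with a
+    prototype in scope; the function name is illustrative. */
+
+ extern void f (int i,      /* slot 0: %o0 (integral, int reg) */
+                double d,   /* slot 1: %d2 (double, FP reg pair) */
+                float s);   /* slot 2: %f5 (single, right-justified
+                               in the slot's register pair) */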
  6869. +
  6870. +
  6871. +/* Maximum number of int regs for args. */
  6872. +#define SPARC_INT_ARG_MAX 6
  6873. +/* Maximum number of fp regs for args. */
  6874. +#define SPARC_FP_ARG_MAX 16
  6875. +/* Number of words (partially) occupied for a given size in units. */
  6876. +#define CEIL_NWORDS(SIZE) CEIL((SIZE), UNITS_PER_WORD)
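+ /* Editor's note: for example, with UNITS_PER_WORD == 8 (64-bit),
+    CEIL_NWORDS (1) == 1, CEIL_NWORDS (8) == 1 and CEIL_NWORDS (12) == 2,
+    i.e. a 12-byte argument occupies two slots. */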
  6877. +
  6878. +/* Handle the INIT_CUMULATIVE_ARGS macro.
  6879. + Initialize a variable CUM of type CUMULATIVE_ARGS
  6880. + for a call to a function whose data type is FNTYPE.
  6881. + For a library call, FNTYPE is 0. */
  6882. +
  6883. +void
  6884. +init_cumulative_args (struct sparc_args *cum, tree fntype, rtx, tree)
  6885. +{
  6886. + cum->words = 0;
  6887. + cum->prototype_p = fntype && prototype_p (fntype);
  6888. + cum->libcall_p = !fntype;
  6889. +}
  6890. +
  6891. +/* Handle promotion of pointer and integer arguments. */
  6892. +
  6893. +static machine_mode
  6894. +sparc_promote_function_mode (const_tree type, machine_mode mode,
  6895. + int *punsignedp, const_tree, int)
  6896. +{
  6897. + if (type && POINTER_TYPE_P (type))
  6898. + {
  6899. + *punsignedp = POINTERS_EXTEND_UNSIGNED;
  6900. + return Pmode;
  6901. + }
  6902. +
  6903. + /* Integral arguments are passed as full words, as per the ABI. */
  6904. + if (GET_MODE_CLASS (mode) == MODE_INT
  6905. + && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
  6906. + return word_mode;
  6907. +
  6908. + return mode;
  6909. +}
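+
+ /* Editor's note: for example, a 'short' (HImode) argument is widened to a
+    full word (SImode on 32-bit, DImode on 64-bit) and extended according to
+    its signedness before being passed. */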
  6910. +
  6911. +/* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
  6912. +
  6913. +static bool
  6914. +sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
  6915. +{
  6916. + return TARGET_ARCH64;
  6917. +}
  6918. +
  6919. +/* Handle the TARGET_PASS_BY_REFERENCE target hook.
  6920. + Specify whether to pass the argument by reference. */
  6921. +
  6922. +static bool
  6923. +sparc_pass_by_reference (cumulative_args_t, const function_arg_info &arg)
  6924. +{
  6925. + tree type = arg.type;
  6926. + machine_mode mode = arg.mode;
  6927. + if (TARGET_ARCH32)
  6928. + /* Original SPARC 32-bit ABI says that structures and unions,
  6929. + and quad-precision floats are passed by reference.
  6930. + All other base types are passed in registers.
  6931. +
  6932. + Extended ABI (as implemented by the Sun compiler) says that all
  6933. + complex floats are passed by reference. Pass complex integers
  6934. + in registers up to 8 bytes. More generally, enforce the 2-word
  6935. + cap for passing arguments in registers.
  6936. +
  6937. + Vector ABI (as implemented by the Sun VIS SDK) says that integer
  6938. + vectors are passed like floats of the same size, that is in
  6939. + registers up to 8 bytes. Pass all vector floats by reference
  6940. + like structure and unions. */
  6941. + return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
  6942. + || mode == SCmode
  6943. + /* Catch CDImode, TFmode, DCmode and TCmode. */
  6944. + || GET_MODE_SIZE (mode) > 8
  6945. + || (type
  6946. + && VECTOR_TYPE_P (type)
  6947. + && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
  6948. + else
  6949. + /* Original SPARC 64-bit ABI says that structures and unions
  6950. + smaller than 16 bytes are passed in registers, as well as
  6951. + all other base types.
  6952. +
  6953. + Extended ABI (as implemented by the Sun compiler) says that
  6954. + complex floats are passed in registers up to 16 bytes. Pass
  6955. + all complex integers in registers up to 16 bytes. More generally,
  6956. + enforce the 2-word cap for passing arguments in registers.
  6957. +
  6958. + Vector ABI (as implemented by the Sun VIS SDK) says that integer
  6959. + vectors are passed like floats of the same size, that is in
  6960. + registers (up to 16 bytes). Pass all vector floats like structure
  6961. + and unions. */
  6962. + return ((type
  6963. + && (AGGREGATE_TYPE_P (type) || VECTOR_TYPE_P (type))
  6964. + && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
  6965. + /* Catch CTImode and TCmode. */
  6966. + || GET_MODE_SIZE (mode) > 16);
  6967. +}
  6968. +
  6969. +/* Traverse the record TYPE recursively and call FUNC on its fields.
  6970. + NAMED is true if this is for a named parameter. DATA is passed
  6971. + to FUNC for each field. OFFSET is the starting position and
  6972. + PACKED is true if we are inside a packed record. */
  6973. +
  6974. +template <typename T, void Func (const_tree, int, bool, T*)>
  6975. +static void
  6976. +traverse_record_type (const_tree type, bool named, T *data,
  6977. + int offset = 0, bool packed = false)
  6978. +{
  6979. + /* The ABI obviously doesn't specify how packed structures are passed.
  6980. + These are passed in integer regs if possible, otherwise memory. */
  6981. + if (!packed)
  6982. + for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
  6983. + if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
  6984. + {
  6985. + packed = true;
  6986. + break;
  6987. + }
  6988. +
  6989. + /* Walk the real fields, but skip those with no size or a zero size.
  6990. + ??? Fields with variable offset are handled as having zero offset. */
  6991. + for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
  6992. + if (TREE_CODE (field) == FIELD_DECL)
  6993. + {
  6994. + if (!DECL_SIZE (field) || integer_zerop (DECL_SIZE (field)))
  6995. + continue;
  6996. +
  6997. + int bitpos = offset;
  6998. + if (TREE_CODE (DECL_FIELD_OFFSET (field)) == INTEGER_CST)
  6999. + bitpos += int_bit_position (field);
  7000. +
  7001. + tree field_type = TREE_TYPE (field);
  7002. + if (TREE_CODE (field_type) == RECORD_TYPE)
  7003. + traverse_record_type<T, Func> (field_type, named, data, bitpos,
  7004. + packed);
  7005. + else
  7006. + {
  7007. + const bool fp_type
  7008. + = FLOAT_TYPE_P (field_type) || VECTOR_TYPE_P (field_type);
  7009. + Func (field, bitpos, fp_type && named && !packed && TARGET_FPU,
  7010. + data);
  7011. + }
  7012. + }
  7013. +}
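+
+ /* Editor's note: for example, given
+
+      struct s { int i; struct { float f; } n; };
+
+    the walk above calls Func for 'i' with fp == false, recurses into 'n',
+    and calls Func for 'f' with fp == true provided the parameter is named,
+    the record is not packed and the FPU is enabled. */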
  7014. +
  7015. +/* Handle recursive register classifying for structure layout. */
  7016. +
  7017. +typedef struct
  7018. +{
  7019. + bool fp_regs; /* true if field eligible for FP registers. */
  7020. + bool fp_regs_in_first_word; /* true if such a field is in the first word. */
  7021. +} classify_data_t;
  7022. +
  7023. +/* A subroutine of function_arg_slotno. Classify the field. */
  7024. +
  7025. +inline void
  7026. +classify_registers (const_tree, int bitpos, bool fp, classify_data_t *data)
  7027. +{
  7028. + if (fp)
  7029. + {
  7030. + data->fp_regs = true;
  7031. + if (bitpos < BITS_PER_WORD)
  7032. + data->fp_regs_in_first_word = true;
  7033. + }
  7034. +}
  7035. +
  7036. +/* Compute the slot number to pass an argument in.
  7037. + Return the slot number or -1 if passing on the stack.
  7038. +
  7039. + CUM is a variable of type CUMULATIVE_ARGS which gives info about
  7040. + the preceding args and about the function being called.
  7041. + MODE is the argument's machine mode.
  7042. + TYPE is the data type of the argument (as a tree).
  7043. + This is null for libcalls where that information may
  7044. + not be available.
  7045. + NAMED is nonzero if this argument is a named parameter
  7046. + (otherwise it is an extra parameter matching an ellipsis).
  7047. + INCOMING is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
  7048. + *PREGNO records the register number to use if scalar type.
  7049. + *PPADDING records the amount of padding needed in words. */
  7050. +
  7051. +static int
  7052. +function_arg_slotno (const struct sparc_args *cum, machine_mode mode,
  7053. + const_tree type, bool named, bool incoming,
  7054. + int *pregno, int *ppadding)
  7055. +{
  7056. + const int regbase
  7057. + = incoming ? SPARC_INCOMING_INT_ARG_FIRST : SPARC_OUTGOING_INT_ARG_FIRST;
  7058. + int slotno = cum->words, regno;
  7059. + enum mode_class mclass = GET_MODE_CLASS (mode);
  7060. +
  7061. + /* Silence warnings in the callers. */
  7062. + *pregno = -1;
  7063. + *ppadding = -1;
  7064. +
  7065. + if (type && TREE_ADDRESSABLE (type))
  7066. + return -1;
  7067. +
  7068. + /* In 64-bit mode, objects requiring 16-byte alignment get it. */
  7069. + if (TARGET_ARCH64
  7070. + && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
  7071. + && (slotno & 1) != 0)
  7072. + {
  7073. + slotno++;
  7074. + *ppadding = 1;
  7075. + }
  7076. + else
  7077. + *ppadding = 0;
  7078. +
  7079. + /* Vector types deserve special treatment because they are polymorphic wrt
  7080. + their mode, depending upon whether VIS instructions are enabled. */
  7081. + if (type && VECTOR_TYPE_P (type))
  7082. + {
  7083. + if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
  7084. + {
  7085. + /* The SPARC port defines no floating-point vector modes. */
  7086. + gcc_assert (mode == BLKmode);
  7087. + }
  7088. + else
  7089. + {
  7090. + /* Integer vector types should either have a vector
  7091. + mode or an integral mode, because we are guaranteed
  7092. + by pass_by_reference that their size is not greater
  7093. + than 16 bytes and TImode is 16-byte wide. */
  7094. + gcc_assert (mode != BLKmode);
  7095. +
  7096. + /* Integer vectors are handled like floats as per
  7097. + the Sun VIS SDK. */
  7098. + mclass = MODE_FLOAT;
  7099. + }
  7100. + }
  7101. +
  7102. + switch (mclass)
  7103. + {
  7104. + case MODE_FLOAT:
  7105. + case MODE_COMPLEX_FLOAT:
  7106. + case MODE_VECTOR_INT:
  7107. + if (TARGET_ARCH64 && TARGET_FPU && named)
  7108. + {
  7109. + /* If all arg slots are filled, then must pass on stack. */
  7110. + if (slotno >= SPARC_FP_ARG_MAX)
  7111. + return -1;
  7112. +
  7113. + regno = SPARC_FP_ARG_FIRST + slotno * 2;
  7114. + /* Arguments filling only one single FP register are
  7115. + right-justified in the outer double FP register. */
  7116. + if (GET_MODE_SIZE (mode) <= 4)
  7117. + regno++;
  7118. + break;
  7119. + }
  7120. + /* fallthrough */
  7121. +
  7122. + case MODE_INT:
  7123. + case MODE_COMPLEX_INT:
  7124. + /* If all arg slots are filled, then must pass on stack. */
  7125. + if (slotno >= SPARC_INT_ARG_MAX)
  7126. + return -1;
  7127. +
  7128. + regno = regbase + slotno;
  7129. + break;
  7130. +
  7131. + case MODE_RANDOM:
  7132. + /* MODE is VOIDmode when generating the actual call. */
  7133. + if (mode == VOIDmode)
  7134. + return -1;
  7135. +
  7136. + if (TARGET_ARCH64 && TARGET_FPU && named
  7137. + && type
  7138. + && (TREE_CODE (type) == RECORD_TYPE || VECTOR_TYPE_P (type)))
  7139. + {
  7140. + /* If all arg slots are filled, then must pass on stack. */
  7141. + if (slotno >= SPARC_FP_ARG_MAX)
  7142. + return -1;
  7143. +
  7144. + if (TREE_CODE (type) == RECORD_TYPE)
  7145. + {
  7146. + classify_data_t data = { false, false };
  7147. + traverse_record_type<classify_data_t, classify_registers>
  7148. + (type, named, &data);
  7149. +
  7150. + if (data.fp_regs)
  7151. + {
  7152. + /* If all FP slots are filled except for the last one and
  7153. + there is no FP field in the first word, then must pass
  7154. + on stack. */
  7155. + if (slotno >= SPARC_FP_ARG_MAX - 1
  7156. + && !data.fp_regs_in_first_word)
  7157. + return -1;
  7158. + }
  7159. + else
  7160. + {
  7161. + /* If all int slots are filled, then must pass on stack. */
  7162. + if (slotno >= SPARC_INT_ARG_MAX)
  7163. + return -1;
  7164. + }
  7165. +
  7166. + /* PREGNO isn't set since both int and FP regs can be used. */
  7167. + return slotno;
  7168. + }
  7169. +
  7170. + regno = SPARC_FP_ARG_FIRST + slotno * 2;
  7171. + }
  7172. + else
  7173. + {
  7174. + /* If all arg slots are filled, then must pass on stack. */
  7175. + if (slotno >= SPARC_INT_ARG_MAX)
  7176. + return -1;
  7177. +
  7178. + regno = regbase + slotno;
  7179. + }
  7180. + break;
  7181. +
  7182. + default:
  7183. + gcc_unreachable ();
  7184. + }
  7185. +
  7186. + *pregno = regno;
  7187. + return slotno;
  7188. +}
  7189. +
  7190. +/* Handle recursive register counting/assigning for structure layout. */
  7191. +
  7192. +typedef struct
  7193. +{
  7194. + int slotno; /* slot number of the argument. */
  7195. + int regbase; /* regno of the base register. */
  7196. + int intoffset; /* offset of the first pending integer field. */
  7197. + int nregs; /* number of words passed in registers. */
  7198. + bool stack; /* true if part of the argument is on the stack. */
  7199. + rtx ret; /* return expression being built. */
  7200. +} assign_data_t;
  7201. +
  7202. +/* A subroutine of function_arg_record_value. Compute the number of integer
  7203. + registers to be assigned between DATA->intoffset and BITPOS. Return
  7204. + true if at least one integer register is assigned or false otherwise. */
  7205. +
  7206. +static bool
  7207. +compute_int_layout (int bitpos, assign_data_t *data, int *pnregs)
  7208. +{
  7209. + if (data->intoffset < 0)
  7210. + return false;
  7211. +
  7212. + const int intoffset = data->intoffset;
  7213. + data->intoffset = -1;
  7214. +
  7215. + const int this_slotno = data->slotno + intoffset / BITS_PER_WORD;
  7216. + const unsigned int startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
  7217. + const unsigned int endbit = ROUND_UP (bitpos, BITS_PER_WORD);
  7218. + int nregs = (endbit - startbit) / BITS_PER_WORD;
  7219. +
  7220. + if (nregs > 0 && nregs > SPARC_INT_ARG_MAX - this_slotno)
  7221. + {
  7222. + nregs = SPARC_INT_ARG_MAX - this_slotno;
  7223. +
  7224. + /* We need to pass this field (partly) on the stack. */
  7225. + data->stack = 1;
  7226. + }
  7227. +
  7228. + if (nregs <= 0)
  7229. + return false;
  7230. +
  7231. + *pnregs = nregs;
  7232. + return true;
  7233. +}
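+
+ /* Editor's note: a worked example, assuming BITS_PER_WORD == 64: with
+    DATA->intoffset == 0 and BITPOS == 128, startbit == 0, endbit == 128
+    and nregs == 2, i.e. the two pending integer words each get an
+    integer register. */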
  7234. +
  7235. +/* A subroutine of function_arg_record_value. Compute the number and the mode
  7236. + of the FP registers to be assigned for FIELD. Return true if at least one
  7237. + FP register is assigned or false otherwise. */
  7238. +
  7239. +static bool
  7240. +compute_fp_layout (const_tree field, int bitpos, assign_data_t *data,
  7241. + int *pnregs, machine_mode *pmode)
  7242. +{
  7243. + const int this_slotno = data->slotno + bitpos / BITS_PER_WORD;
  7244. + machine_mode mode = DECL_MODE (field);
  7245. + int nregs, nslots;
  7246. +
  7247. + /* Slots are counted as words while regs are counted as having the size of
  7248. + the (inner) mode. */
  7249. + if (VECTOR_TYPE_P (TREE_TYPE (field)) && mode == BLKmode)
  7250. + {
  7251. + mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
  7252. + nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
  7253. + }
  7254. + else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
  7255. + {
  7256. + mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
  7257. + nregs = 2;
  7258. + }
  7259. + else
  7260. + nregs = 1;
  7261. +
  7262. + nslots = CEIL_NWORDS (nregs * GET_MODE_SIZE (mode));
  7263. +
  7264. + if (nslots > SPARC_FP_ARG_MAX - this_slotno)
  7265. + {
  7266. + nslots = SPARC_FP_ARG_MAX - this_slotno;
  7267. + nregs = (nslots * UNITS_PER_WORD) / GET_MODE_SIZE (mode);
  7268. +
  7269. + /* We need to pass this field (partly) on the stack. */
  7270. + data->stack = 1;
  7271. +
  7272. + if (nregs <= 0)
  7273. + return false;
  7274. + }
  7275. +
  7276. + *pnregs = nregs;
  7277. + *pmode = mode;
  7278. + return true;
  7279. +}
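+
+ /* Editor's note: for example, a '_Complex float' field yields
+    mode == SFmode and nregs == 2, hence nslots == CEIL_NWORDS (8) == 1
+    on 64-bit: both halves share one slot but occupy two single-precision
+    registers. */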
  7280. +
  7281. +/* A subroutine of function_arg_record_value. Count the number of registers
  7282. + to be assigned for FIELD and between DATA->intoffset and BITPOS. */
  7283. +
  7284. +inline void
  7285. +count_registers (const_tree field, int bitpos, bool fp, assign_data_t *data)
  7286. +{
  7287. + if (fp)
  7288. + {
  7289. + int nregs;
  7290. + machine_mode mode;
  7291. +
  7292. + if (compute_int_layout (bitpos, data, &nregs))
  7293. + data->nregs += nregs;
  7294. +
  7295. + if (compute_fp_layout (field, bitpos, data, &nregs, &mode))
  7296. + data->nregs += nregs;
  7297. + }
  7298. + else
  7299. + {
  7300. + if (data->intoffset < 0)
  7301. + data->intoffset = bitpos;
  7302. + }
  7303. +}
  7304. +
  7305. +/* A subroutine of function_arg_record_value. Assign the bits of the
  7306. + structure between DATA->intoffset and BITPOS to integer registers. */
  7307. +
  7308. +static void
  7309. +assign_int_registers (int bitpos, assign_data_t *data)
  7310. +{
  7311. + int intoffset = data->intoffset;
  7312. + machine_mode mode;
  7313. + int nregs;
  7314. +
  7315. + if (!compute_int_layout (bitpos, data, &nregs))
  7316. + return;
  7317. +
  7318. + /* If this is the trailing part of a word, only load that much into
  7319. + the register. Otherwise load the whole register. Note that in
  7320. + the latter case we may pick up unwanted bits. It's not a problem
  7321. + at the moment, but we may wish to revisit this. */
  7322. + if (intoffset % BITS_PER_WORD != 0)
  7323. + mode = smallest_int_mode_for_size (BITS_PER_WORD
  7324. + - intoffset % BITS_PER_WORD);
  7325. + else
  7326. + mode = word_mode;
  7327. +
  7328. + const int this_slotno = data->slotno + intoffset / BITS_PER_WORD;
  7329. + unsigned int regno = data->regbase + this_slotno;
  7330. + intoffset /= BITS_PER_UNIT;
  7331. +
  7332. + do
  7333. + {
  7334. + rtx reg = gen_rtx_REG (mode, regno);
  7335. + XVECEXP (data->ret, 0, data->stack + data->nregs)
  7336. + = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
  7337. + data->nregs += 1;
  7338. + mode = word_mode;
  7339. + regno += 1;
  7340. + intoffset = (intoffset | (UNITS_PER_WORD - 1)) + 1;
  7341. + }
  7342. + while (--nregs > 0);
  7343. +}
  7344. +
  7345. +/* A subroutine of function_arg_record_value. Assign FIELD at position
  7346. + BITPOS to FP registers. */
  7347. +
  7348. +static void
  7349. +assign_fp_registers (const_tree field, int bitpos, assign_data_t *data)
  7350. +{
  7351. + int nregs;
  7352. + machine_mode mode;
  7353. +
  7354. + if (!compute_fp_layout (field, bitpos, data, &nregs, &mode))
  7355. + return;
  7356. +
  7357. + const int this_slotno = data->slotno + bitpos / BITS_PER_WORD;
  7358. + int regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
  7359. + if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
  7360. + regno++;
  7361. + int pos = bitpos / BITS_PER_UNIT;
  7362. +
  7363. + do
  7364. + {
  7365. + rtx reg = gen_rtx_REG (mode, regno);
  7366. + XVECEXP (data->ret, 0, data->stack + data->nregs)
  7367. + = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
  7368. + data->nregs += 1;
  7369. + regno += GET_MODE_SIZE (mode) / 4;
  7370. + pos += GET_MODE_SIZE (mode);
  7371. + }
  7372. + while (--nregs > 0);
  7373. +}
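+
+ /* Editor's note: the regno adjustment above mirrors the right-justification
+    in the slot table: an SFmode field at BITPOS == 32 in slot 0 selects
+    SPARC_FP_ARG_FIRST + 1, i.e. the odd half of the slot's register pair,
+    and each iteration advances regno by GET_MODE_SIZE (mode) / 4. */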
  7374. +
  7375. +/* A subroutine of function_arg_record_value. Assign FIELD and the bits of
  7376. + the structure between DATA->intoffset and BITPOS to registers. */
  7377. +
  7378. +inline void
  7379. +assign_registers (const_tree field, int bitpos, bool fp, assign_data_t *data)
  7380. +{
  7381. + if (fp)
  7382. + {
  7383. + assign_int_registers (bitpos, data);
  7384. +
  7385. + assign_fp_registers (field, bitpos, data);
  7386. + }
  7387. + else
  7388. + {
  7389. + if (data->intoffset < 0)
  7390. + data->intoffset = bitpos;
  7391. + }
  7392. +}
  7393. +
  7394. +/* Used by function_arg and function_value to implement the complex
  7395. + conventions of the 64-bit ABI for passing and returning structures.
  7396. + Return an expression valid as a return value for the FUNCTION_ARG
  7397. + and TARGET_FUNCTION_VALUE.
  7398. +
  7399. + TYPE is the data type of the argument (as a tree).
  7400. + This is null for libcalls where that information may
  7401. + not be available.
  7402. + MODE is the argument's machine mode.
  7403. + SLOTNO is the index number of the argument's slot in the parameter array.
  7404. + NAMED is true if this argument is a named parameter
  7405. + (otherwise it is an extra parameter matching an ellipsis).
  7406. + REGBASE is the regno of the base register for the parameter array. */
  7407. +
  7408. +static rtx
  7409. +function_arg_record_value (const_tree type, machine_mode mode,
  7410. + int slotno, bool named, int regbase)
  7411. +{
  7412. + const int size = int_size_in_bytes (type);
  7413. + assign_data_t data;
  7414. + int nregs;
  7415. +
  7416. + data.slotno = slotno;
  7417. + data.regbase = regbase;
  7418. +
  7419. + /* Count how many registers we need. */
  7420. + data.nregs = 0;
  7421. + data.intoffset = 0;
  7422. + data.stack = false;
  7423. + traverse_record_type<assign_data_t, count_registers> (type, named, &data);
  7424. +
  7425. + /* Take into account pending integer fields. */
  7426. + if (compute_int_layout (size * BITS_PER_UNIT, &data, &nregs))
  7427. + data.nregs += nregs;
  7428. +
  7429. + /* Allocate the vector and handle some annoying special cases. */
  7430. + nregs = data.nregs;
  7431. +
  7432. + if (nregs == 0)
  7433. + {
  7434. + /* ??? Empty structure has no value? Duh? */
  7435. + if (size <= 0)
  7436. + {
  7437. + /* Though there's nothing really to store, return a word register
  7438. + anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
  7439. + leads to breakage due to the fact that there are zero bytes to
  7440. + load. */
  7441. + return gen_rtx_REG (mode, regbase);
  7442. + }
  7443. +
  7444. + /* ??? C++ has structures with no fields, and yet a size. Give up
  7445. + for now and pass everything back in integer registers. */
  7446. + nregs = CEIL_NWORDS (size);
  7447. + if (nregs + slotno > SPARC_INT_ARG_MAX)
  7448. + nregs = SPARC_INT_ARG_MAX - slotno;
  7449. + }
  7450. +
  7451. + gcc_assert (nregs > 0);
  7452. +
  7453. + data.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (data.stack + nregs));
  7454. +
  7455. + /* If at least one field must be passed on the stack, generate
  7456. + (parallel [(expr_list (nil) ...) ...]) so that all fields will
  7457. + also be passed on the stack. We can't do much better because the
  7458. + semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
  7459. + of structures for which the fields passed exclusively in registers
  7460. + are not at the beginning of the structure. */
  7461. + if (data.stack)
  7462. + XVECEXP (data.ret, 0, 0)
  7463. + = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
  7464. +
  7465. + /* Assign the registers. */
  7466. + data.nregs = 0;
  7467. + data.intoffset = 0;
  7468. + traverse_record_type<assign_data_t, assign_registers> (type, named, &data);
  7469. +
  7470. + /* Assign pending integer fields. */
  7471. + assign_int_registers (size * BITS_PER_UNIT, &data);
  7472. +
  7473. + gcc_assert (data.nregs == nregs);
  7474. +
  7475. + return data.ret;
  7476. +}
  7477. +
  7478. +/* Used by function_arg and function_value to implement the conventions
  7479. + of the 64-bit ABI for passing and returning unions.
  7480. + Return an expression valid as a return value for the FUNCTION_ARG
  7481. + and TARGET_FUNCTION_VALUE.
  7482. +
  7483. + SIZE is the size in bytes of the union.
  7484. + MODE is the argument's machine mode.
  7485. + SLOTNO is the index number of the argument's slot in the parameter array.
  7486. + REGNO is the hard register the union will be passed in. */
  7487. +
  7488. +static rtx
  7489. +function_arg_union_value (int size, machine_mode mode, int slotno, int regno)
  7490. +{
  7491. + unsigned int nwords;
  7492. +
  7493. + /* See comment in function_arg_record_value for empty structures. */
  7494. + if (size <= 0)
  7495. + return gen_rtx_REG (mode, regno);
  7496. +
  7497. + if (slotno == SPARC_INT_ARG_MAX - 1)
  7498. + nwords = 1;
  7499. + else
  7500. + nwords = CEIL_NWORDS (size);
  7501. +
  7502. + rtx regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
  7503. +
  7504. + /* Unions are passed left-justified. */
  7505. + for (unsigned int i = 0; i < nwords; i++)
  7506. + XVECEXP (regs, 0, i)
  7507. + = gen_rtx_EXPR_LIST (VOIDmode,
  7508. + gen_rtx_REG (word_mode, regno + i),
  7509. + GEN_INT (UNITS_PER_WORD * i));
  7510. +
  7511. + return regs;
  7512. +}
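+
+ /* Editor's note: e.g. for a hypothetical 12-byte union starting in slot 0,
+
+      union u { char c[12]; };
+
+    nwords == CEIL_NWORDS (12) == 2, and on the outgoing side the PARALLEL
+    maps bytes 0-7 to %o0 and bytes 8-11 to %o1, left-justified. */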
  7513. +
  7514. +/* Used by function_arg and function_value to implement the conventions
  7515. + of the 64-bit ABI for passing and returning BLKmode vectors.
  7516. + Return an expression valid as a return value for the FUNCTION_ARG
  7517. + and TARGET_FUNCTION_VALUE.
  7518. +
  7519. + SIZE is the size in bytes of the vector.
  7520. + SLOTNO is the index number of the argument's slot in the parameter array.
  7521. + NAMED is true if this argument is a named parameter
  7522. + (otherwise it is an extra parameter matching an ellipsis).
  7523. + REGNO is the hard register the vector will be passed in. */
  7524. +
  7525. +static rtx
  7526. +function_arg_vector_value (int size, int slotno, bool named, int regno)
  7527. +{
  7528. + const int mult = (named ? 2 : 1);
  7529. + unsigned int nwords;
  7530. +
  7531. + if (slotno == (named ? SPARC_FP_ARG_MAX : SPARC_INT_ARG_MAX) - 1)
  7532. + nwords = 1;
  7533. + else
  7534. + nwords = CEIL_NWORDS (size);
  7535. +
  7536. + rtx regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nwords));
  7537. +
  7538. + if (size < UNITS_PER_WORD)
  7539. + XVECEXP (regs, 0, 0)
  7540. + = gen_rtx_EXPR_LIST (VOIDmode,
  7541. + gen_rtx_REG (SImode, regno),
  7542. + const0_rtx);
  7543. + else
  7544. + for (unsigned int i = 0; i < nwords; i++)
  7545. + XVECEXP (regs, 0, i)
  7546. + = gen_rtx_EXPR_LIST (VOIDmode,
  7547. + gen_rtx_REG (word_mode, regno + i * mult),
  7548. + GEN_INT (i * UNITS_PER_WORD));
  7549. +
  7550. + return regs;
  7551. +}
  7552. +
  7553. +/* Determine where to put an argument to a function.
  7554. + Value is zero to push the argument on the stack,
  7555. + or a hard register in which to store the argument.
  7556. +
  7557. + CUM is a variable of type CUMULATIVE_ARGS which gives info about
  7558. + the preceding args and about the function being called.
  7559. + ARG is a description of the argument.
  7560. + INCOMING_P is false for TARGET_FUNCTION_ARG, true for
  7561. + TARGET_FUNCTION_INCOMING_ARG. */
  7562. +
  7563. +static rtx
  7564. +sparc_function_arg_1 (cumulative_args_t cum_v, const function_arg_info &arg,
  7565. + bool incoming)
  7566. +{
  7567. + const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  7568. + const int regbase
  7569. + = incoming ? SPARC_INCOMING_INT_ARG_FIRST : SPARC_OUTGOING_INT_ARG_FIRST;
  7570. + int slotno, regno, padding;
  7571. + tree type = arg.type;
  7572. + machine_mode mode = arg.mode;
  7573. + enum mode_class mclass = GET_MODE_CLASS (mode);
  7574. + bool named = arg.named;
  7575. +
  7576. + slotno
  7577. + = function_arg_slotno (cum, mode, type, named, incoming, &regno, &padding);
  7578. + if (slotno == -1)
  7579. + return 0;
  7580. +
  7581. + /* Integer vectors are handled like floats as per the Sun VIS SDK. */
  7582. + if (type && VECTOR_INTEGER_TYPE_P (type))
  7583. + mclass = MODE_FLOAT;
  7584. +
  7585. + if (TARGET_ARCH32)
  7586. + return gen_rtx_REG (mode, regno);
  7587. +
  7588. + /* Structures up to 16 bytes in size are passed in arg slots on the stack
  7589. + and are promoted to registers if possible. */
  7590. + if (type && TREE_CODE (type) == RECORD_TYPE)
  7591. + {
  7592. + const int size = int_size_in_bytes (type);
  7593. + gcc_assert (size <= 16);
  7594. +
  7595. + return function_arg_record_value (type, mode, slotno, named, regbase);
  7596. + }
  7597. +
  7598. + /* Unions up to 16 bytes in size are passed in integer registers. */
  7599. + else if (type && TREE_CODE (type) == UNION_TYPE)
  7600. + {
  7601. + const int size = int_size_in_bytes (type);
  7602. + gcc_assert (size <= 16);
  7603. +
  7604. + return function_arg_union_value (size, mode, slotno, regno);
  7605. + }
  7606. +
  7607. + /* Floating-point vectors up to 16 bytes are passed in registers. */
  7608. + else if (type && VECTOR_TYPE_P (type) && mode == BLKmode)
  7609. + {
  7610. + const int size = int_size_in_bytes (type);
  7611. + gcc_assert (size <= 16);
  7612. +
  7613. + return function_arg_vector_value (size, slotno, named, regno);
  7614. + }
  7615. +
  7616. + /* v9 fp args in reg slots beyond the int reg slots get passed in regs
  7617. + but also have the slot allocated for them.
  7618. + If no prototype is in scope fp values in register slots get passed
  7619. + in two places, either fp regs and int regs or fp regs and memory. */
  7620. + else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
  7621. + && SPARC_FP_REG_P (regno))
  7622. + {
  7623. + rtx reg = gen_rtx_REG (mode, regno);
  7624. + if (cum->prototype_p || cum->libcall_p)
  7625. + return reg;
  7626. + else
  7627. + {
  7628. + rtx v0, v1;
  7629. +
  7630. + if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
  7631. + {
  7632. + int intreg;
  7633. +
  7634. + /* On incoming, we don't need to know that the value
  7635. + is passed in %f0 and %i0, and it confuses other parts
  7636. + causing needless spillage even on the simplest cases. */
  7637. + if (incoming)
  7638. + return reg;
  7639. +
  7640. + intreg = (SPARC_OUTGOING_INT_ARG_FIRST
  7641. + + (regno - SPARC_FP_ARG_FIRST) / 2);
  7642. +
  7643. + v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
  7644. + v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
  7645. + const0_rtx);
  7646. + return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
  7647. + }
  7648. + else
  7649. + {
  7650. + v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
  7651. + v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
  7652. + return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
  7653. + }
  7654. + }
  7655. + }
  7656. +
  7657. + /* All other aggregate types are passed in an integer register in a mode
  7658. + corresponding to the size of the type. */
  7659. + else if (type && AGGREGATE_TYPE_P (type))
  7660. + {
  7661. + const int size = int_size_in_bytes (type);
  7662. + gcc_assert (size <= 16);
  7663. +
  7664. + mode = int_mode_for_size (size * BITS_PER_UNIT, 0).else_blk ();
  7665. + }
  7666. +
  7667. + return gen_rtx_REG (mode, regno);
  7668. +}
  7669. +
  7670. +/* Handle the TARGET_FUNCTION_ARG target hook. */
  7671. +
  7672. +static rtx
  7673. +sparc_function_arg (cumulative_args_t cum, const function_arg_info &arg)
  7674. +{
  7675. + return sparc_function_arg_1 (cum, arg, false);
  7676. +}
  7677. +
  7678. +/* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
  7679. +
  7680. +static rtx
  7681. +sparc_function_incoming_arg (cumulative_args_t cum,
  7682. + const function_arg_info &arg)
  7683. +{
  7684. + return sparc_function_arg_1 (cum, arg, true);
  7685. +}
  7686. +
  7687. + /* For sparc64, objects requiring 16-byte alignment are passed with that alignment. */
  7688. +
  7689. +static unsigned int
  7690. +sparc_function_arg_boundary (machine_mode mode, const_tree type)
  7691. +{
  7692. + return ((TARGET_ARCH64
  7693. + && (GET_MODE_ALIGNMENT (mode) == 128
  7694. + || (type && TYPE_ALIGN (type) == 128)))
  7695. + ? 128
  7696. + : PARM_BOUNDARY);
  7697. +}
  7698. +
  7699. +/* For an arg passed partly in registers and partly in memory,
  7700. + this is the number of bytes of registers used.
  7701. + For args passed entirely in registers or entirely in memory, zero.
  7702. +
  7703. + Any arg that starts in the first 6 regs but won't entirely fit in them
  7704. + needs partial registers on v8. On v9, structures with integer
  7705. + values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
  7706. + values that begin in the last fp reg [where "last fp reg" varies with the
  7707. + mode] will be split between that reg and memory. */
  7708. +
  7709. +static int
  7710. +sparc_arg_partial_bytes (cumulative_args_t cum, const function_arg_info &arg)
  7711. +{
  7712. + int slotno, regno, padding;
  7713. +
  7714. + /* We pass false for incoming here; it doesn't matter. */
  7715. + slotno = function_arg_slotno (get_cumulative_args (cum), arg.mode, arg.type,
  7716. + arg.named, false, &regno, &padding);
  7717. +
  7718. + if (slotno == -1)
  7719. + return 0;
  7720. +
  7721. + if (TARGET_ARCH32)
  7722. + {
  7723. + /* We are guaranteed by pass_by_reference that the size of the
  7724. + argument is not greater than 8 bytes, so we only need to return
  7725. + one word if the argument is partially passed in registers. */
  7726. + const int size = GET_MODE_SIZE (arg.mode);
  7727. +
  7728. + if (size > UNITS_PER_WORD && slotno == SPARC_INT_ARG_MAX - 1)
  7729. + return UNITS_PER_WORD;
  7730. + }
  7731. + else
  7732. + {
  7733. + /* We are guaranteed by pass_by_reference that the size of the
  7734. + argument is not greater than 16 bytes, so we only need to return
  7735. + one word if the argument is partially passed in registers. */
  7736. + if (arg.aggregate_type_p ())
  7737. + {
  7738. + const int size = int_size_in_bytes (arg.type);
  7739. +
  7740. + if (size > UNITS_PER_WORD
  7741. + && (slotno == SPARC_INT_ARG_MAX - 1
  7742. + || slotno == SPARC_FP_ARG_MAX - 1))
  7743. + return UNITS_PER_WORD;
  7744. + }
  7745. + else if (GET_MODE_CLASS (arg.mode) == MODE_COMPLEX_INT
  7746. + || ((GET_MODE_CLASS (arg.mode) == MODE_COMPLEX_FLOAT
  7747. + || (arg.type && VECTOR_TYPE_P (arg.type)))
  7748. + && !(TARGET_FPU && arg.named)))
  7749. + {
  7750. + const int size = (arg.type && VECTOR_FLOAT_TYPE_P (arg.type))
  7751. + ? int_size_in_bytes (arg.type)
  7752. + : GET_MODE_SIZE (arg.mode);
  7753. +
  7754. + if (size > UNITS_PER_WORD && slotno == SPARC_INT_ARG_MAX - 1)
  7755. + return UNITS_PER_WORD;
  7756. + }
  7757. + else if (GET_MODE_CLASS (arg.mode) == MODE_COMPLEX_FLOAT
  7758. + || (arg.type && VECTOR_TYPE_P (arg.type)))
  7759. + {
  7760. + const int size = (arg.type && VECTOR_FLOAT_TYPE_P (arg.type))
  7761. + ? int_size_in_bytes (arg.type)
  7762. + : GET_MODE_SIZE (arg.mode);
  7763. +
  7764. + if (size > UNITS_PER_WORD && slotno == SPARC_FP_ARG_MAX - 1)
  7765. + return UNITS_PER_WORD;
  7766. + }
  7767. + }
  7768. +
  7769. + return 0;
  7770. +}
  7771. +
  7772. +/* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
  7773. + Update the data in CUM to advance over argument ARG. */
  7774. +
  7775. +static void
  7776. +sparc_function_arg_advance (cumulative_args_t cum_v,
  7777. + const function_arg_info &arg)
  7778. +{
  7779. + CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  7780. + tree type = arg.type;
  7781. + machine_mode mode = arg.mode;
  7782. + int regno, padding;
  7783. +
  7784. + /* We pass false for incoming here; it doesn't matter. */
  7785. + function_arg_slotno (cum, mode, type, arg.named, false, &regno, &padding);
  7786. +
  7787. + /* If argument requires leading padding, add it. */
  7788. + cum->words += padding;
  7789. +
  7790. + if (TARGET_ARCH32)
  7791. + cum->words += CEIL_NWORDS (GET_MODE_SIZE (mode));
  7792. + else
  7793. + {
  7794. + /* For types that can have BLKmode, get the size from the type. */
  7795. + if (type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
  7796. + {
  7797. + const int size = int_size_in_bytes (type);
  7798. +
  7799. + /* See comment in function_arg_record_value for empty structures. */
  7800. + if (size <= 0)
  7801. + cum->words++;
  7802. + else
  7803. + cum->words += CEIL_NWORDS (size);
  7804. + }
  7805. + else
  7806. + cum->words += CEIL_NWORDS (GET_MODE_SIZE (mode));
  7807. + }
  7808. +}
  7809. +
  7810. +/* Implement TARGET_FUNCTION_ARG_PADDING. For the 64-bit ABI structs
  7811. + are always stored left shifted in their argument slot. */
  7812. +
  7813. +static pad_direction
  7814. +sparc_function_arg_padding (machine_mode mode, const_tree type)
  7815. +{
  7816. + if (TARGET_ARCH64 && type && AGGREGATE_TYPE_P (type))
  7817. + return PAD_UPWARD;
  7818. +
  7819. + /* Fall back to the default. */
  7820. + return default_function_arg_padding (mode, type);
  7821. +}
  7822. +
  7823. +/* Handle the TARGET_RETURN_IN_MEMORY target hook.
  7824. + Specify whether to return the return value in memory. */
  7825. +
  7826. +static bool
  7827. +sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
  7828. +{
  7829. + if (TARGET_ARCH32)
  7830. + /* Original SPARC 32-bit ABI says that structures and unions, and
  7831. + quad-precision floats are returned in memory. But note that the
  7832. + first part is implemented through -fpcc-struct-return being the
  7833. + default, so here we only implement -freg-struct-return instead.
  7834. + All other base types are returned in registers.
  7835. +
  7836. + Extended ABI (as implemented by the Sun compiler) says that
  7837. + all complex floats are returned in registers (8 FP registers
  7838. + at most for '_Complex long double'). Return all complex integers
  7839. + in registers (4 at most for '_Complex long long').
  7840. +
  7841. + Vector ABI (as implemented by the Sun VIS SDK) says that vector
  7842. + integers are returned like floats of the same size, that is in
  7843. + registers up to 8 bytes and in memory otherwise. Return all
  7844. + vector floats in memory like structure and unions; note that
  7845. + they always have BLKmode like the latter. */
  7846. + return (TYPE_MODE (type) == BLKmode
  7847. + || TYPE_MODE (type) == TFmode
  7848. + || (TREE_CODE (type) == VECTOR_TYPE
  7849. + && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
  7850. + else
  7851. + /* Original SPARC 64-bit ABI says that structures and unions
  7852. + smaller than 32 bytes are returned in registers, as well as
  7853. + all other base types.
  7854. +
  7855. + Extended ABI (as implemented by the Sun compiler) says that all
  7856. + complex floats are returned in registers (8 FP registers at most
  7857. + for '_Complex long double'). Return all complex integers in
  7858. + registers (4 at most for '_Complex TItype').
  7859. +
  7860. + Vector ABI (as implemented by the Sun VIS SDK) says that vector
  7861. + integers are returned like floats of the same size, that is in
  7862. + registers. Return all vector floats like structure and unions;
  7863. + note that they always have BLKmode like the latter. */
  7864. + return (TYPE_MODE (type) == BLKmode
  7865. + && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
  7866. +}
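+
+ /* Editor's note: concrete readings of the above: on 32-bit, any structure
+    (BLKmode), 'long double' (TFmode) and integer vectors larger than
+    8 bytes go to memory; on 64-bit, only BLKmode objects larger than
+    32 bytes (e.g. a 40-byte struct) are returned in memory. */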
  7867. +
  7868. +/* Handle the TARGET_STRUCT_VALUE target hook.
  7869. + Return where to find the structure return value address. */
  7870. +
  7871. +static rtx
  7872. +sparc_struct_value_rtx (tree fndecl, int incoming)
  7873. +{
  7874. + if (TARGET_ARCH64)
  7875. + return NULL_RTX;
  7876. + else
  7877. + {
  7878. + rtx mem;
  7879. +
  7880. + if (incoming)
  7881. + mem = gen_frame_mem (Pmode, plus_constant (Pmode, frame_pointer_rtx,
  7882. + STRUCT_VALUE_OFFSET));
  7883. + else
  7884. + mem = gen_frame_mem (Pmode, plus_constant (Pmode, stack_pointer_rtx,
  7885. + STRUCT_VALUE_OFFSET));
  7886. +
  7887. + /* Only follow the SPARC ABI for fixed-size structure returns.
  7888. + Variable size structure returns are handled per the normal
  7889. + procedures in GCC. This is enabled by -mstd-struct-return. */
  7890. + if (incoming == 2
  7891. + && sparc_std_struct_return
  7892. + && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
  7893. + && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
  7894. + {
  7895. + /* We must check and adjust the return address, as it is optional
  7896. + as to whether the return object is really provided. */
  7897. + rtx ret_reg = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
  7898. + rtx scratch = gen_reg_rtx (SImode);
  7899. + rtx_code_label *endlab = gen_label_rtx ();
  7900. +
  7901. + /* Calculate the return object size. */
  7902. + tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
  7903. + rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
  7904. + /* Construct a temporary return value. */
  7905. + rtx temp_val
  7906. + = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
  7907. +
  7908. + /* Implement SPARC 32-bit psABI callee return struct checking:
  7909. +
  7910. + Fetch the instruction where we will return to and see if
  7911. + it's an unimp instruction (the most significant 10 bits
  7912. + will be zero). */
  7913. + emit_move_insn (scratch, gen_rtx_MEM (SImode,
  7914. + plus_constant (Pmode,
  7915. + ret_reg, 8)));
  7916. + /* Assume the size is valid and pre-adjust. */
  7917. + emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
  7918. + emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
  7919. + 0, endlab);
  7920. + emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
  7921. + /* Write the address of the memory pointed to by temp_val into
  7922. + the memory pointed to by mem. */
  7923. + emit_move_insn (mem, XEXP (temp_val, 0));
  7924. + emit_label (endlab);
  7925. + }
  7926. +
  7927. + return mem;
  7928. + }
  7929. +}
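+
+ /* Editor's note: a sketch of the caller-side sequence being checked above,
+    per the 32-bit psABI (names and size are illustrative):
+
+      call   f
+       nop               ! delay slot
+      unimp  12          ! high 10 bits zero, low bits encode the size
+
+    The callee loads the word at the return address + 8; if it matches the
+    expected size, it leaves the return address pre-adjusted by 4 so the
+    return lands past the unimp word; otherwise it undoes the adjustment
+    and substitutes a temporary return object. */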
  7930. +
  7931. +/* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
  7932. + For v9, function return values are subject to the same rules as arguments,
  7933. + except that up to 32 bytes may be returned in registers. */
  7934. +
  7935. +static rtx
  7936. +sparc_function_value_1 (const_tree type, machine_mode mode, bool outgoing)
  7937. +{
  7938. + /* Beware that the two values are swapped here wrt function_arg. */
  7939. + const int regbase
  7940. + = outgoing ? SPARC_INCOMING_INT_ARG_FIRST : SPARC_OUTGOING_INT_ARG_FIRST;
  7941. + enum mode_class mclass = GET_MODE_CLASS (mode);
  7942. + int regno;
  7943. +
  7944. + /* Integer vectors are handled like floats as per the Sun VIS SDK.
  7945. + Note that integer vectors larger than 16 bytes have BLKmode so
  7946. + they need to be handled like floating-point vectors below. */
  7947. + if (type && VECTOR_INTEGER_TYPE_P (type) && mode != BLKmode)
  7948. + mclass = MODE_FLOAT;
  7949. +
  7950. + if (TARGET_ARCH64 && type)
  7951. + {
  7952. + /* Structures up to 32 bytes in size are returned in registers. */
  7953. + if (TREE_CODE (type) == RECORD_TYPE)
  7954. + {
  7955. + const int size = int_size_in_bytes (type);
  7956. + gcc_assert (size <= 32);
  7957. +
  7958. + return function_arg_record_value (type, mode, 0, true, regbase);
  7959. + }
  7960. +
  7961. + /* Unions up to 32 bytes in size are returned in integer registers. */
  7962. + else if (TREE_CODE (type) == UNION_TYPE)
  7963. + {
  7964. + const int size = int_size_in_bytes (type);
  7965. + gcc_assert (size <= 32);
  7966. +
  7967. + return function_arg_union_value (size, mode, 0, regbase);
  7968. + }
  7969. +
  7970. + /* Vectors up to 32 bytes are returned in FP registers. */
  7971. + else if (VECTOR_TYPE_P (type) && mode == BLKmode)
  7972. + {
  7973. + const int size = int_size_in_bytes (type);
  7974. + gcc_assert (size <= 32);
  7975. +
  7976. + return function_arg_vector_value (size, 0, true, SPARC_FP_ARG_FIRST);
  7977. + }
  7978. +
  7979. + /* Objects that require it are returned in FP registers. */
  7980. + else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
  7981. + ;
  7982. +
  7983. + /* All other aggregate types are returned in an integer register in a
  7984. + mode corresponding to the size of the type. */
  7985. + else if (AGGREGATE_TYPE_P (type))
  7986. + {
  7987. + /* All other aggregate types are passed in an integer register
  7988. + in a mode corresponding to the size of the type. */
  7989. + const int size = int_size_in_bytes (type);
  7990. + gcc_assert (size <= 32);
  7991. +
  7992. + mode = int_mode_for_size (size * BITS_PER_UNIT, 0).else_blk ();
  7993. +
  7994. + /* ??? We probably should have made the same ABI change in
  7995. + 3.4.0 as the one we made for unions. The latter was
  7996. + required by the SCD though, while the former is not
  7997. + specified, so we favored compatibility and efficiency.
  7998. +
  7999. + Now we're stuck for aggregates larger than 16 bytes,
  8000. + because OImode vanished in the meantime. Let's not
  8001. + try to be unduly clever, and simply follow the ABI
  8002. + for unions in that case. */
  8003. + if (mode == BLKmode)
  8004. + return function_arg_union_value (size, mode, 0, regbase);
  8005. + else
  8006. + mclass = MODE_INT;
  8007. + }
  8008. +
  8009. + /* We should only have pointer and integer types at this point. This
  8010. + must match sparc_promote_function_mode. */
  8011. + else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
  8012. + mode = word_mode;
  8013. + }
  8014. +
  8015. + /* We should only have pointer and integer types at this point, except with
  8016. + -freg-struct-return. This must match sparc_promote_function_mode. */
  8017. + else if (TARGET_ARCH32
  8018. + && !(type && AGGREGATE_TYPE_P (type))
  8019. + && mclass == MODE_INT
  8020. + && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
  8021. + mode = word_mode;
  8022. +
  8023. + if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
  8024. + regno = SPARC_FP_ARG_FIRST;
  8025. + else
  8026. + regno = regbase;
  8027. +
  8028. + return gen_rtx_REG (mode, regno);
  8029. +}
  8030. +
  8031. +/* Handle TARGET_FUNCTION_VALUE.
  8032. + On the SPARC, the value is found in the first "output" register, but the
  8033. + called function leaves it in the first "input" register. */
  8034. +
  8035. +static rtx
  8036. +sparc_function_value (const_tree valtype,
  8037. + const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
  8038. + bool outgoing)
  8039. +{
  8040. + return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
  8041. +}
  8042. +
  8043. +/* Handle TARGET_LIBCALL_VALUE. */
  8044. +
  8045. +static rtx
  8046. +sparc_libcall_value (machine_mode mode,
  8047. + const_rtx fun ATTRIBUTE_UNUSED)
  8048. +{
  8049. + return sparc_function_value_1 (NULL_TREE, mode, false);
  8050. +}
  8051. +
  8052. +/* Handle FUNCTION_VALUE_REGNO_P.
  8053. + On the SPARC, the first "output" reg is used for integer values, and the
  8054. + first floating point register is used for floating point values. */
  8055. +
  8056. +static bool
  8057. +sparc_function_value_regno_p (const unsigned int regno)
  8058. +{
  8059. + return (regno == 8 || (TARGET_FPU && regno == 32));
  8060. +}
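+
+ /* For reference (an assumption about this backend's numbering, not
+ stated in the patch): hard register 8 is %o0 and hard register 32
+ is %f0, so the test above accepts exactly the first integer and
+ first floating-point return registers. */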
  8061. +
  8062. +/* Do what is necessary for `va_start'. We look at the current function
  8063. + to determine if stdarg or varargs is used and return the address of
  8064. + the first unnamed parameter. */
  8065. +
  8066. +static rtx
  8067. +sparc_builtin_saveregs (void)
  8068. +{
  8069. + int first_reg = crtl->args.info.words;
  8070. + rtx address;
  8071. + int regno;
  8072. +
  8073. + for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
  8074. + emit_move_insn (gen_rtx_MEM (word_mode,
  8075. + gen_rtx_PLUS (Pmode,
  8076. + frame_pointer_rtx,
  8077. + GEN_INT (FIRST_PARM_OFFSET (0)
  8078. + + (UNITS_PER_WORD
  8079. + * regno)))),
  8080. + gen_rtx_REG (word_mode,
  8081. + SPARC_INCOMING_INT_ARG_FIRST + regno));
  8082. +
  8083. + address = gen_rtx_PLUS (Pmode,
  8084. + frame_pointer_rtx,
  8085. + GEN_INT (FIRST_PARM_OFFSET (0)
  8086. + + UNITS_PER_WORD * first_reg));
  8087. +
  8088. + return address;
  8089. +}
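+
+ /* Sketch of what the loop emits, assuming the classic 32-bit frame
+ layout where FIRST_PARM_OFFSET is 68: with first_reg == 2, the
+ remaining incoming argument registers are dumped into their
+ reserved slots in the parameter save area,
+
+ st %i2, [%fp + 76]
+ st %i3, [%fp + 80]
+ ...
+
+ and the returned address points at the first such slot. */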
  8090. +
  8091. +/* Implement `va_start' for stdarg. */
  8092. +
  8093. +static void
  8094. +sparc_va_start (tree valist, rtx nextarg)
  8095. +{
  8096. + nextarg = expand_builtin_saveregs ();
  8097. + std_expand_builtin_va_start (valist, nextarg);
  8098. +}
  8099. +
  8100. +/* Implement `va_arg' for stdarg. */
  8101. +
  8102. +static tree
  8103. +sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
  8104. + gimple_seq *post_p)
  8105. +{
  8106. + HOST_WIDE_INT size, rsize, align;
  8107. + tree addr, incr;
  8108. + bool indirect;
  8109. + tree ptrtype = build_pointer_type (type);
  8110. +
  8111. + if (pass_va_arg_by_reference (type))
  8112. + {
  8113. + indirect = true;
  8114. + size = rsize = UNITS_PER_WORD;
  8115. + align = 0;
  8116. + }
  8117. + else
  8118. + {
  8119. + indirect = false;
  8120. + size = int_size_in_bytes (type);
  8121. + rsize = ROUND_UP (size, UNITS_PER_WORD);
  8122. + align = 0;
  8123. +
  8124. + if (TARGET_ARCH64)
  8125. + {
  8126. + /* For SPARC64, objects requiring 16-byte alignment get it. */
  8127. + if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
  8128. + align = 2 * UNITS_PER_WORD;
  8129. +
  8130. + /* SPARC-V9 ABI states that structures up to 16 bytes in size
  8131. + are left-justified in their slots. */
  8132. + if (AGGREGATE_TYPE_P (type))
  8133. + {
  8134. + if (size == 0)
  8135. + size = rsize = UNITS_PER_WORD;
  8136. + else
  8137. + size = rsize;
  8138. + }
  8139. + }
  8140. + }
  8141. +
  8142. + incr = valist;
  8143. + if (align)
  8144. + {
  8145. + incr = fold_build_pointer_plus_hwi (incr, align - 1);
  8146. + incr = fold_convert (sizetype, incr);
  8147. + incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
  8148. + size_int (-align));
  8149. + incr = fold_convert (ptr_type_node, incr);
  8150. + }
  8151. +
  8152. + gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
  8153. + addr = incr;
  8154. +
  8155. + if (BYTES_BIG_ENDIAN && size < rsize)
  8156. + addr = fold_build_pointer_plus_hwi (incr, rsize - size);
  8157. +
  8158. + if (indirect)
  8159. + {
  8160. + addr = fold_convert (build_pointer_type (ptrtype), addr);
  8161. + addr = build_va_arg_indirect_ref (addr);
  8162. + }
  8163. +
  8164. + /* If the address isn't aligned properly for the type, we need a temporary.
  8165. + FIXME: This is inefficient, usually we can do this in registers. */
  8166. + else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
  8167. + {
  8168. + tree tmp = create_tmp_var (type, "va_arg_tmp");
  8169. + tree dest_addr = build_fold_addr_expr (tmp);
  8170. + tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
  8171. + 3, dest_addr, addr, size_int (rsize));
  8172. + TREE_ADDRESSABLE (tmp) = 1;
  8173. + gimplify_and_add (copy, pre_p);
  8174. + addr = dest_addr;
  8175. + }
  8176. +
  8177. + else
  8178. + addr = fold_convert (ptrtype, addr);
  8179. +
  8180. + incr = fold_build_pointer_plus_hwi (incr, rsize);
  8181. + gimplify_assign (valist, incr, post_p);
  8182. +
  8183. + return build_va_arg_indirect_ref (addr);
  8184. +}
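+
+ /* Worked example (illustrative): on 64-bit SPARC, va_arg for a
+ 4-byte int uses an 8-byte slot, so size = 4 and rsize = 8. Being
+ big-endian, the value sits in the high-addressed half, hence the
+ read above happens at incr + 4 while valist still advances by 8. */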
  8185. +
  8186. +/* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
  8187. + Specify whether the vector mode is supported by the hardware. */
  8188. +
  8189. +static bool
  8190. +sparc_vector_mode_supported_p (machine_mode mode)
  8191. +{
  8192. + return TARGET_VIS && VECTOR_MODE_P (mode);
  8193. +}
  8194. +
  8195. +/* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
  8196. +
  8197. +static machine_mode
  8198. +sparc_preferred_simd_mode (scalar_mode mode)
  8199. +{
  8200. + if (TARGET_VIS)
  8201. + switch (mode)
  8202. + {
  8203. + case E_SImode:
  8204. + return V2SImode;
  8205. + case E_HImode:
  8206. + return V4HImode;
  8207. + case E_QImode:
  8208. + return V8QImode;
  8209. +
  8210. + default:;
  8211. + }
  8212. +
  8213. + return word_mode;
  8214. +}
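+
+ /* E.g. a loop over chars is vectorized with V8QImode when VIS is
+ enabled: eight 8-bit lanes filling one 64-bit VIS (FP) register;
+ without VIS the vectorizer falls back to word_mode. */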
  8215. +
  8216. +/* Implement TARGET_CAN_FOLLOW_JUMP. */
  8217. +
  8218. +static bool
  8219. +sparc_can_follow_jump (const rtx_insn *follower, const rtx_insn *followee)
  8220. +{
  8221. + /* Do not fold unconditional jumps that have been created for crossing
  8222. + partition boundaries. */
  8223. + if (CROSSING_JUMP_P (followee) && !CROSSING_JUMP_P (follower))
  8224. + return false;
  8225. +
  8226. + return true;
  8227. +}
  8228. +
  8229. +/* Return the string to output an unconditional branch to LABEL, which is
  8230. + the operand number of the label.
  8231. +
  8232. + DEST is the destination insn (i.e. the label), INSN is the source. */
  8233. +
  8234. +const char *
  8235. +output_ubranch (rtx dest, rtx_insn *insn)
  8236. +{
  8237. + static char string[64];
  8238. + bool v9_form = false;
  8239. + int delta;
  8240. + char *p;
  8241. +
  8242. + /* Even if we are trying to use cbcond for this, evaluate
  8243. + whether we can use V9 branches as our backup plan. */
  8244. + delta = 5000000;
  8245. + if (!CROSSING_JUMP_P (insn) && INSN_ADDRESSES_SET_P ())
  8246. + delta = (INSN_ADDRESSES (INSN_UID (dest))
  8247. + - INSN_ADDRESSES (INSN_UID (insn)));
  8248. +
  8249. + /* Leave some instructions for "slop". */
  8250. + if (TARGET_V9 && delta >= -260000 && delta < 260000)
  8251. + v9_form = true;
  8252. +
  8253. + if (TARGET_CBCOND)
  8254. + {
  8255. + bool emit_nop = emit_cbcond_nop (insn);
  8256. + bool far = false;
  8257. + const char *rval;
  8258. +
  8259. + if (delta < -500 || delta > 500)
  8260. + far = true;
  8261. +
  8262. + if (far)
  8263. + {
  8264. + if (v9_form)
  8265. + rval = "ba,a,pt\t%%xcc, %l0";
  8266. + else
  8267. + rval = "b,a\t%l0";
  8268. + }
  8269. + else
  8270. + {
  8271. + if (emit_nop)
  8272. + rval = "cwbe\t%%g0, %%g0, %l0\n\tnop";
  8273. + else
  8274. + rval = "cwbe\t%%g0, %%g0, %l0";
  8275. + }
  8276. + return rval;
  8277. + }
  8278. +
  8279. + if (v9_form)
  8280. + strcpy (string, "ba%*,pt\t%%xcc, ");
  8281. + else
  8282. + strcpy (string, "b%*\t");
  8283. +
  8284. + p = strchr (string, '\0');
  8285. + *p++ = '%';
  8286. + *p++ = 'l';
  8287. + *p++ = '0';
  8288. + *p++ = '%';
  8289. + *p++ = '(';
  8290. + *p = '\0';
  8291. +
  8292. + return string;
  8293. +}
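+
+ /* Sample outputs (illustrative): with cbcond and a nearby target,
+ "cwbe %g0, %g0, .L5" -- comparing %g0 against itself is always
+ equal, giving an unconditional compare-and-branch; otherwise
+ "b .L5", or "ba,pt %xcc, .L5" when the V9 form is in range. */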
  8294. +
  8295. +/* Return the string to output a conditional branch to LABEL, which is
  8296. + the operand number of the label. OP is the conditional expression.
  8297. + XEXP (OP, 0) is assumed to be a condition code register (integer or
  8298. + floating point) and its mode specifies what kind of comparison we made.
  8299. +
  8300. + DEST is the destination insn (i.e. the label), INSN is the source.
  8301. +
  8302. + REVERSED is nonzero if we should reverse the sense of the comparison.
  8303. +
  8304. + ANNUL is nonzero if we should generate an annulling branch. */
  8305. +
  8306. +const char *
  8307. +output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
  8308. + rtx_insn *insn)
  8309. +{
  8310. + static char string[64];
  8311. + enum rtx_code code = GET_CODE (op);
  8312. + rtx cc_reg = XEXP (op, 0);
  8313. + machine_mode mode = GET_MODE (cc_reg);
  8314. + const char *labelno, *branch;
  8315. + int spaces = 8, far;
  8316. + char *p;
  8317. +
  8318. + /* v9 branches are limited to +-1MB. If it is too far away,
  8319. + change
  8320. +
  8321. + bne,pt %xcc, .LC30
  8322. +
  8323. + to
  8324. +
  8325. + be,pn %xcc, .+12
  8326. + nop
  8327. + ba .LC30
  8328. +
  8329. + and
  8330. +
  8331. + fbne,a,pn %fcc2, .LC29
  8332. +
  8333. + to
  8334. +
  8335. + fbe,pt %fcc2, .+16
  8336. + nop
  8337. + ba .LC29 */
  8338. +
  8339. + far = TARGET_V9 && (get_attr_length (insn) >= 3);
  8340. + if (reversed ^ far)
  8341. + {
  8342. + /* Reversal of FP compares takes care -- an ordered compare
  8343. + becomes an unordered compare and vice versa. */
  8344. + if (mode == CCFPmode || mode == CCFPEmode)
  8345. + code = reverse_condition_maybe_unordered (code);
  8346. + else
  8347. + code = reverse_condition (code);
  8348. + }
  8349. +
  8350. + /* Start by writing the branch condition. */
  8351. + if (mode == CCFPmode || mode == CCFPEmode)
  8352. + {
  8353. + switch (code)
  8354. + {
  8355. + case NE:
  8356. + branch = "fbne";
  8357. + break;
  8358. + case EQ:
  8359. + branch = "fbe";
  8360. + break;
  8361. + case GE:
  8362. + branch = "fbge";
  8363. + break;
  8364. + case GT:
  8365. + branch = "fbg";
  8366. + break;
  8367. + case LE:
  8368. + branch = "fble";
  8369. + break;
  8370. + case LT:
  8371. + branch = "fbl";
  8372. + break;
  8373. + case UNORDERED:
  8374. + branch = "fbu";
  8375. + break;
  8376. + case ORDERED:
  8377. + branch = "fbo";
  8378. + break;
  8379. + case UNGT:
  8380. + branch = "fbug";
  8381. + break;
  8382. + case UNLT:
  8383. + branch = "fbul";
  8384. + break;
  8385. + case UNEQ:
  8386. + branch = "fbue";
  8387. + break;
  8388. + case UNGE:
  8389. + branch = "fbuge";
  8390. + break;
  8391. + case UNLE:
  8392. + branch = "fbule";
  8393. + break;
  8394. + case LTGT:
  8395. + branch = "fblg";
  8396. + break;
  8397. + default:
  8398. + gcc_unreachable ();
  8399. + }
  8400. +
  8401. + /* ??? !v9: FP branches cannot be preceded by another floating point
  8402. + insn. Because there is currently no concept of pre-delay slots,
  8403. + we can fix this only by always emitting a nop before a floating
  8404. + point branch. */
  8405. +
  8406. + string[0] = '\0';
  8407. + if (! TARGET_V9)
  8408. + strcpy (string, "nop\n\t");
  8409. + strcat (string, branch);
  8410. + }
  8411. + else
  8412. + {
  8413. + switch (code)
  8414. + {
  8415. + case NE:
  8416. + if (mode == CCVmode || mode == CCXVmode)
  8417. + branch = "bvs";
  8418. + else
  8419. + branch = "bne";
  8420. + break;
  8421. + case EQ:
  8422. + if (mode == CCVmode || mode == CCXVmode)
  8423. + branch = "bvc";
  8424. + else
  8425. + branch = "be";
  8426. + break;
  8427. + case GE:
  8428. + if (mode == CCNZmode || mode == CCXNZmode)
  8429. + branch = "bpos";
  8430. + else
  8431. + branch = "bge";
  8432. + break;
  8433. + case GT:
  8434. + branch = "bg";
  8435. + break;
  8436. + case LE:
  8437. + branch = "ble";
  8438. + break;
  8439. + case LT:
  8440. + if (mode == CCNZmode || mode == CCXNZmode)
  8441. + branch = "bneg";
  8442. + else
  8443. + branch = "bl";
  8444. + break;
  8445. + case GEU:
  8446. + branch = "bgeu";
  8447. + break;
  8448. + case GTU:
  8449. + branch = "bgu";
  8450. + break;
  8451. + case LEU:
  8452. + branch = "bleu";
  8453. + break;
  8454. + case LTU:
  8455. + branch = "blu";
  8456. + break;
  8457. + default:
  8458. + gcc_unreachable ();
  8459. + }
  8460. + strcpy (string, branch);
  8461. + }
  8462. + spaces -= strlen (branch);
  8463. + p = strchr (string, '\0');
  8464. +
  8465. + /* Now add the annulling, the label, and a possible nop. */
  8466. + if (annul && ! far)
  8467. + {
  8468. + strcpy (p, ",a");
  8469. + p += 2;
  8470. + spaces -= 2;
  8471. + }
  8472. +
  8473. + if (TARGET_V9)
  8474. + {
  8475. + rtx note;
  8476. + int v8 = 0;
  8477. +
  8478. + if (! far && insn && INSN_ADDRESSES_SET_P ())
  8479. + {
  8480. + int delta = (INSN_ADDRESSES (INSN_UID (dest))
  8481. + - INSN_ADDRESSES (INSN_UID (insn)));
  8482. + /* Leave some instructions for "slop". */
  8483. + if (delta < -260000 || delta >= 260000)
  8484. + v8 = 1;
  8485. + }
  8486. +
  8487. + switch (mode)
  8488. + {
  8489. + case E_CCmode:
  8490. + case E_CCNZmode:
  8491. + case E_CCCmode:
  8492. + case E_CCVmode:
  8493. + labelno = "%%icc, ";
  8494. + if (v8)
  8495. + labelno = "";
  8496. + break;
  8497. + case E_CCXmode:
  8498. + case E_CCXNZmode:
  8499. + case E_CCXCmode:
  8500. + case E_CCXVmode:
  8501. + labelno = "%%xcc, ";
  8502. + gcc_assert (!v8);
  8503. + break;
  8504. + case E_CCFPmode:
  8505. + case E_CCFPEmode:
  8506. + {
  8507. + static char v9_fcc_labelno[] = "%%fccX, ";
  8508. + /* Set the char indicating the number of the fcc reg to use. */
  8509. + v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
  8510. + labelno = v9_fcc_labelno;
  8511. + if (v8)
  8512. + {
  8513. + gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
  8514. + labelno = "";
  8515. + }
  8516. + }
  8517. + break;
  8518. + default:
  8519. + gcc_unreachable ();
  8520. + }
  8521. +
  8522. + if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
  8523. + {
  8524. + strcpy (p,
  8525. + ((profile_probability::from_reg_br_prob_note (XINT (note, 0))
  8526. + >= profile_probability::even ()) ^ far)
  8527. + ? ",pt" : ",pn");
  8528. + p += 3;
  8529. + spaces -= 3;
  8530. + }
  8531. + }
  8532. + else
  8533. + labelno = "";
  8534. +
  8535. + if (spaces > 0)
  8536. + *p++ = '\t';
  8537. + else
  8538. + *p++ = ' ';
  8539. + strcpy (p, labelno);
  8540. + p = strchr (p, '\0');
  8541. + if (far)
  8542. + {
  8543. + strcpy (p, ".+12\n\t nop\n\tb\t");
  8544. + /* Skip the next insn if requested or
  8545. + if we know that it will be a nop. */
  8546. + if (annul || ! final_sequence)
  8547. + p[3] = '6';
  8548. + p += 14;
  8549. + }
  8550. + *p++ = '%';
  8551. + *p++ = 'l';
  8552. + *p++ = label + '0';
  8553. + *p++ = '%';
  8554. + *p++ = '#';
  8555. + *p = '\0';
  8556. +
  8557. + return string;
  8558. +}
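+
+ /* Note on the ,pt/,pn suffixes emitted above: they encode static
+ branch prediction (predict taken/not taken) derived from the
+ REG_BR_PROB note; for a "far" branch the sense is inverted, via
+ the ^ far above, because the condition itself was reversed. */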
  8559. +
  8560. +/* Emit a library call comparison between floating point X and Y.
  8561. + COMPARISON is the operator to compare with (EQ, NE, GT, etc).
  8562. + Return the new operator to be used in the comparison sequence.
  8563. +
  8564. + TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
  8565. + values as arguments instead of the TFmode registers themselves,
  8566. + which is why we cannot call emit_float_lib_cmp. */
  8567. +
  8568. +rtx
  8569. +sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
  8570. +{
  8571. + const char *qpfunc;
  8572. + rtx slot0, slot1, result, tem, tem2, libfunc;
  8573. + machine_mode mode;
  8574. + enum rtx_code new_comparison;
  8575. +
  8576. + switch (comparison)
  8577. + {
  8578. + case EQ:
  8579. + qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
  8580. + break;
  8581. +
  8582. + case NE:
  8583. + qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
  8584. + break;
  8585. +
  8586. + case GT:
  8587. + qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
  8588. + break;
  8589. +
  8590. + case GE:
  8591. + qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
  8592. + break;
  8593. +
  8594. + case LT:
  8595. + qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
  8596. + break;
  8597. +
  8598. + case LE:
  8599. + qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
  8600. + break;
  8601. +
  8602. + case ORDERED:
  8603. + case UNORDERED:
  8604. + case UNGT:
  8605. + case UNLT:
  8606. + case UNEQ:
  8607. + case UNGE:
  8608. + case UNLE:
  8609. + case LTGT:
  8610. + qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
  8611. + break;
  8612. +
  8613. + default:
  8614. + gcc_unreachable ();
  8615. + }
  8616. +
  8617. + if (TARGET_ARCH64)
  8618. + {
  8619. + if (MEM_P (x))
  8620. + {
  8621. + tree expr = MEM_EXPR (x);
  8622. + if (expr)
  8623. + mark_addressable (expr);
  8624. + slot0 = x;
  8625. + }
  8626. + else
  8627. + {
  8628. + slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
  8629. + emit_move_insn (slot0, x);
  8630. + }
  8631. +
  8632. + if (MEM_P (y))
  8633. + {
  8634. + tree expr = MEM_EXPR (y);
  8635. + if (expr)
  8636. + mark_addressable (expr);
  8637. + slot1 = y;
  8638. + }
  8639. + else
  8640. + {
  8641. + slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
  8642. + emit_move_insn (slot1, y);
  8643. + }
  8644. +
  8645. + libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
  8646. + emit_library_call (libfunc, LCT_NORMAL,
  8647. + DImode,
  8648. + XEXP (slot0, 0), Pmode,
  8649. + XEXP (slot1, 0), Pmode);
  8650. + mode = DImode;
  8651. + }
  8652. + else
  8653. + {
  8654. + libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
  8655. + emit_library_call (libfunc, LCT_NORMAL,
  8656. + SImode,
  8657. + x, TFmode, y, TFmode);
  8658. + mode = SImode;
  8659. + }
  8660. +
  8661. +
  8662. + /* Immediately move the result of the libcall into a pseudo
  8663. + register so reload doesn't clobber the value if it needs
  8664. + the return register for a spill reg. */
  8665. + result = gen_reg_rtx (mode);
  8666. + emit_move_insn (result, hard_libcall_value (mode, libfunc));
  8667. +
  8668. + switch (comparison)
  8669. + {
  8670. + default:
  8671. + return gen_rtx_NE (VOIDmode, result, const0_rtx);
  8672. + case ORDERED:
  8673. + case UNORDERED:
  8674. + new_comparison = (comparison == UNORDERED ? EQ : NE);
  8675. + return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
  8676. + case UNGT:
  8677. + case UNGE:
  8678. + new_comparison = (comparison == UNGT ? GT : NE);
  8679. + return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
  8680. + case UNLE:
  8681. + return gen_rtx_NE (VOIDmode, result, const2_rtx);
  8682. + case UNLT:
  8683. + tem = gen_reg_rtx (mode);
  8684. + if (TARGET_ARCH32)
  8685. + emit_insn (gen_andsi3 (tem, result, const1_rtx));
  8686. + else
  8687. + emit_insn (gen_anddi3 (tem, result, const1_rtx));
  8688. + return gen_rtx_NE (VOIDmode, tem, const0_rtx);
  8689. + case UNEQ:
  8690. + case LTGT:
  8691. + tem = gen_reg_rtx (mode);
  8692. + if (TARGET_ARCH32)
  8693. + emit_insn (gen_addsi3 (tem, result, const1_rtx));
  8694. + else
  8695. + emit_insn (gen_adddi3 (tem, result, const1_rtx));
  8696. + tem2 = gen_reg_rtx (mode);
  8697. + if (TARGET_ARCH32)
  8698. + emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
  8699. + else
  8700. + emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
  8701. + new_comparison = (comparison == UNEQ ? EQ : NE);
  8702. + return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
  8703. + }
  8704. +
  8705. + gcc_unreachable ();
  8706. +}
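+
+ /* Decoding sketch, assuming the SPARC ABI convention that _Q_cmp
+ and _Qp_cmp return 0 = equal, 1 = less, 2 = greater and
+ 3 = unordered:
+
+ UNORDERED result == 3 ORDERED result != 3
+ UNGT result > 1 UNGE result != 1
+ UNLE result != 2 UNLT (result & 1) != 0
+ UNEQ ((result + 1) & 2) == 0 LTGT ((result + 1) & 2) != 0
+
+ which is exactly what the switch above generates. */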
  8707. +
  8708. +/* Generate an unsigned DImode to FP conversion. This is the same code
  8709. + optabs would emit if we didn't have TFmode patterns. */
  8710. +
  8711. +void
  8712. +sparc_emit_floatunsdi (rtx *operands, machine_mode mode)
  8713. +{
  8714. + rtx i0, i1, f0, in, out;
  8715. +
  8716. + out = operands[0];
  8717. + in = force_reg (DImode, operands[1]);
  8718. + rtx_code_label *neglab = gen_label_rtx ();
  8719. + rtx_code_label *donelab = gen_label_rtx ();
  8720. + i0 = gen_reg_rtx (DImode);
  8721. + i1 = gen_reg_rtx (DImode);
  8722. + f0 = gen_reg_rtx (mode);
  8723. +
  8724. + emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
  8725. +
  8726. + emit_insn (gen_rtx_SET (out, gen_rtx_FLOAT (mode, in)));
  8727. + emit_jump_insn (gen_jump (donelab));
  8728. + emit_barrier ();
  8729. +
  8730. + emit_label (neglab);
  8731. +
  8732. + emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
  8733. + emit_insn (gen_anddi3 (i1, in, const1_rtx));
  8734. + emit_insn (gen_iordi3 (i0, i0, i1));
  8735. + emit_insn (gen_rtx_SET (f0, gen_rtx_FLOAT (mode, i0)));
  8736. + emit_insn (gen_rtx_SET (out, gen_rtx_PLUS (mode, f0, f0)));
  8737. +
  8738. + emit_label (donelab);
  8739. +}
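+
+ /* Descriptive note: the negative path is the classic halve-and-double
+ trick, i0 = (in >> 1) | (in & 1) followed by out = (fp)i0 + (fp)i0.
+ OR-ing the shifted-out bit back in acts as a sticky bit, so
+ round-to-nearest still produces the correctly rounded result. */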
  8740. +
  8741. +/* Generate an FP to unsigned DImode conversion. This is the same code
  8742. + optabs would emit if we didn't have TFmode patterns. */
  8743. +
  8744. +void
  8745. +sparc_emit_fixunsdi (rtx *operands, machine_mode mode)
  8746. +{
  8747. + rtx i0, i1, f0, in, out, limit;
  8748. +
  8749. + out = operands[0];
  8750. + in = force_reg (mode, operands[1]);
  8751. + rtx_code_label *neglab = gen_label_rtx ();
  8752. + rtx_code_label *donelab = gen_label_rtx ();
  8753. + i0 = gen_reg_rtx (DImode);
  8754. + i1 = gen_reg_rtx (DImode);
  8755. + limit = gen_reg_rtx (mode);
  8756. + f0 = gen_reg_rtx (mode);
  8757. +
  8758. + emit_move_insn (limit,
  8759. + const_double_from_real_value (
  8760. + REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
  8761. + emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
  8762. +
  8763. + emit_insn (gen_rtx_SET (out,
  8764. + gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
  8765. + emit_jump_insn (gen_jump (donelab));
  8766. + emit_barrier ();
  8767. +
  8768. + emit_label (neglab);
  8769. +
  8770. + emit_insn (gen_rtx_SET (f0, gen_rtx_MINUS (mode, in, limit)));
  8771. + emit_insn (gen_rtx_SET (i0,
  8772. + gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
  8773. + emit_insn (gen_movdi (i1, const1_rtx));
  8774. + emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
  8775. + emit_insn (gen_xordi3 (out, i0, i1));
  8776. +
  8777. + emit_label (donelab);
  8778. +}
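+
+ /* Descriptive note: "limit" above is 2^63, so for in >= 2^63 the
+ sequence computes (DImode)(in - 2^63) and then sets bit 63 with
+ the xor, which in unsigned arithmetic adds the 2^63 back. */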
  8779. +
  8780. +/* Return the string to output a compare and branch instruction to DEST.
  8781. + DEST is the destination insn (i.e. the label), INSN is the source,
  8782. + and OP is the conditional expression. */
  8783. +
  8784. +const char *
  8785. +output_cbcond (rtx op, rtx dest, rtx_insn *insn)
  8786. +{
  8787. + machine_mode mode = GET_MODE (XEXP (op, 0));
  8788. + enum rtx_code code = GET_CODE (op);
  8789. + const char *cond_str, *tmpl;
  8790. + int far, emit_nop, len;
  8791. + static char string[64];
  8792. + char size_char;
  8793. +
  8794. + /* Compare and Branch is limited to +-2KB. If it is too far away,
  8795. + change
  8796. +
  8797. + cxbne X, Y, .LC30
  8798. +
  8799. + to
  8800. +
  8801. + cxbe X, Y, .+16
  8802. + nop
  8803. + ba,pt xcc, .LC30
  8804. + nop */
  8805. +
  8806. + len = get_attr_length (insn);
  8807. +
  8808. + far = len == 4;
  8809. + emit_nop = len == 2;
  8810. +
  8811. + if (far)
  8812. + code = reverse_condition (code);
  8813. +
  8814. + size_char = ((mode == SImode) ? 'w' : 'x');
  8815. +
  8816. + switch (code)
  8817. + {
  8818. + case NE:
  8819. + cond_str = "ne";
  8820. + break;
  8821. +
  8822. + case EQ:
  8823. + cond_str = "e";
  8824. + break;
  8825. +
  8826. + case GE:
  8827. + cond_str = "ge";
  8828. + break;
  8829. +
  8830. + case GT:
  8831. + cond_str = "g";
  8832. + break;
  8833. +
  8834. + case LE:
  8835. + cond_str = "le";
  8836. + break;
  8837. +
  8838. + case LT:
  8839. + cond_str = "l";
  8840. + break;
  8841. +
  8842. + case GEU:
  8843. + cond_str = "cc";
  8844. + break;
  8845. +
  8846. + case GTU:
  8847. + cond_str = "gu";
  8848. + break;
  8849. +
  8850. + case LEU:
  8851. + cond_str = "leu";
  8852. + break;
  8853. +
  8854. + case LTU:
  8855. + cond_str = "cs";
  8856. + break;
  8857. +
  8858. + default:
  8859. + gcc_unreachable ();
  8860. + }
  8861. +
  8862. + if (far)
  8863. + {
  8864. + int veryfar = 1, delta;
  8865. +
  8866. + if (INSN_ADDRESSES_SET_P ())
  8867. + {
  8868. + delta = (INSN_ADDRESSES (INSN_UID (dest))
  8869. + - INSN_ADDRESSES (INSN_UID (insn)));
  8870. + /* Leave some instructions for "slop". */
  8871. + if (delta >= -260000 && delta < 260000)
  8872. + veryfar = 0;
  8873. + }
  8874. +
  8875. + if (veryfar)
  8876. + tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tb\t%%3\n\tnop";
  8877. + else
  8878. + tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tba,pt\t%%%%xcc, %%3\n\tnop";
  8879. + }
  8880. + else
  8881. + {
  8882. + if (emit_nop)
  8883. + tmpl = "c%cb%s\t%%1, %%2, %%3\n\tnop";
  8884. + else
  8885. + tmpl = "c%cb%s\t%%1, %%2, %%3";
  8886. + }
  8887. +
  8888. + snprintf (string, sizeof(string), tmpl, size_char, cond_str);
  8889. +
  8890. + return string;
  8891. +}
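+
+ /* Sample outputs (illustrative): an in-range SImode EQ comparison
+ yields "cwbe %1, %2, %3"; for DImode the size letter becomes 'x',
+ e.g. "cxbe %1, %2, %3". */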
  8892. +
  8893. +/* Return the string to output a conditional branch to LABEL, testing
  8894. + register REG. LABEL is the operand number of the label; REG is the
  8895. + operand number of the reg. OP is the conditional expression. The mode
  8896. + of REG says what kind of comparison we made.
  8897. +
  8898. + DEST is the destination insn (i.e. the label), INSN is the source.
  8899. +
  8900. + REVERSED is nonzero if we should reverse the sense of the comparison.
  8901. +
  8902. + ANNUL is nonzero if we should generate an annulling branch. */
  8903. +
  8904. +const char *
  8905. +output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
  8906. + int annul, rtx_insn *insn)
  8907. +{
  8908. + static char string[64];
  8909. + enum rtx_code code = GET_CODE (op);
  8910. + machine_mode mode = GET_MODE (XEXP (op, 0));
  8911. + rtx note;
  8912. + int far;
  8913. + char *p;
  8914. +
  8915. + /* Branches on register are limited to +-128KB. If the target is too far away,
  8916. + change
  8917. +
  8918. + brnz,pt %g1, .LC30
  8919. +
  8920. + to
  8921. +
  8922. + brz,pn %g1, .+12
  8923. + nop
  8924. + ba,pt %xcc, .LC30
  8925. +
  8926. + and
  8927. +
  8928. + brgez,a,pn %o1, .LC29
  8929. +
  8930. + to
  8931. +
  8932. + brlz,pt %o1, .+16
  8933. + nop
  8934. + ba,pt %xcc, .LC29 */
  8935. +
  8936. + far = get_attr_length (insn) >= 3;
  8937. +
  8938. + /* If not floating-point or if EQ or NE, we can just reverse the code. */
  8939. + if (reversed ^ far)
  8940. + code = reverse_condition (code);
  8941. +
  8942. + /* Only 64-bit versions of these instructions exist. */
  8943. + gcc_assert (mode == DImode);
  8944. +
  8945. + /* Start by writing the branch condition. */
  8946. +
  8947. + switch (code)
  8948. + {
  8949. + case NE:
  8950. + strcpy (string, "brnz");
  8951. + break;
  8952. +
  8953. + case EQ:
  8954. + strcpy (string, "brz");
  8955. + break;
  8956. +
  8957. + case GE:
  8958. + strcpy (string, "brgez");
  8959. + break;
  8960. +
  8961. + case LT:
  8962. + strcpy (string, "brlz");
  8963. + break;
  8964. +
  8965. + case LE:
  8966. + strcpy (string, "brlez");
  8967. + break;
  8968. +
  8969. + case GT:
  8970. + strcpy (string, "brgz");
  8971. + break;
  8972. +
  8973. + default:
  8974. + gcc_unreachable ();
  8975. + }
  8976. +
  8977. + p = strchr (string, '\0');
  8978. +
  8979. + /* Now add the annulling, reg, label, and nop. */
  8980. + if (annul && ! far)
  8981. + {
  8982. + strcpy (p, ",a");
  8983. + p += 2;
  8984. + }
  8985. +
  8986. + if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
  8987. + {
  8988. + strcpy (p,
  8989. + ((profile_probability::from_reg_br_prob_note (XINT (note, 0))
  8990. + >= profile_probability::even ()) ^ far)
  8991. + ? ",pt" : ",pn");
  8992. + p += 3;
  8993. + }
  8994. +
  8995. + *p = p < string + 8 ? '\t' : ' ';
  8996. + p++;
  8997. + *p++ = '%';
  8998. + *p++ = '0' + reg;
  8999. + *p++ = ',';
  9000. + *p++ = ' ';
  9001. + if (far)
  9002. + {
  9003. + int veryfar = 1, delta;
  9004. +
  9005. + if (INSN_ADDRESSES_SET_P ())
  9006. + {
  9007. + delta = (INSN_ADDRESSES (INSN_UID (dest))
  9008. + - INSN_ADDRESSES (INSN_UID (insn)));
  9009. + /* Leave some instructions for "slop". */
  9010. + if (delta >= -260000 && delta < 260000)
  9011. + veryfar = 0;
  9012. + }
  9013. +
  9014. + strcpy (p, ".+12\n\t nop\n\t");
  9015. + /* Skip the next insn if requested or
  9016. + if we know that it will be a nop. */
  9017. + if (annul || ! final_sequence)
  9018. + p[3] = '6';
  9019. + p += 12;
  9020. + if (veryfar)
  9021. + {
  9022. + strcpy (p, "b\t");
  9023. + p += 2;
  9024. + }
  9025. + else
  9026. + {
  9027. + strcpy (p, "ba,pt\t%%xcc, ");
  9028. + p += 13;
  9029. + }
  9030. + }
  9031. + *p++ = '%';
  9032. + *p++ = 'l';
  9033. + *p++ = '0' + label;
  9034. + *p++ = '%';
  9035. + *p++ = '#';
  9036. + *p = '\0';
  9037. +
  9038. + return string;
  9039. +}
  9040. +
  9041. + /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
  9042. + Such instructions cannot be used in the delay slot of a return insn on v9.
  9043. + If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
  9044. + */
  9045. +
  9046. +static int
  9047. +epilogue_renumber (register rtx *where, int test)
  9048. +{
  9049. + register const char *fmt;
  9050. + register int i;
  9051. + register enum rtx_code code;
  9052. +
  9053. + if (*where == 0)
  9054. + return 0;
  9055. +
  9056. + code = GET_CODE (*where);
  9057. +
  9058. + switch (code)
  9059. + {
  9060. + case REG:
  9061. + if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
  9062. + return 1;
  9063. + if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
  9064. + *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
  9065. + /* fallthrough */
  9066. + case SCRATCH:
  9067. + case CC0:
  9068. + case PC:
  9069. + case CONST_INT:
  9070. + case CONST_WIDE_INT:
  9071. + case CONST_DOUBLE:
  9072. + return 0;
  9073. +
  9074. + /* Do not replace the frame pointer with the stack pointer because
  9075. + it can cause the delayed instruction to load below the stack.
  9076. + This occurs when instructions like:
  9077. +
  9078. + (set (reg/i:SI 24 %i0)
  9079. + (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
  9080. + (const_int -20 [0xffffffec])) 0))
  9081. +
  9082. + are in the return delayed slot. */
  9083. + case PLUS:
  9084. + if (GET_CODE (XEXP (*where, 0)) == REG
  9085. + && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
  9086. + && (GET_CODE (XEXP (*where, 1)) != CONST_INT
  9087. + || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
  9088. + return 1;
  9089. + break;
  9090. +
  9091. + case MEM:
  9092. + if (SPARC_STACK_BIAS
  9093. + && GET_CODE (XEXP (*where, 0)) == REG
  9094. + && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
  9095. + return 1;
  9096. + break;
  9097. +
  9098. + default:
  9099. + break;
  9100. + }
  9101. +
  9102. + fmt = GET_RTX_FORMAT (code);
  9103. +
  9104. + for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
  9105. + {
  9106. + if (fmt[i] == 'E')
  9107. + {
  9108. + register int j;
  9109. + for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
  9110. + if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
  9111. + return 1;
  9112. + }
  9113. + else if (fmt[i] == 'e'
  9114. + && epilogue_renumber (&(XEXP (*where, i)), test))
  9115. + return 1;
  9116. + }
  9117. + return 0;
  9118. +}
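+
+ /* Rationale sketch (an assumption about the V9 return insn): its
+ delay slot executes after the register window has been restored,
+ so callee %i registers must be renamed to their caller-window %o
+ names, while %l and %o registers of the callee are no longer
+ addressable at all -- hence the renaming and the rejection above. */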
  9119. +
  9120. +/* Leaf functions and non-leaf functions have different needs. */
  9121. +
  9122. +static const int
  9123. +reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
  9124. +
  9125. +static const int
  9126. +reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
  9127. +
  9128. +static const int *const reg_alloc_orders[] = {
  9129. + reg_leaf_alloc_order,
  9130. + reg_nonleaf_alloc_order};
  9131. +
  9132. +void
  9133. +order_regs_for_local_alloc (void)
  9134. +{
  9135. + static int last_order_nonleaf = 1;
  9136. +
  9137. + if (df_regs_ever_live_p (15) != last_order_nonleaf)
  9138. + {
  9139. + last_order_nonleaf = !last_order_nonleaf;
  9140. + memcpy ((char *) reg_alloc_order,
  9141. + (const char *) reg_alloc_orders[last_order_nonleaf],
  9142. + FIRST_PSEUDO_REGISTER * sizeof (int));
  9143. + }
  9144. +}
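+
+ /* Note (assumes this backend's numbering): hard register 15 is %o7,
+ which the call instruction writes, so df_regs_ever_live_p (15) is
+ a cheap proxy for "this function makes calls" and selects the
+ non-leaf allocation order. */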
  9145. +
  9146. +/* Return 1 if REG and MEM are legitimate enough to allow the various
  9147. + MEM<-->REG splits to be run. */
  9148. +
  9149. +int
  9150. +sparc_split_reg_mem_legitimate (rtx reg, rtx mem)
  9151. +{
  9152. + /* Punt if we are here by mistake. */
  9153. + gcc_assert (reload_completed);
  9154. +
  9155. + /* We must have an offsettable memory reference. */
  9156. + if (!offsettable_memref_p (mem))
  9157. + return 0;
  9158. +
  9159. + /* If we have legitimate args for ldd/std, we do not want
  9160. + the split to happen. */
  9161. + if ((REGNO (reg) % 2) == 0 && mem_min_alignment (mem, 8))
  9162. + return 0;
  9163. +
  9164. + /* Success. */
  9165. + return 1;
  9166. +}
  9167. +
  9168. +/* Split a REG <-- MEM move into a pair of moves in MODE. */
  9169. +
  9170. +void
  9171. +sparc_split_reg_mem (rtx dest, rtx src, machine_mode mode)
  9172. +{
  9173. + rtx high_part = gen_highpart (mode, dest);
  9174. + rtx low_part = gen_lowpart (mode, dest);
  9175. + rtx word0 = adjust_address (src, mode, 0);
  9176. + rtx word1 = adjust_address (src, mode, 4);
  9177. +
  9178. + if (reg_overlap_mentioned_p (high_part, word1))
  9179. + {
  9180. + emit_move_insn_1 (low_part, word1);
  9181. + emit_move_insn_1 (high_part, word0);
  9182. + }
  9183. + else
  9184. + {
  9185. + emit_move_insn_1 (high_part, word0);
  9186. + emit_move_insn_1 (low_part, word1);
  9187. + }
  9188. +}
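+
+ /* Example of the overlap case (illustrative): splitting a DImode
+ load of [%o0 + 8] into %o0/%o1 must move [%o0 + 12] into %o1
+ before [%o0 + 8] overwrites the base register held in %o0. */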
  9189. +
  9190. +/* Split a MEM <-- REG move into a pair of moves in MODE. */
  9191. +
  9192. +void
  9193. +sparc_split_mem_reg (rtx dest, rtx src, machine_mode mode)
  9194. +{
  9195. + rtx word0 = adjust_address (dest, mode, 0);
  9196. + rtx word1 = adjust_address (dest, mode, 4);
  9197. + rtx high_part = gen_highpart (mode, src);
  9198. + rtx low_part = gen_lowpart (mode, src);
  9199. +
  9200. + emit_move_insn_1 (word0, high_part);
  9201. + emit_move_insn_1 (word1, low_part);
  9202. +}
  9203. +
  9204. +/* Like sparc_split_reg_mem_legitimate but for REG <--> REG moves. */
  9205. +
  9206. +int
  9207. +sparc_split_reg_reg_legitimate (rtx reg1, rtx reg2)
  9208. +{
  9209. + /* Punt if we are here by mistake. */
  9210. + gcc_assert (reload_completed);
  9211. +
  9212. + if (GET_CODE (reg1) == SUBREG)
  9213. + reg1 = SUBREG_REG (reg1);
  9214. + if (GET_CODE (reg1) != REG)
  9215. + return 0;
  9216. + const int regno1 = REGNO (reg1);
  9217. +
  9218. + if (GET_CODE (reg2) == SUBREG)
  9219. + reg2 = SUBREG_REG (reg2);
  9220. + if (GET_CODE (reg2) != REG)
  9221. + return 0;
  9222. + const int regno2 = REGNO (reg2);
  9223. +
  9224. + if (SPARC_INT_REG_P (regno1) && SPARC_INT_REG_P (regno2))
  9225. + return 1;
  9226. +
  9227. + if (TARGET_VIS3)
  9228. + {
  9229. + if ((SPARC_INT_REG_P (regno1) && SPARC_FP_REG_P (regno2))
  9230. + || (SPARC_FP_REG_P (regno1) && SPARC_INT_REG_P (regno2)))
  9231. + return 1;
  9232. + }
  9233. +
  9234. + return 0;
  9235. +}
  9236. +
  9237. +/* Split a REG <--> REG move into a pair of moves in MODE. */
  9238. +
  9239. +void
  9240. +sparc_split_reg_reg (rtx dest, rtx src, machine_mode mode)
  9241. +{
  9242. + rtx dest1 = gen_highpart (mode, dest);
  9243. + rtx dest2 = gen_lowpart (mode, dest);
  9244. + rtx src1 = gen_highpart (mode, src);
  9245. + rtx src2 = gen_lowpart (mode, src);
  9246. +
  9247. + /* Now emit using the real source and destination we found, swapping
  9248. + the order if we detect overlap. */
  9249. + if (reg_overlap_mentioned_p (dest1, src2))
  9250. + {
  9251. + emit_move_insn_1 (dest2, src2);
  9252. + emit_move_insn_1 (dest1, src1);
  9253. + }
  9254. + else
  9255. + {
  9256. + emit_move_insn_1 (dest1, src1);
  9257. + emit_move_insn_1 (dest2, src2);
  9258. + }
  9259. +}
  9260. +
  9261. +/* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
  9262. + This makes them candidates for using ldd and std insns.
  9263. +
  9264. + Note reg1 and reg2 *must* be hard registers. */
  9265. +
  9266. +int
  9267. +registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
  9268. +{
  9269. + /* We might have been passed a SUBREG. */
  9270. + if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
  9271. + return 0;
  9272. +
  9273. + if (REGNO (reg1) % 2 != 0)
  9274. + return 0;
  9275. +
  9276. + /* Integer ldd is deprecated in SPARC V9. */
  9277. + if (TARGET_V9 && SPARC_INT_REG_P (REGNO (reg1)))
  9278. + return 0;
  9279. +
  9280. + return (REGNO (reg1) == REGNO (reg2) - 1);
  9281. +}
  9282. +
  9283. +/* Return 1 if the addresses in mem1 and mem2 are suitable for use in
  9284. + an ldd or std insn.
  9285. +
  9286. + This can only happen when addr1 and addr2, the addresses in mem1
  9287. + and mem2, are consecutive memory locations (addr1 + 4 == addr2).
  9288. + addr1 must also be aligned on a 64-bit boundary.
  9289. +
  9290. + Also iff dependent_reg_rtx is not null it should not be used to
  9291. + compute the address for mem1, i.e. we cannot optimize a sequence
  9292. + like:
  9293. + ld [%o0], %o0
  9294. + ld [%o0 + 4], %o1
  9295. + to
  9296. + ldd [%o0], %o0
  9297. + nor:
  9298. + ld [%g3 + 4], %g3
  9299. + ld [%g3], %g2
  9300. + to
  9301. + ldd [%g3], %g2
  9302. +
  9303. + But, note that the transformation from:
  9304. + ld [%g2 + 4], %g3
  9305. + ld [%g2], %g2
  9306. + to
  9307. + ldd [%g2], %g2
  9308. + is perfectly fine. Thus, the peephole2 patterns always pass us
  9309. + the destination register of the first load, never the second one.
  9310. +
  9311. + For stores we don't have a similar problem, so dependent_reg_rtx is
  9312. + NULL_RTX. */
  9313. +
  9314. +int
  9315. +mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
  9316. +{
  9317. + rtx addr1, addr2;
  9318. + unsigned int reg1;
  9319. + HOST_WIDE_INT offset1;
  9320. +
  9321. + /* The mems cannot be volatile. */
  9322. + if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
  9323. + return 0;
  9324. +
  9325. + /* MEM1 should be aligned on a 64-bit boundary. */
  9326. + if (MEM_ALIGN (mem1) < 64)
  9327. + return 0;
  9328. +
  9329. + addr1 = XEXP (mem1, 0);
  9330. + addr2 = XEXP (mem2, 0);
  9331. +
  9332. + /* Extract a register number and offset (if used) from the first addr. */
  9333. + if (GET_CODE (addr1) == PLUS)
  9334. + {
  9335. + /* If not a REG, return zero. */
  9336. + if (GET_CODE (XEXP (addr1, 0)) != REG)
  9337. + return 0;
  9338. + else
  9339. + {
  9340. + reg1 = REGNO (XEXP (addr1, 0));
  9341. + /* The offset must be constant! */
  9342. + if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
  9343. + return 0;
  9344. + offset1 = INTVAL (XEXP (addr1, 1));
  9345. + }
  9346. + }
  9347. + else if (GET_CODE (addr1) != REG)
  9348. + return 0;
  9349. + else
  9350. + {
  9351. + reg1 = REGNO (addr1);
  9352. + /* This was a simple (mem (reg)) expression. Offset is 0. */
  9353. + offset1 = 0;
  9354. + }
  9355. +
  9356. + /* Make sure the second address is a (mem (plus (reg) (const_int))). */
  9357. + if (GET_CODE (addr2) != PLUS)
  9358. + return 0;
  9359. +
  9360. + if (GET_CODE (XEXP (addr2, 0)) != REG
  9361. + || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
  9362. + return 0;
  9363. +
  9364. + if (reg1 != REGNO (XEXP (addr2, 0)))
  9365. + return 0;
  9366. +
  9367. + if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
  9368. + return 0;
  9369. +
  9370. + /* The first offset must be evenly divisible by 8 to ensure the
  9371. + address is 64-bit aligned. */
  9372. + if (offset1 % 8 != 0)
  9373. + return 0;
  9374. +
  9375. + /* The offset for the second addr must be 4 more than the first addr. */
  9376. + if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
  9377. + return 0;
  9378. +
  9379. + /* All the tests passed. addr1 and addr2 are valid for ldd and std
  9380. + instructions. */
  9381. + return 1;
  9382. +}
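+
+ /* Example pair that passes every check (illustrative, pre-V9
+ integer registers):
+
+ ld [%o2 + 8], %o0
+ ld [%o2 + 12], %o1 -> ldd [%o2 + 8], %o0
+
+ same base register, first offset divisible by 8, and the second
+ offset exactly 4 greater. */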
  9383. +
  9384. +/* Return the widened memory access made of MEM1 and MEM2 in MODE. */
  9385. +
  9386. +rtx
  9387. +widen_mem_for_ldd_peep (rtx mem1, rtx mem2, machine_mode mode)
  9388. +{
  9389. + rtx x = widen_memory_access (mem1, mode, 0);
  9390. + MEM_NOTRAP_P (x) = MEM_NOTRAP_P (mem1) && MEM_NOTRAP_P (mem2);
  9391. + return x;
  9392. +}
  9393. +
  9394. +/* Return 1 if reg is a pseudo, or is the first register in
  9395. + a hard register pair. This makes it suitable for use in
  9396. + ldd and std insns. */
  9397. +
  9398. +int
  9399. +register_ok_for_ldd (rtx reg)
  9400. +{
  9401. + /* We might have been passed a SUBREG. */
  9402. + if (!REG_P (reg))
  9403. + return 0;
  9404. +
  9405. + if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
  9406. + return (REGNO (reg) % 2 == 0);
  9407. +
  9408. + return 1;
  9409. +}
  9410. +
  9411. +/* Return 1 if OP, a MEM, has an address which is known to be
  9412. + aligned to an 8-byte boundary. */
  9413. +
  9414. +int
  9415. +memory_ok_for_ldd (rtx op)
  9416. +{
  9417. + /* In 64-bit mode, we assume that the address is word-aligned. */
  9418. + if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
  9419. + return 0;
  9420. +
  9421. + if (! can_create_pseudo_p ()
  9422. + && !strict_memory_address_p (Pmode, XEXP (op, 0)))
  9423. + return 0;
  9424. +
  9425. + return 1;
  9426. +}
  9427. +
  9428. +/* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
  9429. +
  9430. +static bool
  9431. +sparc_print_operand_punct_valid_p (unsigned char code)
  9432. +{
  9433. + if (code == '#'
  9434. + || code == '*'
  9435. + || code == '('
  9436. + || code == ')'
  9437. + || code == '_'
  9438. + || code == '&')
  9439. + return true;
  9440. +
  9441. + return false;
  9442. +}
  9443. +
  9444. +/* Implement TARGET_PRINT_OPERAND.
  9445. + Print operand X (an rtx) in assembler syntax to file FILE.
  9446. + CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
  9447. + For `%' followed by punctuation, CODE is the punctuation and X is null. */
  9448. +
  9449. +static void
  9450. +sparc_print_operand (FILE *file, rtx x, int code)
  9451. +{
  9452. + const char *s;
  9453. +
  9454. + switch (code)
  9455. + {
  9456. + case '#':
  9457. + /* Output an insn in a delay slot. */
  9458. + if (final_sequence)
  9459. + sparc_indent_opcode = 1;
  9460. + else
  9461. + fputs ("\n\t nop", file);
  9462. + return;
  9463. + case '*':
  9464. + /* Output an annul flag if there's nothing for the delay slot and we
  9465. + are optimizing. This is always used with '(' below.
  9466. + Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
  9467. + this is a dbx bug. So, we only do this when optimizing.
  9468. + On UltraSPARC, a branch in a delay slot causes a pipeline flush.
  9469. + Always emit a nop in case the next instruction is a branch. */
  9470. + if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
  9471. + fputs (",a", file);
  9472. + return;
  9473. + case '(':
  9474. + /* Output a 'nop' if there's nothing for the delay slot and we are
  9475. + not optimizing. This is always used with '*' above. */
  9476. + if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
  9477. + fputs ("\n\t nop", file);
  9478. + else if (final_sequence)
  9479. + sparc_indent_opcode = 1;
  9480. + return;
  9481. + case ')':
  9482. + /* Output the right displacement from the saved PC on function return.
  9483. + The caller may have placed an "unimp" insn immediately after the call
  9484. + so we have to account for it. This insn is used in the 32-bit ABI
  9485. + when calling a function that returns a non zero-sized structure. The
  9486. + 64-bit ABI doesn't have it. Be careful to have this test be the same
  9487. + as that for the call. The exception is when sparc_std_struct_return
  9488. + is enabled, the psABI is followed exactly and the adjustment is made
  9489. + by the code in sparc_struct_value_rtx. The call emitted is the same
  9490. + when sparc_std_struct_return is enabled. */
  9491. + if (!TARGET_ARCH64
  9492. + && cfun->returns_struct
  9493. + && !sparc_std_struct_return
  9494. + && DECL_SIZE (DECL_RESULT (current_function_decl))
  9495. + && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
  9496. + == INTEGER_CST
  9497. + && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
  9498. + fputs ("12", file);
  9499. + else
  9500. + fputc ('8', file);
  9501. + return;
  9502. + case '_':
  9503. + /* Output the Embedded Medium/Anywhere code model base register. */
  9504. + fputs (EMBMEDANY_BASE_REG, file);
  9505. + return;
  9506. + case '&':
  9507. + /* Print some local dynamic TLS name. */
  9508. + if (const char *name = get_some_local_dynamic_name ())
  9509. + assemble_name (file, name);
  9510. + else
  9511. + output_operand_lossage ("'%%&' used without any "
  9512. + "local dynamic TLS references");
  9513. + return;
  9514. +
  9515. + case 'Y':
  9516. + /* Adjust the operand to take into account a RESTORE operation. */
  9517. + if (GET_CODE (x) == CONST_INT)
  9518. + break;
  9519. + else if (GET_CODE (x) != REG)
  9520. + output_operand_lossage ("invalid %%Y operand");
  9521. + else if (REGNO (x) < 8)
  9522. + fputs (reg_names[REGNO (x)], file);
  9523. + else if (REGNO (x) >= 24 && REGNO (x) < 32)
  9524. + fputs (reg_names[REGNO (x)-16], file);
  9525. + else
  9526. + output_operand_lossage ("invalid %%Y operand");
  9527. + return;
  9528. + case 'L':
  9529. + /* Print out the low order register name of a register pair. */
  9530. + if (WORDS_BIG_ENDIAN)
  9531. + fputs (reg_names[REGNO (x)+1], file);
  9532. + else
  9533. + fputs (reg_names[REGNO (x)], file);
  9534. + return;
  9535. + case 'H':
  9536. + /* Print out the high order register name of a register pair. */
  9537. + if (WORDS_BIG_ENDIAN)
  9538. + fputs (reg_names[REGNO (x)], file);
  9539. + else
  9540. + fputs (reg_names[REGNO (x)+1], file);
  9541. + return;
  9542. + case 'R':
  9543. + /* Print out the second register name of a register pair or quad.
  9544. + I.e., R (%o0) => %o1. */
  9545. + fputs (reg_names[REGNO (x)+1], file);
  9546. + return;
  9547. + case 'S':
  9548. + /* Print out the third register name of a register quad.
  9549. + I.e., S (%o0) => %o2. */
  9550. + fputs (reg_names[REGNO (x)+2], file);
  9551. + return;
  9552. + case 'T':
  9553. + /* Print out the fourth register name of a register quad.
  9554. + I.e., T (%o0) => %o3. */
  9555. + fputs (reg_names[REGNO (x)+3], file);
  9556. + return;
  9557. + case 'x':
  9558. + /* Print a condition code register. */
  9559. + if (REGNO (x) == SPARC_ICC_REG)
  9560. + {
  9561. + switch (GET_MODE (x))
  9562. + {
  9563. + case E_CCmode:
  9564. + case E_CCNZmode:
  9565. + case E_CCCmode:
  9566. + case E_CCVmode:
  9567. + s = "%icc";
  9568. + break;
  9569. + case E_CCXmode:
  9570. + case E_CCXNZmode:
  9571. + case E_CCXCmode:
  9572. + case E_CCXVmode:
  9573. + s = "%xcc";
  9574. + break;
  9575. + default:
  9576. + gcc_unreachable ();
  9577. + }
  9578. + fputs (s, file);
  9579. + }
  9580. + else
  9581. + /* %fccN register */
  9582. + fputs (reg_names[REGNO (x)], file);
  9583. + return;
  9584. + case 'm':
  9585. + /* Print the operand's address only. */
  9586. + output_address (GET_MODE (x), XEXP (x, 0));
  9587. + return;
  9588. + case 'r':
  9589. + /* In this case we need a register. Use %g0 if the
  9590. + operand is const0_rtx. */
  9591. + if (x == const0_rtx
  9592. + || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
  9593. + {
  9594. + fputs ("%g0", file);
  9595. + return;
  9596. + }
  9597. + else
  9598. + break;
  9599. +
  9600. + case 'A':
  9601. + switch (GET_CODE (x))
  9602. + {
  9603. + case IOR:
  9604. + s = "or";
  9605. + break;
  9606. + case AND:
  9607. + s = "and";
  9608. + break;
  9609. + case XOR:
  9610. + s = "xor";
  9611. + break;
  9612. + default:
  9613. + output_operand_lossage ("invalid %%A operand");
  9614. + s = "";
  9615. + break;
  9616. + }
  9617. + fputs (s, file);
  9618. + return;
  9619. +
  9620. + case 'B':
  9621. + switch (GET_CODE (x))
  9622. + {
  9623. + case IOR:
  9624. + s = "orn";
  9625. + break;
  9626. + case AND:
  9627. + s = "andn";
  9628. + break;
  9629. + case XOR:
  9630. + s = "xnor";
  9631. + break;
  9632. + default:
  9633. + output_operand_lossage ("invalid %%B operand");
  9634. + s = "";
  9635. + break;
  9636. + }
  9637. + fputs (s, file);
  9638. + return;
  9639. +
  9640. + /* This is used by the conditional move instructions. */
  9641. + case 'C':
  9642. + {
  9643. + machine_mode mode = GET_MODE (XEXP (x, 0));
  9644. + switch (GET_CODE (x))
  9645. + {
  9646. + case NE:
  9647. + if (mode == CCVmode || mode == CCXVmode)
  9648. + s = "vs";
  9649. + else
  9650. + s = "ne";
  9651. + break;
  9652. + case EQ:
  9653. + if (mode == CCVmode || mode == CCXVmode)
  9654. + s = "vc";
  9655. + else
  9656. + s = "e";
  9657. + break;
  9658. + case GE:
  9659. + if (mode == CCNZmode || mode == CCXNZmode)
  9660. + s = "pos";
  9661. + else
  9662. + s = "ge";
  9663. + break;
  9664. + case GT:
  9665. + s = "g";
  9666. + break;
  9667. + case LE:
  9668. + s = "le";
  9669. + break;
  9670. + case LT:
  9671. + if (mode == CCNZmode || mode == CCXNZmode)
  9672. + s = "neg";
  9673. + else
  9674. + s = "l";
  9675. + break;
  9676. + case GEU:
  9677. + s = "geu";
  9678. + break;
  9679. + case GTU:
  9680. + s = "gu";
  9681. + break;
  9682. + case LEU:
  9683. + s = "leu";
  9684. + break;
  9685. + case LTU:
  9686. + s = "lu";
  9687. + break;
  9688. + case LTGT:
  9689. + s = "lg";
  9690. + break;
  9691. + case UNORDERED:
  9692. + s = "u";
  9693. + break;
  9694. + case ORDERED:
  9695. + s = "o";
  9696. + break;
  9697. + case UNLT:
  9698. + s = "ul";
  9699. + break;
  9700. + case UNLE:
  9701. + s = "ule";
  9702. + break;
  9703. + case UNGT:
  9704. + s = "ug";
  9705. + break;
  9706. + case UNGE:
  9707. + s = "uge"
  9708. + ; break;
  9709. + case UNEQ:
  9710. + s = "ue";
  9711. + break;
  9712. + default:
  9713. + output_operand_lossage ("invalid %%C operand");
  9714. + s = "";
  9715. + break;
  9716. + }
  9717. + fputs (s, file);
  9718. + return;
  9719. + }
  9720. +
  9721. + /* These are used by the movr instruction pattern. */
  9722. + case 'D':
  9723. + {
  9724. + switch (GET_CODE (x))
  9725. + {
  9726. + case NE:
  9727. + s = "ne";
  9728. + break;
  9729. + case EQ:
  9730. + s = "e";
  9731. + break;
  9732. + case GE:
  9733. + s = "gez";
  9734. + break;
  9735. + case LT:
  9736. + s = "lz";
  9737. + break;
  9738. + case LE:
  9739. + s = "lez";
  9740. + break;
  9741. + case GT:
  9742. + s = "gz";
  9743. + break;
  9744. + default:
  9745. + output_operand_lossage ("invalid %%D operand");
  9746. + s = "";
  9747. + break;
  9748. + }
  9749. + fputs (s, file);
  9750. + return;
  9751. + }
  9752. +
  9753. + case 'b':
  9754. + {
  9755. + /* Print a sign-extended character. */
  9756. + int i = trunc_int_for_mode (INTVAL (x), QImode);
  9757. + fprintf (file, "%d", i);
  9758. + return;
  9759. + }
  9760. +
  9761. + case 'f':
  9762. + /* Operand must be a MEM; write its address. */
  9763. + if (GET_CODE (x) != MEM)
  9764. + output_operand_lossage ("invalid %%f operand");
  9765. + output_address (GET_MODE (x), XEXP (x, 0));
  9766. + return;
  9767. +
  9768. + case 's':
  9769. + {
  9770. + /* Print a sign-extended 32-bit value. */
  9771. + HOST_WIDE_INT i;
  9772. + if (GET_CODE(x) == CONST_INT)
  9773. + i = INTVAL (x);
  9774. + else
  9775. + {
  9776. + output_operand_lossage ("invalid %%s operand");
  9777. + return;
  9778. + }
  9779. + i = trunc_int_for_mode (i, SImode);
  9780. + fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
  9781. + return;
  9782. + }
  9783. +
  9784. + case 0:
  9785. + /* Do nothing special. */
  9786. + break;
  9787. +
  9788. + default:
  9789. + /* Undocumented flag. */
  9790. + output_operand_lossage ("invalid operand output code");
  9791. + }
  9792. +
  9793. + if (GET_CODE (x) == REG)
  9794. + fputs (reg_names[REGNO (x)], file);
  9795. + else if (GET_CODE (x) == MEM)
  9796. + {
  9797. + fputc ('[', file);
  9798. + /* Poor Sun assembler doesn't understand absolute addressing. */
  9799. + if (CONSTANT_P (XEXP (x, 0)))
  9800. + fputs ("%g0+", file);
  9801. + output_address (GET_MODE (x), XEXP (x, 0));
  9802. + fputc (']', file);
  9803. + }
  9804. + else if (GET_CODE (x) == HIGH)
  9805. + {
  9806. + fputs ("%hi(", file);
  9807. + output_addr_const (file, XEXP (x, 0));
  9808. + fputc (')', file);
  9809. + }
  9810. + else if (GET_CODE (x) == LO_SUM)
  9811. + {
  9812. + sparc_print_operand (file, XEXP (x, 0), 0);
  9813. + if (TARGET_CM_MEDMID)
  9814. + fputs ("+%l44(", file);
  9815. + else
  9816. + fputs ("+%lo(", file);
  9817. + output_addr_const (file, XEXP (x, 1));
  9818. + fputc (')', file);
  9819. + }
  9820. + else if (GET_CODE (x) == CONST_DOUBLE)
  9821. + output_operand_lossage ("floating-point constant not a valid immediate operand");
  9822. + else
  9823. + output_addr_const (file, x);
  9824. +}
+
+/* Implement TARGET_PRINT_OPERAND_ADDRESS.  */
+
+static void
+sparc_print_operand_address (FILE *file, machine_mode /*mode*/, rtx x)
+{
+  register rtx base, index = 0;
+  int offset = 0;
+  register rtx addr = x;
+
+  if (REG_P (addr))
+    fputs (reg_names[REGNO (addr)], file);
+  else if (GET_CODE (addr) == PLUS)
+    {
+      if (CONST_INT_P (XEXP (addr, 0)))
+	offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
+      else if (CONST_INT_P (XEXP (addr, 1)))
+	offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
+      else
+	base = XEXP (addr, 0), index = XEXP (addr, 1);
+      if (GET_CODE (base) == LO_SUM)
+	{
+	  gcc_assert (USE_AS_OFFSETABLE_LO10
+		      && TARGET_ARCH64
+		      && ! TARGET_CM_MEDMID);
+	  output_operand (XEXP (base, 0), 0);
+	  fputs ("+%lo(", file);
+	  output_address (VOIDmode, XEXP (base, 1));
+	  fprintf (file, ")+%d", offset);
+	}
+      else
+	{
+	  fputs (reg_names[REGNO (base)], file);
+	  if (index == 0)
+	    fprintf (file, "%+d", offset);
+	  else if (REG_P (index))
+	    fprintf (file, "+%s", reg_names[REGNO (index)]);
+	  else if (GET_CODE (index) == SYMBOL_REF
+		   || GET_CODE (index) == LABEL_REF
+		   || GET_CODE (index) == CONST)
+	    fputc ('+', file), output_addr_const (file, index);
+	  else
+	    gcc_unreachable ();
+	}
+    }
+  else if (GET_CODE (addr) == MINUS
+	   && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
+    {
+      output_addr_const (file, XEXP (addr, 0));
+      fputs ("-(", file);
+      output_addr_const (file, XEXP (addr, 1));
+      fputs ("-.)", file);
+    }
+  else if (GET_CODE (addr) == LO_SUM)
+    {
+      output_operand (XEXP (addr, 0), 0);
+      if (TARGET_CM_MEDMID)
+	fputs ("+%l44(", file);
+      else
+	fputs ("+%lo(", file);
+      output_address (VOIDmode, XEXP (addr, 1));
+      fputc (')', file);
+    }
+  else if (flag_pic
+	   && GET_CODE (addr) == CONST
+	   && GET_CODE (XEXP (addr, 0)) == MINUS
+	   && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
+	   && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
+	   && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
+    {
+      addr = XEXP (addr, 0);
+      output_addr_const (file, XEXP (addr, 0));
+      /* Group the args of the second CONST in parentheses.  */
+      fputs ("-(", file);
+      /* Skip past the second CONST--it does nothing for us.  */
+      output_addr_const (file, XEXP (XEXP (addr, 1), 0));
+      /* Close the parenthesis.  */
+      fputc (')', file);
+    }
+  else
+    output_addr_const (file, addr);
+}
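Editor's note: as a quick reference for the cases above, this is the concrete
assembler syntax the routine produces for the common address shapes (editor's
illustration; the register names and label are arbitrary examples):

    REG                       ->  %i0
    PLUS (reg, const_int 8)   ->  %i0+8
    PLUS (reg, reg)           ->  %i0+%o2
    LO_SUM (reg, sym)         ->  %i0+%lo(sym)   (+%l44(sym) for -mcmodel=medmid)
    MINUS (sym, label)        ->  sym-(.LL1-.)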
+
+/* Target hook for assembling integer objects.  The sparc version has
+   special handling for aligned DI-mode objects.  */
+
+static bool
+sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
+{
+  /* ??? We only output .xword's for symbols and only then in environments
+     where the assembler can handle them.  */
+  if (aligned_p && size == 8 && GET_CODE (x) != CONST_INT)
+    {
+      if (TARGET_V9)
+	{
+	  assemble_integer_with_op ("\t.xword\t", x);
+	  return true;
+	}
+      else
+	{
+	  assemble_aligned_integer (4, const0_rtx);
+	  assemble_aligned_integer (4, x);
+	  return true;
+	}
+    }
+  return default_assemble_integer (x, size, aligned_p);
+}
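Editor's note: concretely, for an aligned 8-byte reference to a hypothetical
symbol foo, the two paths emit something like the following (SPARC is
big-endian, so the zero word is the high half; the exact directive spelling
depends on the assembler):

    ! TARGET_V9                ! pre-V9
    .xword  foo                .word   0
                               .word   foo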
+
+/* Return the value of a code used in the .proc pseudo-op that says
+   what kind of result this function returns.  For non-C types, we pick
+   the closest C type.  */
+
+#ifndef SHORT_TYPE_SIZE
+#define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
+#endif
+
+#ifndef INT_TYPE_SIZE
+#define INT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_TYPE_SIZE
+#define LONG_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_LONG_TYPE_SIZE
+#define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef FLOAT_TYPE_SIZE
+#define FLOAT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef DOUBLE_TYPE_SIZE
+#define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+unsigned long
+sparc_type_code (register tree type)
+{
+  register unsigned long qualifiers = 0;
+  register unsigned shift;
+
+  /* Only the first 30 bits of the qualifier are valid.  We must refrain from
+     setting more, since some assemblers will give an error for this.  Also,
+     we must be careful to avoid shifts of 32 bits or more to avoid getting
+     unpredictable results.  */
+
+  for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
+    {
+      switch (TREE_CODE (type))
+	{
+	case ERROR_MARK:
+	  return qualifiers;
+
+	case ARRAY_TYPE:
+	  qualifiers |= (3 << shift);
+	  break;
+
+	case FUNCTION_TYPE:
+	case METHOD_TYPE:
+	  qualifiers |= (2 << shift);
+	  break;
+
+	case POINTER_TYPE:
+	case REFERENCE_TYPE:
+	case OFFSET_TYPE:
+	  qualifiers |= (1 << shift);
+	  break;
+
+	case RECORD_TYPE:
+	  return (qualifiers | 8);
+
+	case UNION_TYPE:
+	case QUAL_UNION_TYPE:
+	  return (qualifiers | 9);
+
+	case ENUMERAL_TYPE:
+	  return (qualifiers | 10);
+
+	case VOID_TYPE:
+	  return (qualifiers | 16);
+
+	case INTEGER_TYPE:
+	  /* If this is a range type, consider it to be the underlying
+	     type.  */
+	  if (TREE_TYPE (type) != 0)
+	    break;
+
+	  /* Carefully distinguish all the standard types of C,
+	     without messing up if the language is not C.  We do this by
+	     testing TYPE_PRECISION and TYPE_UNSIGNED.  The old code used to
+	     look at both the names and the above fields, but that's redundant.
+	     Any type whose size is between two C types will be considered
+	     to be the wider of the two types.  Also, we do not have a
+	     special code to use for "long long", so anything wider than
+	     long is treated the same.  Note that we can't distinguish
+	     between "int" and "long" in this code if they are the same
+	     size, but that's fine, since neither can the assembler.  */
+
+	  if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
+	    return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
+
+	  else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
+	    return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
+
+	  else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
+	    return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
+
+	  else
+	    return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
+
+	case REAL_TYPE:
+	  /* If this is a range type, consider it to be the underlying
+	     type.  */
+	  if (TREE_TYPE (type) != 0)
+	    break;
+
+	  /* Carefully distinguish all the standard types of C,
+	     without messing up if the language is not C.  */
+
+	  if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
+	    return (qualifiers | 6);
+
+	  else
+	    return (qualifiers | 7);
+
+	case COMPLEX_TYPE:	/* GNU Fortran COMPLEX type.  */
+	  /* ??? We need to distinguish between double and float complex types,
+	     but I don't know how yet because I can't reach this code from
+	     existing front-ends.  */
+	  return (qualifiers | 7);	/* Who knows?  */
+
+	case VECTOR_TYPE:
+	case BOOLEAN_TYPE:	/* Boolean truth value type.  */
+	case LANG_TYPE:
+	case NULLPTR_TYPE:
+	  return qualifiers;
+
+	default:
+	  gcc_unreachable ();		/* Not a type!  */
+	}
+    }
+
+  return qualifiers;
+}
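Editor's note: a worked example of the encoding. For a hypothetical C
declaration `int **p`, the loop sees POINTER_TYPE at shift 6, POINTER_TYPE at
shift 8, and then the signed INTEGER_TYPE with INT_TYPE_SIZE precision, which
supplies the base code 4:

    shift 6: POINTER          -> 1 << 6  = 0x040
    shift 8: POINTER          -> 1 << 8  = 0x100
    int (signed, int-sized)   -> | 4
    result                    = 0x144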
+
+/* Nested function support.  */
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+   FNADDR is an RTX for the address of the function's pure code.
+   CXT is an RTX for the static chain value for the function.
+
+   This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
+   (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
+   (to store insns).  This is a bit excessive.  Perhaps a different
+   mechanism would be better here.
+
+   Emit enough FLUSH insns to synchronize the data and instruction caches.  */
+
+static void
+sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
+{
+  /* SPARC 32-bit trampoline:
+
+	sethi	%hi(fn), %g1
+	sethi	%hi(static), %g2
+	jmp	%g1+%lo(fn)
+	or	%g2, %lo(static), %g2
+
+    SETHI i,r  = 00rr rrr1 00ii iiii iiii iiii iiii iiii
+    JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii  */
+
+  emit_move_insn
+    (adjust_address (m_tramp, SImode, 0),
+     expand_binop (SImode, ior_optab,
+		   expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
+		   GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
+		   NULL_RTX, 1, OPTAB_DIRECT));
+
+  emit_move_insn
+    (adjust_address (m_tramp, SImode, 4),
+     expand_binop (SImode, ior_optab,
+		   expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
+		   GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
+		   NULL_RTX, 1, OPTAB_DIRECT));
+
+  emit_move_insn
+    (adjust_address (m_tramp, SImode, 8),
+     expand_binop (SImode, ior_optab,
+		   expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
+		   GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
+		   NULL_RTX, 1, OPTAB_DIRECT));
+
+  emit_move_insn
+    (adjust_address (m_tramp, SImode, 12),
+     expand_binop (SImode, ior_optab,
+		   expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
+		   GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
+		   NULL_RTX, 1, OPTAB_DIRECT));
+
+  /* On UltraSPARC a flush flushes an entire cache line.  The trampoline is
+     aligned on a 16 byte boundary so one flush clears it all.  */
+  emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 0))));
+  if (sparc_cpu != PROCESSOR_ULTRASPARC
+      && sparc_cpu != PROCESSOR_ULTRASPARC3
+      && sparc_cpu != PROCESSOR_NIAGARA
+      && sparc_cpu != PROCESSOR_NIAGARA2
+      && sparc_cpu != PROCESSOR_NIAGARA3
+      && sparc_cpu != PROCESSOR_NIAGARA4
+      && sparc_cpu != PROCESSOR_NIAGARA7
+      && sparc_cpu != PROCESSOR_M8)
+    emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 8))));
+
+  /* Call __enable_execute_stack after writing onto the stack to make sure
+     the stack address is accessible.  */
+#ifdef HAVE_ENABLE_EXECUTE_STACK
+  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
+		     LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
+#endif
+}
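Editor's note: the four stores above just splice the target addresses into
fixed opcode templates. A standalone sketch of the same arithmetic, assuming
32-bit unsigned addresses (illustration only, not part of the patch):

    /* Editor's sketch: compute the four instruction words the 32-bit
       trampoline stores, given function address FN and static chain CXT.  */
    static void
    sparc32_tramp_words (unsigned int fn, unsigned int cxt, unsigned int w[4])
    {
      w[0] = 0x03000000 | (fn >> 10);     /* sethi %hi(fn), %g1 */
      w[1] = 0x05000000 | (cxt >> 10);    /* sethi %hi(static), %g2 */
      w[2] = 0x81c06000 | (fn & 0x3ff);   /* jmp %g1+%lo(fn) */
      w[3] = 0x8410a000 | (cxt & 0x3ff);  /* or %g2, %lo(static), %g2 */
    }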
+
+/* The 64-bit version is simpler because it makes more sense to load the
+   values as "immediate" data out of the trampoline.  It's also easier since
+   we can read the PC without clobbering a register.  */
+
+static void
+sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
+{
+  /* SPARC 64-bit trampoline:
+
+	rd	%pc, %g1
+	ldx	[%g1+24], %g5
+	jmp	%g5
+	ldx	[%g1+16], %g5
+	+16 bytes data  */
+
+  emit_move_insn (adjust_address (m_tramp, SImode, 0),
+		  GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
+  emit_move_insn (adjust_address (m_tramp, SImode, 4),
+		  GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
+  emit_move_insn (adjust_address (m_tramp, SImode, 8),
+		  GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
+  emit_move_insn (adjust_address (m_tramp, SImode, 12),
+		  GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
+  emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
+  emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
+  emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
+
+  if (sparc_cpu != PROCESSOR_ULTRASPARC
+      && sparc_cpu != PROCESSOR_ULTRASPARC3
+      && sparc_cpu != PROCESSOR_NIAGARA
+      && sparc_cpu != PROCESSOR_NIAGARA2
+      && sparc_cpu != PROCESSOR_NIAGARA3
+      && sparc_cpu != PROCESSOR_NIAGARA4
+      && sparc_cpu != PROCESSOR_NIAGARA7
+      && sparc_cpu != PROCESSOR_M8)
+    emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
+
+  /* Call __enable_execute_stack after writing onto the stack to make sure
+     the stack address is accessible.  */
+#ifdef HAVE_ENABLE_EXECUTE_STACK
+  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
+		     LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
+#endif
+}
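Editor's note: so the 64-bit trampoline is 32 bytes, four fixed instruction
words followed by the two 8-byte operands that the ldx instructions fetch
PC-relatively (summary derived from the stores above; the delay-slot ldx
leaves the static chain in %g5 after the jump target has been read):

    offset  0: 0x83414000   rd  %pc, %g1
    offset  4: 0xca586018   ldx [%g1+24], %g5   (fetches fnaddr)
    offset  8: 0x81c14000   jmp %g5
    offset 12: 0xca586010   ldx [%g1+16], %g5   (delay slot; fetches cxt)
    offset 16: static chain value (cxt)
    offset 24: function address (fnaddr)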
+
+/* Worker for TARGET_TRAMPOLINE_INIT.  */
+
+static void
+sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
+{
+  rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
+  cxt = force_reg (Pmode, cxt);
+  if (TARGET_ARCH64)
+    sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
+  else
+    sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
+}
+
+/* Adjust the cost of a scheduling dependency.  Return the new cost of
+   a dependency of INSN on DEP_INSN.  COST is the current cost.  */
+
+static int
+supersparc_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
+			int cost)
+{
+  enum attr_type insn_type;
+
+  if (recog_memoized (insn) < 0)
+    return cost;
+
+  insn_type = get_attr_type (insn);
+
+  if (dep_type == 0)
+    {
+      /* Data dependency; DEP_INSN writes a register that INSN reads some
+	 cycles later.  */
+
+      /* If a load, then the dependence must be on the memory address;
+	 add an extra "cycle".  Note that the cost could be two cycles
+	 if the reg was written late in an instruction group; we cannot
+	 tell here.  */
+      if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
+	return cost + 3;
+
+      /* Get the delay only if the address of the store is the dependence.  */
+      if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
+	{
+	  rtx pat = PATTERN (insn);
+	  rtx dep_pat = PATTERN (dep_insn);
+
+	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
+	    return cost;  /* This should not happen!  */
+
+	  /* The dependency between the two instructions was on the data that
+	     is being stored.  Assume that this implies that the address of the
+	     store is not dependent.  */
+	  if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
+	    return cost;
+
+	  return cost + 3;  /* An approximation.  */
+	}
+
+      /* A shift instruction cannot receive its data from an instruction
+	 in the same cycle; add a one cycle penalty.  */
+      if (insn_type == TYPE_SHIFT)
+	return cost + 3;  /* Split before cascade into shift.  */
+    }
+  else
+    {
+      /* Anti- or output-dependency; DEP_INSN reads/writes a register that
+	 INSN writes some cycles later.  */
+
+      /* These are only significant for the fpu unit; writing a fp reg before
+	 the fpu has finished with it stalls the processor.  */
+
+      /* Reusing an integer register causes no problems.  */
+      if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
+	return 0;
+    }
+
+  return cost;
+}
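Editor's note: to make the store case concrete (illustration with arbitrary
registers). In the first pair the store depends on the add through its
*address* register, so the +3 penalty applies; in the second the dependence
is only on the stored *data*, which the rtx_equal_p check above detects and
leaves the cost unchanged:

    add %o1, 4, %o2 ; st %o3, [%o2]   ! address dependence -> cost + 3
    add %o1, 4, %o3 ; st %o3, [%o2]   ! data dependence    -> cost unchanged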
+
+static int
+hypersparc_adjust_cost (rtx_insn *insn, int dtype, rtx_insn *dep_insn,
+			int cost)
+{
+  enum attr_type insn_type, dep_type;
+  rtx pat = PATTERN (insn);
+  rtx dep_pat = PATTERN (dep_insn);
+
+  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
+    return cost;
+
+  insn_type = get_attr_type (insn);
+  dep_type = get_attr_type (dep_insn);
+
+  switch (dtype)
+    {
+    case 0:
+      /* Data dependency; DEP_INSN writes a register that INSN reads some
+	 cycles later.  */
+
+      switch (insn_type)
+	{
+	case TYPE_STORE:
+	case TYPE_FPSTORE:
+	  /* Get the delay iff the address of the store is the dependence.  */
+	  if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
+	    return cost;
+
+	  if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
+	    return cost;
+	  return cost + 3;
+
+	case TYPE_LOAD:
+	case TYPE_SLOAD:
+	case TYPE_FPLOAD:
+	  /* If a load, then the dependence must be on the memory address.  If
+	     the addresses aren't equal, then it might be a false dependency.  */
+	  if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
+	    {
+	      if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
+		  || GET_CODE (SET_DEST (dep_pat)) != MEM
+		  || GET_CODE (SET_SRC (pat)) != MEM
+		  || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
+				    XEXP (SET_SRC (pat), 0)))
+		return cost + 2;
+
+	      return cost + 8;
+	    }
+	  break;
+
+	case TYPE_BRANCH:
+	  /* Compare to branch latency is 0.  There is no benefit from
+	     separating compare and branch.  */
+	  if (dep_type == TYPE_COMPARE)
+	    return 0;
+	  /* Floating point compare to branch latency is less than
+	     compare to conditional move.  */
+	  if (dep_type == TYPE_FPCMP)
+	    return cost - 1;
+	  break;
+
+	default:
+	  break;
+	}
+      break;
+
+    case REG_DEP_ANTI:
+      /* Anti-dependencies only penalize the fpu unit.  */
+      if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
+	return 0;
+      break;
+
+    default:
+      break;
+    }
+
+  return cost;
+}
+
+static int
+sparc_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep, int cost,
+		   unsigned int)
+{
+  switch (sparc_cpu)
+    {
+    case PROCESSOR_SUPERSPARC:
+      cost = supersparc_adjust_cost (insn, dep_type, dep, cost);
+      break;
+    case PROCESSOR_HYPERSPARC:
+    case PROCESSOR_SPARCLITE86X:
+      cost = hypersparc_adjust_cost (insn, dep_type, dep, cost);
+      break;
+    default:
+      break;
+    }
+  return cost;
+}
+
+static void
+sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
+		  int sched_verbose ATTRIBUTE_UNUSED,
+		  int max_ready ATTRIBUTE_UNUSED)
+{}
+
+static int
+sparc_use_sched_lookahead (void)
+{
+  switch (sparc_cpu)
+    {
+    case PROCESSOR_ULTRASPARC:
+    case PROCESSOR_ULTRASPARC3:
+      return 4;
+    case PROCESSOR_SUPERSPARC:
+    case PROCESSOR_HYPERSPARC:
+    case PROCESSOR_SPARCLITE86X:
+      return 3;
+    case PROCESSOR_NIAGARA4:
+    case PROCESSOR_NIAGARA7:
+    case PROCESSOR_M8:
+      return 2;
+    case PROCESSOR_NIAGARA:
+    case PROCESSOR_NIAGARA2:
+    case PROCESSOR_NIAGARA3:
+    default:
+      return 0;
+    }
+}
+
+static int
+sparc_issue_rate (void)
+{
+  switch (sparc_cpu)
+    {
+    case PROCESSOR_ULTRASPARC:
+    case PROCESSOR_ULTRASPARC3:
+    case PROCESSOR_M8:
+      return 4;
+    case PROCESSOR_SUPERSPARC:
+      return 3;
+    case PROCESSOR_HYPERSPARC:
+    case PROCESSOR_SPARCLITE86X:
+    case PROCESSOR_V9:
+      /* Assume V9 processors are capable of at least dual-issue.  */
+    case PROCESSOR_NIAGARA4:
+    case PROCESSOR_NIAGARA7:
+      return 2;
+    case PROCESSOR_NIAGARA:
+    case PROCESSOR_NIAGARA2:
+    case PROCESSOR_NIAGARA3:
+    default:
+      return 1;
+    }
+}
+
+int
+sparc_branch_cost (bool speed_p, bool predictable_p)
+{
+  if (!speed_p)
+    return 2;
+
+  /* For pre-V9 processors we use a single value (usually 3) to take into
+     account the potential annulling of the delay slot (which ends up being
+     a bubble in the pipeline slot) plus a cycle to take into consideration
+     the instruction cache effects.
+
+     On V9 and later processors, which have branch prediction facilities,
+     we take into account whether the branch is (easily) predictable.  */
+  const int cost = sparc_costs->branch_cost;
+
+  switch (sparc_cpu)
+    {
+    case PROCESSOR_V9:
+    case PROCESSOR_ULTRASPARC:
+    case PROCESSOR_ULTRASPARC3:
+    case PROCESSOR_NIAGARA:
+    case PROCESSOR_NIAGARA2:
+    case PROCESSOR_NIAGARA3:
+    case PROCESSOR_NIAGARA4:
+    case PROCESSOR_NIAGARA7:
+    case PROCESSOR_M8:
+      return cost + (predictable_p ? 0 : 2);
+
+    default:
+      return cost;
+    }
+}
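Editor's note: a worked example, assuming a cost table whose branch_cost is 3
(an assumption; the actual value comes from sparc_costs):

    -Os (size):                      cost = 2
    speed, predictable branch:       cost = branch_cost      (e.g. 3)
    speed, unpredictable, V9+ CPU:   cost = branch_cost + 2  (e.g. 5)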
+
+static int
+set_extends (rtx_insn *insn)
+{
+  register rtx pat = PATTERN (insn);
+
+  switch (GET_CODE (SET_SRC (pat)))
+    {
+      /* Load and some shift instructions zero extend.  */
+    case MEM:
+    case ZERO_EXTEND:
+      /* sethi clears the high bits.  */
+    case HIGH:
+      /* LO_SUM is used with sethi.  sethi cleared the high
+	 bits and the values used with lo_sum are positive.  */
+    case LO_SUM:
+      /* Store flag stores 0 or 1.  */
+    case LT: case LTU:
+    case GT: case GTU:
+    case LE: case LEU:
+    case GE: case GEU:
+    case EQ:
+    case NE:
+      return 1;
+    case AND:
+      {
+	rtx op0 = XEXP (SET_SRC (pat), 0);
+	rtx op1 = XEXP (SET_SRC (pat), 1);
+	if (GET_CODE (op1) == CONST_INT)
+	  return INTVAL (op1) >= 0;
+	if (GET_CODE (op0) != REG)
+	  return 0;
+	if (sparc_check_64 (op0, insn) == 1)
+	  return 1;
+	return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
+      }
+    case IOR:
+    case XOR:
+      {
+	rtx op0 = XEXP (SET_SRC (pat), 0);
+	rtx op1 = XEXP (SET_SRC (pat), 1);
+	if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
+	  return 0;
+	if (GET_CODE (op1) == CONST_INT)
+	  return INTVAL (op1) >= 0;
+	return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
+      }
+    case LSHIFTRT:
+      return GET_MODE (SET_SRC (pat)) == SImode;
+      /* Positive integers leave the high bits zero.  */
+    case CONST_INT:
+      return !(INTVAL (SET_SRC (pat)) & 0x80000000);
+    case ASHIFTRT:
+    case SIGN_EXTEND:
+      return -(GET_MODE (SET_SRC (pat)) == SImode);
+    case REG:
+      return sparc_check_64 (SET_SRC (pat), insn);
+    default:
+      return 0;
+    }
+}
+
+/* We _ought_ to have only one kind per function, but...  */
+static GTY(()) rtx sparc_addr_diff_list;
+static GTY(()) rtx sparc_addr_list;
+
+void
+sparc_defer_case_vector (rtx lab, rtx vec, int diff)
+{
+  vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
+  if (diff)
+    sparc_addr_diff_list
+      = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
+  else
+    sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
+}
+
+static void
+sparc_output_addr_vec (rtx vec)
+{
+  rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
+  int idx, vlen = XVECLEN (body, 0);
+
+#ifdef ASM_OUTPUT_ADDR_VEC_START
+  ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
+#endif
+
+#ifdef ASM_OUTPUT_CASE_LABEL
+  ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
+			 NEXT_INSN (lab));
+#else
+  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
+#endif
+
+  for (idx = 0; idx < vlen; idx++)
+    {
+      ASM_OUTPUT_ADDR_VEC_ELT
+	(asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
+    }
+
+#ifdef ASM_OUTPUT_ADDR_VEC_END
+  ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
+#endif
+}
+
+static void
+sparc_output_addr_diff_vec (rtx vec)
+{
+  rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
+  rtx base = XEXP (XEXP (body, 0), 0);
+  int idx, vlen = XVECLEN (body, 1);
+
+#ifdef ASM_OUTPUT_ADDR_VEC_START
+  ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
+#endif
+
+#ifdef ASM_OUTPUT_CASE_LABEL
+  ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
+			 NEXT_INSN (lab));
+#else
+  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
+#endif
+
+  for (idx = 0; idx < vlen; idx++)
+    {
+      ASM_OUTPUT_ADDR_DIFF_ELT
+	(asm_out_file,
+	 body,
+	 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
+	 CODE_LABEL_NUMBER (base));
+    }
+
+#ifdef ASM_OUTPUT_ADDR_VEC_END
+  ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
+#endif
+}
+
+static void
+sparc_output_deferred_case_vectors (void)
+{
+  rtx t;
+  int align;
+
+  if (sparc_addr_list == NULL_RTX
+      && sparc_addr_diff_list == NULL_RTX)
+    return;
+
+  /* Align to cache line in the function's code section.  */
+  switch_to_section (current_function_section ());
+
+  align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
+  if (align > 0)
+    ASM_OUTPUT_ALIGN (asm_out_file, align);
+
+  for (t = sparc_addr_list; t ; t = XEXP (t, 1))
+    sparc_output_addr_vec (XEXP (t, 0));
+  for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
+    sparc_output_addr_diff_vec (XEXP (t, 0));
+
+  sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
+}
+
+/* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
+   unknown.  Return 1 if the high bits are zero, -1 if the register is
+   sign extended.  */
+int
+sparc_check_64 (rtx x, rtx_insn *insn)
+{
+  /* If a register is set only once it is safe to ignore insns this
+     code does not know how to handle.  The loop will either recognize
+     the single set and return the correct value or fail to recognize
+     it and return 0.  */
+  int set_once = 0;
+  rtx y = x;
+
+  gcc_assert (GET_CODE (x) == REG);
+
+  if (GET_MODE (x) == DImode)
+    y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
+
+  if (flag_expensive_optimizations
+      && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
+    set_once = 1;
+
+  if (insn == 0)
+    {
+      if (set_once)
+	insn = get_last_insn_anywhere ();
+      else
+	return 0;
+    }
+
+  while ((insn = PREV_INSN (insn)))
+    {
+      switch (GET_CODE (insn))
+	{
+	case JUMP_INSN:
+	case NOTE:
+	  break;
+	case CODE_LABEL:
+	case CALL_INSN:
+	default:
+	  if (! set_once)
+	    return 0;
+	  break;
+	case INSN:
+	  {
+	    rtx pat = PATTERN (insn);
+	    if (GET_CODE (pat) != SET)
+	      return 0;
+	    if (rtx_equal_p (x, SET_DEST (pat)))
+	      return set_extends (insn);
+	    if (y && rtx_equal_p (y, SET_DEST (pat)))
+	      return set_extends (insn);
+	    if (reg_overlap_mentioned_p (SET_DEST (pat), y))
+	      return 0;
+	  }
+	}
+    }
+  return 0;
+}
+
+/* Output a wide shift instruction in V8+ mode.  INSN is the instruction,
+   OPERANDS are its operands and OPCODE is the mnemonic to be used.  */
+
+const char *
+output_v8plus_shift (rtx_insn *insn, rtx *operands, const char *opcode)
+{
+  static char asm_code[60];
+
+  /* The scratch register is only required when the destination
+     register is not a 64-bit global or out register.  */
+  if (which_alternative != 2)
+    operands[3] = operands[0];
+
+  /* We can only shift by constants <= 63.  */
+  if (GET_CODE (operands[2]) == CONST_INT)
+    operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
+
+  if (GET_CODE (operands[1]) == CONST_INT)
+    output_asm_insn ("mov\t%1, %3", operands);
+  else
+    {
+      output_asm_insn ("sllx\t%H1, 32, %3", operands);
+      if (sparc_check_64 (operands[1], insn) <= 0)
+	output_asm_insn ("srl\t%L1, 0, %L1", operands);
+      output_asm_insn ("or\t%L1, %3, %3", operands);
+    }
+
+  strcpy (asm_code, opcode);
+
+  if (which_alternative != 2)
+    return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
+  else
+    return
+      strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
+}
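Editor's note: for a non-constant source with opcode "sllx" and the scratch
alternative (which_alternative == 2), the routine emits the sequence below;
operand syntax is the %H/%L convention used in the function above
(illustration only):

    sllx    %H1, 32, %3      ! high word into upper half of scratch
    srl     %L1, 0, %L1      ! zero-extend low word if not known clean
    or      %L1, %3, %3      ! full 64-bit value in scratch
    sllx    %3, %2, %3       ! the shift itself
    srlx    %3, 32, %H0      ! split the 64-bit result back
    mov     %3, %L0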
+
+/* Output rtl to increment the profiler label LABELNO
+   for profiling a function entry.  */
+
+void
+sparc_profile_hook (int labelno)
+{
+  char buf[32];
+  rtx lab, fun;
+
+  fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
+  if (NO_PROFILE_COUNTERS)
+    emit_library_call (fun, LCT_NORMAL, VOIDmode);
+  else
+    {
+      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
+      lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+      emit_library_call (fun, LCT_NORMAL, VOIDmode, lab, Pmode);
+    }
+}
+
+#ifdef TARGET_SOLARIS
+/* Solaris implementation of TARGET_ASM_NAMED_SECTION.  */
+
+static void
+sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
+				     tree decl ATTRIBUTE_UNUSED)
+{
+  if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
+    {
+      solaris_elf_asm_comdat_section (name, flags, decl);
+      return;
+    }
+
+  fprintf (asm_out_file, "\t.section\t\"%s\"", name);
+
+  if (!(flags & SECTION_DEBUG))
+    fputs (",#alloc", asm_out_file);
+#if HAVE_GAS_SECTION_EXCLUDE
+  if (flags & SECTION_EXCLUDE)
+    fputs (",#exclude", asm_out_file);
+#endif
+  if (flags & SECTION_WRITE)
+    fputs (",#write", asm_out_file);
+  if (flags & SECTION_TLS)
+    fputs (",#tls", asm_out_file);
+  if (flags & SECTION_CODE)
+    fputs (",#execinstr", asm_out_file);
+
+  if (flags & SECTION_NOTYPE)
+    ;
+  else if (flags & SECTION_BSS)
+    fputs (",#nobits", asm_out_file);
+  else
+    fputs (",#progbits", asm_out_file);
+
+  fputc ('\n', asm_out_file);
+}
+#endif /* TARGET_SOLARIS */
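Editor's note: as an example of the directives this produces (hypothetical
section names), a writable, initialized data section and a BSS-style section
come out as:

    .section ".mydata",#alloc,#write,#progbits
    .section ".mybss",#alloc,#write,#nobits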
+
+/* We do not allow indirect calls to be optimized into sibling calls.
+
+   We cannot use sibling calls when delayed branches are disabled
+   because they will likely require the call delay slot to be filled.
+
+   Also, on SPARC 32-bit we cannot emit a sibling call when the
+   current function returns a structure.  This is because the "unimp
+   after call" convention would cause the callee to return to the
+   wrong place.  The generic code already disallows cases where the
+   function being called returns a structure.
+
+   It may seem strange how this last case could occur.  Usually there
+   is code after the call which jumps to epilogue code which dumps the
+   return value into the struct return area.  That ought to invalidate
+   the sibling call, right?  Well, in the C++ case we can end up passing
+   the pointer to the struct return area to a constructor (which returns
+   void) and then nothing else happens.  Such a sibling call would look
+   valid without the added check here.
+
+   VxWorks PIC PLT entries require the global pointer to be initialized
+   on entry.  We therefore can't emit sibling calls to them.  */
+static bool
+sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
+{
+  return (decl
+	  && flag_delayed_branch
+	  && (TARGET_ARCH64 || ! cfun->returns_struct)
+	  && !(TARGET_VXWORKS_RTP
+	       && flag_pic
+	       && !targetm.binds_local_p (decl)));
+}
+
+/* libfunc renaming.  */
+
+static void
+sparc_init_libfuncs (void)
+{
+  if (TARGET_ARCH32)
+    {
+      /* Use the subroutines that Sun's library provides for integer
+	 multiply and divide.  The `*' prevents an underscore from
+	 being prepended by the compiler.  .umul is a little faster
+	 than .mul.  */
+      set_optab_libfunc (smul_optab, SImode, "*.umul");
+      set_optab_libfunc (sdiv_optab, SImode, "*.div");
+      set_optab_libfunc (udiv_optab, SImode, "*.udiv");
+      set_optab_libfunc (smod_optab, SImode, "*.rem");
+      set_optab_libfunc (umod_optab, SImode, "*.urem");
+
+      /* TFmode arithmetic.  These names are part of the SPARC 32-bit ABI.  */
+      set_optab_libfunc (add_optab, TFmode, "_Q_add");
+      set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
+      set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
+      set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
+      set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
+
+      /* We can define the TFmode sqrt optab only if TARGET_FPU.  This
+	 is because with soft-float, the SFmode and DFmode sqrt
+	 instructions will be absent, and the compiler will notice and
+	 try to use the TFmode sqrt instruction for calls to the
+	 builtin function sqrt, but this fails.  */
+      if (TARGET_FPU)
+	set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
+
+      set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
+      set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
+      set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
+      set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
+      set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
+      set_optab_libfunc (le_optab, TFmode, "_Q_fle");
+
+      set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
+      set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
+      set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
+      set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
+
+      set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
+      set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
+      set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
+      set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
+
+      if (DITF_CONVERSION_LIBFUNCS)
+	{
+	  set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
+	  set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
+	  set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
+	  set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
+	}
+
+      if (SUN_CONVERSION_LIBFUNCS)
+	{
+	  set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
+	  set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
+	  set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
+	  set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
+	}
+    }
+  if (TARGET_ARCH64)
+    {
+      /* In the SPARC 64-bit ABI, SImode multiply and divide functions
+	 do not exist in the library.  Make sure the compiler does not
+	 emit calls to them by accident.  (It should always use the
+	 hardware instructions.)  */
+      set_optab_libfunc (smul_optab, SImode, 0);
+      set_optab_libfunc (sdiv_optab, SImode, 0);
+      set_optab_libfunc (udiv_optab, SImode, 0);
+      set_optab_libfunc (smod_optab, SImode, 0);
+      set_optab_libfunc (umod_optab, SImode, 0);
+
+      if (SUN_INTEGER_MULTIPLY_64)
+	{
+	  set_optab_libfunc (smul_optab, DImode, "__mul64");
+	  set_optab_libfunc (sdiv_optab, DImode, "__div64");
+	  set_optab_libfunc (udiv_optab, DImode, "__udiv64");
+	  set_optab_libfunc (smod_optab, DImode, "__rem64");
+	  set_optab_libfunc (umod_optab, DImode, "__urem64");
+	}
+
+      if (SUN_CONVERSION_LIBFUNCS)
+	{
+	  set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
+	  set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
+	  set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
+	  set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
+	}
+    }
+}
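Editor's note: in practice this means that, with -m32, a long double (TFmode)
operation is lowered to one of the _Q_* ABI routines registered above. A
hedged illustration (editor's sketch, not part of the patch):

    /* Editor's sketch: with -m32 this addition is not inlined; the
       compiler emits a call to _Q_add per the optab mapping above.  */
    long double
    qadd (long double a, long double b)
    {
      return a + b;   /* -> call _Q_add (TFmode addition) */
    }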
+
+/* SPARC builtins.  */
+enum sparc_builtins
+{
+  /* FPU builtins.  */
+  SPARC_BUILTIN_LDFSR,
+  SPARC_BUILTIN_STFSR,
+
+  /* VIS 1.0 builtins.  */
+  SPARC_BUILTIN_FPACK16,
+  SPARC_BUILTIN_FPACK32,
+  SPARC_BUILTIN_FPACKFIX,
+  SPARC_BUILTIN_FEXPAND,
+  SPARC_BUILTIN_FPMERGE,
+  SPARC_BUILTIN_FMUL8X16,
+  SPARC_BUILTIN_FMUL8X16AU,
+  SPARC_BUILTIN_FMUL8X16AL,
+  SPARC_BUILTIN_FMUL8SUX16,
+  SPARC_BUILTIN_FMUL8ULX16,
+  SPARC_BUILTIN_FMULD8SUX16,
+  SPARC_BUILTIN_FMULD8ULX16,
+  SPARC_BUILTIN_FALIGNDATAV4HI,
+  SPARC_BUILTIN_FALIGNDATAV8QI,
+  SPARC_BUILTIN_FALIGNDATAV2SI,
+  SPARC_BUILTIN_FALIGNDATADI,
+  SPARC_BUILTIN_WRGSR,
+  SPARC_BUILTIN_RDGSR,
+  SPARC_BUILTIN_ALIGNADDR,
+  SPARC_BUILTIN_ALIGNADDRL,
+  SPARC_BUILTIN_PDIST,
+  SPARC_BUILTIN_EDGE8,
+  SPARC_BUILTIN_EDGE8L,
+  SPARC_BUILTIN_EDGE16,
+  SPARC_BUILTIN_EDGE16L,
+  SPARC_BUILTIN_EDGE32,
+  SPARC_BUILTIN_EDGE32L,
+  SPARC_BUILTIN_FCMPLE16,
+  SPARC_BUILTIN_FCMPLE32,
+  SPARC_BUILTIN_FCMPNE16,
+  SPARC_BUILTIN_FCMPNE32,
+  SPARC_BUILTIN_FCMPGT16,
+  SPARC_BUILTIN_FCMPGT32,
+  SPARC_BUILTIN_FCMPEQ16,
+  SPARC_BUILTIN_FCMPEQ32,
+  SPARC_BUILTIN_FPADD16,
+  SPARC_BUILTIN_FPADD16S,
+  SPARC_BUILTIN_FPADD32,
+  SPARC_BUILTIN_FPADD32S,
+  SPARC_BUILTIN_FPSUB16,
+  SPARC_BUILTIN_FPSUB16S,
+  SPARC_BUILTIN_FPSUB32,
+  SPARC_BUILTIN_FPSUB32S,
+  SPARC_BUILTIN_ARRAY8,
+  SPARC_BUILTIN_ARRAY16,
+  SPARC_BUILTIN_ARRAY32,
+
+  /* VIS 2.0 builtins.  */
+  SPARC_BUILTIN_EDGE8N,
+  SPARC_BUILTIN_EDGE8LN,
+  SPARC_BUILTIN_EDGE16N,
+  SPARC_BUILTIN_EDGE16LN,
+  SPARC_BUILTIN_EDGE32N,
+  SPARC_BUILTIN_EDGE32LN,
+  SPARC_BUILTIN_BMASK,
+  SPARC_BUILTIN_BSHUFFLEV4HI,
+  SPARC_BUILTIN_BSHUFFLEV8QI,
+  SPARC_BUILTIN_BSHUFFLEV2SI,
+  SPARC_BUILTIN_BSHUFFLEDI,
+
+  /* VIS 3.0 builtins.  */
+  SPARC_BUILTIN_CMASK8,
+  SPARC_BUILTIN_CMASK16,
+  SPARC_BUILTIN_CMASK32,
+  SPARC_BUILTIN_FCHKSM16,
+  SPARC_BUILTIN_FSLL16,
+  SPARC_BUILTIN_FSLAS16,
+  SPARC_BUILTIN_FSRL16,
+  SPARC_BUILTIN_FSRA16,
+  SPARC_BUILTIN_FSLL32,
+  SPARC_BUILTIN_FSLAS32,
+  SPARC_BUILTIN_FSRL32,
+  SPARC_BUILTIN_FSRA32,
+  SPARC_BUILTIN_PDISTN,
+  SPARC_BUILTIN_FMEAN16,
+  SPARC_BUILTIN_FPADD64,
+  SPARC_BUILTIN_FPSUB64,
+  SPARC_BUILTIN_FPADDS16,
+  SPARC_BUILTIN_FPADDS16S,
+  SPARC_BUILTIN_FPSUBS16,
+  SPARC_BUILTIN_FPSUBS16S,
+  SPARC_BUILTIN_FPADDS32,
+  SPARC_BUILTIN_FPADDS32S,
+  SPARC_BUILTIN_FPSUBS32,
+  SPARC_BUILTIN_FPSUBS32S,
+  SPARC_BUILTIN_FUCMPLE8,
+  SPARC_BUILTIN_FUCMPNE8,
+  SPARC_BUILTIN_FUCMPGT8,
+  SPARC_BUILTIN_FUCMPEQ8,
+  SPARC_BUILTIN_FHADDS,
+  SPARC_BUILTIN_FHADDD,
+  SPARC_BUILTIN_FHSUBS,
+  SPARC_BUILTIN_FHSUBD,
+  SPARC_BUILTIN_FNHADDS,
+  SPARC_BUILTIN_FNHADDD,
+  SPARC_BUILTIN_UMULXHI,
+  SPARC_BUILTIN_XMULX,
+  SPARC_BUILTIN_XMULXHI,
+
+  /* VIS 4.0 builtins.  */
+  SPARC_BUILTIN_FPADD8,
+  SPARC_BUILTIN_FPADDS8,
+  SPARC_BUILTIN_FPADDUS8,
+  SPARC_BUILTIN_FPADDUS16,
+  SPARC_BUILTIN_FPCMPLE8,
+  SPARC_BUILTIN_FPCMPGT8,
+  SPARC_BUILTIN_FPCMPULE16,
+  SPARC_BUILTIN_FPCMPUGT16,
+  SPARC_BUILTIN_FPCMPULE32,
+  SPARC_BUILTIN_FPCMPUGT32,
+  SPARC_BUILTIN_FPMAX8,
+  SPARC_BUILTIN_FPMAX16,
+  SPARC_BUILTIN_FPMAX32,
+  SPARC_BUILTIN_FPMAXU8,
+  SPARC_BUILTIN_FPMAXU16,
+  SPARC_BUILTIN_FPMAXU32,
+  SPARC_BUILTIN_FPMIN8,
+  SPARC_BUILTIN_FPMIN16,
+  SPARC_BUILTIN_FPMIN32,
+  SPARC_BUILTIN_FPMINU8,
+  SPARC_BUILTIN_FPMINU16,
+  SPARC_BUILTIN_FPMINU32,
+  SPARC_BUILTIN_FPSUB8,
+  SPARC_BUILTIN_FPSUBS8,
+  SPARC_BUILTIN_FPSUBUS8,
+  SPARC_BUILTIN_FPSUBUS16,
+
+  /* VIS 4.0B builtins.  */
+
+  /* Note that all the DICTUNPACK* entries should be kept
+     contiguous.  */
+  SPARC_BUILTIN_FIRST_DICTUNPACK,
+  SPARC_BUILTIN_DICTUNPACK8 = SPARC_BUILTIN_FIRST_DICTUNPACK,
+  SPARC_BUILTIN_DICTUNPACK16,
+  SPARC_BUILTIN_DICTUNPACK32,
+  SPARC_BUILTIN_LAST_DICTUNPACK = SPARC_BUILTIN_DICTUNPACK32,
+
+  /* Note that all the FPCMP*SHL entries should be kept
+     contiguous.  */
+  SPARC_BUILTIN_FIRST_FPCMPSHL,
+  SPARC_BUILTIN_FPCMPLE8SHL = SPARC_BUILTIN_FIRST_FPCMPSHL,
+  SPARC_BUILTIN_FPCMPGT8SHL,
+  SPARC_BUILTIN_FPCMPEQ8SHL,
+  SPARC_BUILTIN_FPCMPNE8SHL,
+  SPARC_BUILTIN_FPCMPLE16SHL,
+  SPARC_BUILTIN_FPCMPGT16SHL,
+  SPARC_BUILTIN_FPCMPEQ16SHL,
+  SPARC_BUILTIN_FPCMPNE16SHL,
+  SPARC_BUILTIN_FPCMPLE32SHL,
+  SPARC_BUILTIN_FPCMPGT32SHL,
+  SPARC_BUILTIN_FPCMPEQ32SHL,
+  SPARC_BUILTIN_FPCMPNE32SHL,
+  SPARC_BUILTIN_FPCMPULE8SHL,
+  SPARC_BUILTIN_FPCMPUGT8SHL,
+  SPARC_BUILTIN_FPCMPULE16SHL,
+  SPARC_BUILTIN_FPCMPUGT16SHL,
+  SPARC_BUILTIN_FPCMPULE32SHL,
+  SPARC_BUILTIN_FPCMPUGT32SHL,
+  SPARC_BUILTIN_FPCMPDE8SHL,
+  SPARC_BUILTIN_FPCMPDE16SHL,
+  SPARC_BUILTIN_FPCMPDE32SHL,
+  SPARC_BUILTIN_FPCMPUR8SHL,
+  SPARC_BUILTIN_FPCMPUR16SHL,
+  SPARC_BUILTIN_FPCMPUR32SHL,
+  SPARC_BUILTIN_LAST_FPCMPSHL = SPARC_BUILTIN_FPCMPUR32SHL,
+
+  SPARC_BUILTIN_MAX
+};
+
+static GTY (()) tree sparc_builtins[(int) SPARC_BUILTIN_MAX];
+static enum insn_code sparc_builtins_icode[(int) SPARC_BUILTIN_MAX];
+
+/* Return true if OPVAL can be used for operand OPNUM of instruction ICODE.
+   The instruction should require a constant operand of some sort.  The
+   function prints an error if OPVAL is not valid.  */
+
+static int
+check_constant_argument (enum insn_code icode, int opnum, rtx opval)
+{
+  if (GET_CODE (opval) != CONST_INT)
+    {
+      error ("%qs expects a constant argument", insn_data[icode].name);
+      return false;
+    }
+
+  if (!(*insn_data[icode].operand[opnum].predicate) (opval, VOIDmode))
+    {
+      error ("constant argument out of range for %qs", insn_data[icode].name);
+      return false;
+    }
+  return true;
+}
+
+/* Add a SPARC builtin function with NAME, ICODE, CODE and TYPE.  Return the
+   function decl or NULL_TREE if the builtin was not added.  */
+
+static tree
+def_builtin (const char *name, enum insn_code icode, enum sparc_builtins code,
+	     tree type)
+{
+  tree t
+    = add_builtin_function (name, type, code, BUILT_IN_MD, NULL, NULL_TREE);
+
+  if (t)
+    {
+      sparc_builtins[code] = t;
+      sparc_builtins_icode[code] = icode;
+    }
+
+  return t;
+}
+
+/* Likewise, but also marks the function as "const".  */
+
+static tree
+def_builtin_const (const char *name, enum insn_code icode,
+		   enum sparc_builtins code, tree type)
+{
+  tree t = def_builtin (name, icode, code, type);
+
+  if (t)
+    TREE_READONLY (t) = 1;
+
+  return t;
+}
+
+/* Implement the TARGET_INIT_BUILTINS target hook.
+   Create builtin functions for special SPARC instructions.  */
+
+static void
+sparc_init_builtins (void)
+{
+  if (TARGET_FPU)
+    sparc_fpu_init_builtins ();
+
+  if (TARGET_VIS)
+    sparc_vis_init_builtins ();
+}
+
+/* Create builtin functions for FPU instructions.  */
+
+static void
+sparc_fpu_init_builtins (void)
+{
+  tree ftype
+    = build_function_type_list (void_type_node,
+				build_pointer_type (unsigned_type_node), 0);
+  def_builtin ("__builtin_load_fsr", CODE_FOR_ldfsr,
+	       SPARC_BUILTIN_LDFSR, ftype);
+  def_builtin ("__builtin_store_fsr", CODE_FOR_stfsr,
+	       SPARC_BUILTIN_STFSR, ftype);
+}
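Editor's note: these two builtins wrap the ldfsr/stfsr instructions behind a
memory operand. A usage sketch (editor's illustration; the FSR bit layout
noted in the comment is an assumption to verify against the architecture
manual, and the code only runs on a SPARC target with an FPU):

    /* Editor's sketch: read, tweak, and restore the FP state register.  */
    void
    clear_accrued_exceptions (void)
    {
      unsigned int fsr;
      __builtin_store_fsr (&fsr);   /* stfsr: copy %fsr to memory */
      fsr &= ~0x1fu;                /* assumption: aexc field is bits 4..0 */
      __builtin_load_fsr (&fsr);    /* ldfsr: reload %fsr from memory */
    }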
+
+/* Create builtin functions for VIS instructions.  */
+
+static void
+sparc_vis_init_builtins (void)
+{
+  tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
+  tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
+  tree v4hi = build_vector_type (intHI_type_node, 4);
+  tree v2hi = build_vector_type (intHI_type_node, 2);
+  tree v2si = build_vector_type (intSI_type_node, 2);
+  tree v1si = build_vector_type (intSI_type_node, 1);
+
+  tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
+  tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
+  tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
+  tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
+  tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
+  tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
+  tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
+  tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
+  tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
+  tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
+  tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
+  tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
+  tree v2hi_ftype_v2hi_v2hi = build_function_type_list (v2hi, v2hi, v2hi, 0);
+  tree v1si_ftype_v1si_v1si = build_function_type_list (v1si, v1si, v1si, 0);
+  tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
+							 v8qi, v8qi,
+							 intDI_type_node, 0);
+  tree di_ftype_v8qi_v8qi = build_function_type_list (intDI_type_node,
+						      v8qi, v8qi, 0);
+  tree si_ftype_v8qi_v8qi = build_function_type_list (intSI_type_node,
+						      v8qi, v8qi, 0);
+  tree v8qi_ftype_df_si = build_function_type_list (v8qi, double_type_node,
+						    intSI_type_node, 0);
+  tree v4hi_ftype_df_si = build_function_type_list (v4hi, double_type_node,
+						    intSI_type_node, 0);
+  tree v2si_ftype_df_si = build_function_type_list (v2si, double_type_node,
+						    intSI_type_node, 0);
+  tree di_ftype_di_di = build_function_type_list (intDI_type_node,
+						  intDI_type_node,
+						  intDI_type_node, 0);
+  tree si_ftype_si_si = build_function_type_list (intSI_type_node,
+						  intSI_type_node,
+						  intSI_type_node, 0);
+  tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
+						    ptr_type_node,
+						    intSI_type_node, 0);
+  tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
+						    ptr_type_node,
+						    intDI_type_node, 0);
+  tree si_ftype_ptr_ptr = build_function_type_list (intSI_type_node,
+						    ptr_type_node,
+						    ptr_type_node, 0);
+  tree di_ftype_ptr_ptr = build_function_type_list (intDI_type_node,
+						    ptr_type_node,
+						    ptr_type_node, 0);
+  tree si_ftype_v4hi_v4hi = build_function_type_list (intSI_type_node,
+						      v4hi, v4hi, 0);
+  tree si_ftype_v2si_v2si = build_function_type_list (intSI_type_node,
+						      v2si, v2si, 0);
+  tree di_ftype_v4hi_v4hi = build_function_type_list (intDI_type_node,
+						      v4hi, v4hi, 0);
+  tree di_ftype_v2si_v2si = build_function_type_list (intDI_type_node,
+						      v2si, v2si, 0);
+  tree void_ftype_di = build_function_type_list (void_type_node,
+						 intDI_type_node, 0);
+  tree di_ftype_void = build_function_type_list (intDI_type_node,
+						 void_type_node, 0);
+  tree void_ftype_si = build_function_type_list (void_type_node,
+						 intSI_type_node, 0);
+  tree sf_ftype_sf_sf = build_function_type_list (float_type_node,
+						  float_type_node,
+						  float_type_node, 0);
+  tree df_ftype_df_df = build_function_type_list (double_type_node,
+						  double_type_node,
+						  double_type_node, 0);
+
+  /* Packing and expanding vectors.  */
+  def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis,
+	       SPARC_BUILTIN_FPACK16, v4qi_ftype_v4hi);
+  def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
+	       SPARC_BUILTIN_FPACK32, v8qi_ftype_v2si_v8qi);
+  def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
+	       SPARC_BUILTIN_FPACKFIX, v2hi_ftype_v2si);
+  def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis,
+		     SPARC_BUILTIN_FEXPAND, v4hi_ftype_v4qi);
+  def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
+		     SPARC_BUILTIN_FPMERGE, v8qi_ftype_v4qi_v4qi);
+
+  /* Multiplications.  */
+  def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
+		     SPARC_BUILTIN_FMUL8X16, v4hi_ftype_v4qi_v4hi);
+  def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
+		     SPARC_BUILTIN_FMUL8X16AU, v4hi_ftype_v4qi_v2hi);
+  def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
+		     SPARC_BUILTIN_FMUL8X16AL, v4hi_ftype_v4qi_v2hi);
+  def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
+		     SPARC_BUILTIN_FMUL8SUX16, v4hi_ftype_v8qi_v4hi);
+  def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
+		     SPARC_BUILTIN_FMUL8ULX16, v4hi_ftype_v8qi_v4hi);
+  def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
+		     SPARC_BUILTIN_FMULD8SUX16, v2si_ftype_v4qi_v2hi);
+  def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
+		     SPARC_BUILTIN_FMULD8ULX16, v2si_ftype_v4qi_v2hi);
+
+  /* Data aligning.  */
+  def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
+	       SPARC_BUILTIN_FALIGNDATAV4HI, v4hi_ftype_v4hi_v4hi);
+  def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
+	       SPARC_BUILTIN_FALIGNDATAV8QI, v8qi_ftype_v8qi_v8qi);
+  def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
+	       SPARC_BUILTIN_FALIGNDATAV2SI, v2si_ftype_v2si_v2si);
+  def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis,
+	       SPARC_BUILTIN_FALIGNDATADI, di_ftype_di_di);
+
+  def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
+	       SPARC_BUILTIN_WRGSR, void_ftype_di);
+  def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis,
+	       SPARC_BUILTIN_RDGSR, di_ftype_void);
+
+  if (TARGET_ARCH64)
+    {
+      def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
+		   SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_di);
+      def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis,
+		   SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_di);
+    }
+  else
+    {
+      def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
+		   SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_si);
+      def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis,
+		   SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_si);
+    }
+
+  /* Pixel distance.  */
+  def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
+		     SPARC_BUILTIN_PDIST, di_ftype_v8qi_v8qi_di);
+
+  /* Edge handling.  */
+  if (TARGET_ARCH64)
+    {
+      def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis,
+			 SPARC_BUILTIN_EDGE8, di_ftype_ptr_ptr);
+      def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis,
+			 SPARC_BUILTIN_EDGE8L, di_ftype_ptr_ptr);
+      def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis,
+			 SPARC_BUILTIN_EDGE16, di_ftype_ptr_ptr);
+      def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis,
+			 SPARC_BUILTIN_EDGE16L, di_ftype_ptr_ptr);
+      def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis,
+			 SPARC_BUILTIN_EDGE32, di_ftype_ptr_ptr);
+      def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis,
+			 SPARC_BUILTIN_EDGE32L, di_ftype_ptr_ptr);
+    }
+  else
+    {
+      def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis,
+			 SPARC_BUILTIN_EDGE8, si_ftype_ptr_ptr);
+      def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis,
+			 SPARC_BUILTIN_EDGE8L, si_ftype_ptr_ptr);
+      def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis,
+			 SPARC_BUILTIN_EDGE16, si_ftype_ptr_ptr);
+      def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis,
+			 SPARC_BUILTIN_EDGE16L, si_ftype_ptr_ptr);
+      def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis,
+			 SPARC_BUILTIN_EDGE32, si_ftype_ptr_ptr);
+      def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis,
+			 SPARC_BUILTIN_EDGE32L, si_ftype_ptr_ptr);
+    }
+
+  /* Pixel compare.  */
+  if (TARGET_ARCH64)
+    {
+      def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis,
+			 SPARC_BUILTIN_FCMPLE16, di_ftype_v4hi_v4hi);
+      def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis,
+			 SPARC_BUILTIN_FCMPLE32, di_ftype_v2si_v2si);
+      def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis,
+			 SPARC_BUILTIN_FCMPNE16, di_ftype_v4hi_v4hi);
+      def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis,
+			 SPARC_BUILTIN_FCMPNE32, di_ftype_v2si_v2si);
+      def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis,
+			 SPARC_BUILTIN_FCMPGT16, di_ftype_v4hi_v4hi);
+      def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis,
+			 SPARC_BUILTIN_FCMPGT32, di_ftype_v2si_v2si);
+      def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis,
+			 SPARC_BUILTIN_FCMPEQ16, di_ftype_v4hi_v4hi);
+      def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis,
+			 SPARC_BUILTIN_FCMPEQ32, di_ftype_v2si_v2si);
+    }
+  else
+    {
+      def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis,
+			 SPARC_BUILTIN_FCMPLE16, si_ftype_v4hi_v4hi);
+      def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis,
+			 SPARC_BUILTIN_FCMPLE32, si_ftype_v2si_v2si);
+      def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis,
+			 SPARC_BUILTIN_FCMPNE16, si_ftype_v4hi_v4hi);
+      def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis,
+			 SPARC_BUILTIN_FCMPNE32, si_ftype_v2si_v2si);
+      def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis,
+			 SPARC_BUILTIN_FCMPGT16, si_ftype_v4hi_v4hi);
+      def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis,
+			 SPARC_BUILTIN_FCMPGT32, si_ftype_v2si_v2si);
+      def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis,
+			 SPARC_BUILTIN_FCMPEQ16, si_ftype_v4hi_v4hi);
+      def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis,
+			 SPARC_BUILTIN_FCMPEQ32, si_ftype_v2si_v2si);
+    }
+
+  /* Addition and subtraction.  */
+  def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3,
+		     SPARC_BUILTIN_FPADD16, v4hi_ftype_v4hi_v4hi);
+  def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3,
+		     SPARC_BUILTIN_FPADD16S, v2hi_ftype_v2hi_v2hi);
+  def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
+		     SPARC_BUILTIN_FPADD32, v2si_ftype_v2si_v2si);
+  def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3,
+		     SPARC_BUILTIN_FPADD32S, v1si_ftype_v1si_v1si);
+  def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
+		     SPARC_BUILTIN_FPSUB16, v4hi_ftype_v4hi_v4hi);
  11395. + def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3,
  11396. + SPARC_BUILTIN_FPSUB16S, v2hi_ftype_v2hi_v2hi);
  11397. + def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
  11398. + SPARC_BUILTIN_FPSUB32, v2si_ftype_v2si_v2si);
  11399. + def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3,
  11400. + SPARC_BUILTIN_FPSUB32S, v1si_ftype_v1si_v1si);
  11401. +
  11402. + /* Three-dimensional array addressing. */
  11403. + if (TARGET_ARCH64)
  11404. + {
  11405. + def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis,
  11406. + SPARC_BUILTIN_ARRAY8, di_ftype_di_di);
  11407. + def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis,
  11408. + SPARC_BUILTIN_ARRAY16, di_ftype_di_di);
  11409. + def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis,
  11410. + SPARC_BUILTIN_ARRAY32, di_ftype_di_di);
  11411. + }
  11412. + else
  11413. + {
  11414. + def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis,
  11415. + SPARC_BUILTIN_ARRAY8, si_ftype_si_si);
  11416. + def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis,
  11417. + SPARC_BUILTIN_ARRAY16, si_ftype_si_si);
  11418. + def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis,
  11419. + SPARC_BUILTIN_ARRAY32, si_ftype_si_si);
  11420. + }
  11421. +
  11422. + if (TARGET_VIS2)
  11423. + {
  11424. + /* Edge handling. */
  11425. + if (TARGET_ARCH64)
  11426. + {
  11427. + def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
  11428. + SPARC_BUILTIN_EDGE8N, di_ftype_ptr_ptr);
  11429. + def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
  11430. + SPARC_BUILTIN_EDGE8LN, di_ftype_ptr_ptr);
  11431. + def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
  11432. + SPARC_BUILTIN_EDGE16N, di_ftype_ptr_ptr);
  11433. + def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
  11434. + SPARC_BUILTIN_EDGE16LN, di_ftype_ptr_ptr);
  11435. + def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
  11436. + SPARC_BUILTIN_EDGE32N, di_ftype_ptr_ptr);
  11437. + def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
  11438. + SPARC_BUILTIN_EDGE32LN, di_ftype_ptr_ptr);
  11439. + }
  11440. + else
  11441. + {
  11442. + def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
  11443. + SPARC_BUILTIN_EDGE8N, si_ftype_ptr_ptr);
  11444. + def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
  11445. + SPARC_BUILTIN_EDGE8LN, si_ftype_ptr_ptr);
  11446. + def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
  11447. + SPARC_BUILTIN_EDGE16N, si_ftype_ptr_ptr);
  11448. + def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
  11449. + SPARC_BUILTIN_EDGE16LN, si_ftype_ptr_ptr);
  11450. + def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
  11451. + SPARC_BUILTIN_EDGE32N, si_ftype_ptr_ptr);
  11452. + def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
  11453. + SPARC_BUILTIN_EDGE32LN, si_ftype_ptr_ptr);
  11454. + }
  11455. +
  11456. + /* Byte mask and shuffle. */
  11457. + if (TARGET_ARCH64)
  11458. + def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis,
  11459. + SPARC_BUILTIN_BMASK, di_ftype_di_di);
  11460. + else
  11461. + def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis,
  11462. + SPARC_BUILTIN_BMASK, si_ftype_si_si);
  11463. + def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis,
  11464. + SPARC_BUILTIN_BSHUFFLEV4HI, v4hi_ftype_v4hi_v4hi);
  11465. + def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis,
  11466. + SPARC_BUILTIN_BSHUFFLEV8QI, v8qi_ftype_v8qi_v8qi);
  11467. + def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
  11468. + SPARC_BUILTIN_BSHUFFLEV2SI, v2si_ftype_v2si_v2si);
  11469. + def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis,
  11470. + SPARC_BUILTIN_BSHUFFLEDI, di_ftype_di_di);
  11471. + }
  11472. +
  11473. + if (TARGET_VIS3)
  11474. + {
  11475. + if (TARGET_ARCH64)
  11476. + {
  11477. + def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis,
  11478. + SPARC_BUILTIN_CMASK8, void_ftype_di);
  11479. + def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis,
  11480. + SPARC_BUILTIN_CMASK16, void_ftype_di);
  11481. + def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis,
  11482. + SPARC_BUILTIN_CMASK32, void_ftype_di);
  11483. + }
  11484. + else
  11485. + {
  11486. + def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis,
  11487. + SPARC_BUILTIN_CMASK8, void_ftype_si);
  11488. + def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis,
  11489. + SPARC_BUILTIN_CMASK16, void_ftype_si);
  11490. + def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis,
  11491. + SPARC_BUILTIN_CMASK32, void_ftype_si);
  11492. + }
  11493. +
  11494. + def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis,
  11495. + SPARC_BUILTIN_FCHKSM16, v4hi_ftype_v4hi_v4hi);
  11496. +
  11497. + def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3,
  11498. + SPARC_BUILTIN_FSLL16, v4hi_ftype_v4hi_v4hi);
  11499. + def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3,
  11500. + SPARC_BUILTIN_FSLAS16, v4hi_ftype_v4hi_v4hi);
  11501. + def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3,
  11502. + SPARC_BUILTIN_FSRL16, v4hi_ftype_v4hi_v4hi);
  11503. + def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3,
  11504. + SPARC_BUILTIN_FSRA16, v4hi_ftype_v4hi_v4hi);
  11505. + def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3,
  11506. + SPARC_BUILTIN_FSLL32, v2si_ftype_v2si_v2si);
  11507. + def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3,
  11508. + SPARC_BUILTIN_FSLAS32, v2si_ftype_v2si_v2si);
  11509. + def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3,
  11510. + SPARC_BUILTIN_FSRL32, v2si_ftype_v2si_v2si);
  11511. + def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3,
  11512. + SPARC_BUILTIN_FSRA32, v2si_ftype_v2si_v2si);
  11513. +
  11514. + if (TARGET_ARCH64)
  11515. + def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis,
  11516. + SPARC_BUILTIN_PDISTN, di_ftype_v8qi_v8qi);
  11517. + else
  11518. + def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis,
  11519. + SPARC_BUILTIN_PDISTN, si_ftype_v8qi_v8qi);
  11520. +
  11521. + def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis,
  11522. + SPARC_BUILTIN_FMEAN16, v4hi_ftype_v4hi_v4hi);
  11523. + def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis,
  11524. + SPARC_BUILTIN_FPADD64, di_ftype_di_di);
  11525. + def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis,
  11526. + SPARC_BUILTIN_FPSUB64, di_ftype_di_di);
  11527. +
  11528. + def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3,
  11529. + SPARC_BUILTIN_FPADDS16, v4hi_ftype_v4hi_v4hi);
  11530. + def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3,
  11531. + SPARC_BUILTIN_FPADDS16S, v2hi_ftype_v2hi_v2hi);
  11532. + def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3,
  11533. + SPARC_BUILTIN_FPSUBS16, v4hi_ftype_v4hi_v4hi);
  11534. + def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3,
  11535. + SPARC_BUILTIN_FPSUBS16S, v2hi_ftype_v2hi_v2hi);
  11536. + def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3,
  11537. + SPARC_BUILTIN_FPADDS32, v2si_ftype_v2si_v2si);
  11538. + def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3,
  11539. + SPARC_BUILTIN_FPADDS32S, v1si_ftype_v1si_v1si);
  11540. + def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3,
  11541. + SPARC_BUILTIN_FPSUBS32, v2si_ftype_v2si_v2si);
  11542. + def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3,
  11543. + SPARC_BUILTIN_FPSUBS32S, v1si_ftype_v1si_v1si);
  11544. +
  11545. + if (TARGET_ARCH64)
  11546. + {
  11547. + def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis,
  11548. + SPARC_BUILTIN_FUCMPLE8, di_ftype_v8qi_v8qi);
  11549. + def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis,
  11550. + SPARC_BUILTIN_FUCMPNE8, di_ftype_v8qi_v8qi);
  11551. + def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis,
  11552. + SPARC_BUILTIN_FUCMPGT8, di_ftype_v8qi_v8qi);
  11553. + def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis,
  11554. + SPARC_BUILTIN_FUCMPEQ8, di_ftype_v8qi_v8qi);
  11555. + }
  11556. + else
  11557. + {
  11558. + def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis,
  11559. + SPARC_BUILTIN_FUCMPLE8, si_ftype_v8qi_v8qi);
  11560. + def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis,
  11561. + SPARC_BUILTIN_FUCMPNE8, si_ftype_v8qi_v8qi);
  11562. + def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis,
  11563. + SPARC_BUILTIN_FUCMPGT8, si_ftype_v8qi_v8qi);
  11564. + def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis,
  11565. + SPARC_BUILTIN_FUCMPEQ8, si_ftype_v8qi_v8qi);
  11566. + }
  11567. +
  11568. + def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis,
  11569. + SPARC_BUILTIN_FHADDS, sf_ftype_sf_sf);
  11570. + def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis,
  11571. + SPARC_BUILTIN_FHADDD, df_ftype_df_df);
  11572. + def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis,
  11573. + SPARC_BUILTIN_FHSUBS, sf_ftype_sf_sf);
  11574. + def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis,
  11575. + SPARC_BUILTIN_FHSUBD, df_ftype_df_df);
  11576. + def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis,
  11577. + SPARC_BUILTIN_FNHADDS, sf_ftype_sf_sf);
  11578. + def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis,
  11579. + SPARC_BUILTIN_FNHADDD, df_ftype_df_df);
  11580. +
  11581. + def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
  11582. + SPARC_BUILTIN_UMULXHI, di_ftype_di_di);
  11583. + def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
  11584. + SPARC_BUILTIN_XMULX, di_ftype_di_di);
  11585. + def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
  11586. + SPARC_BUILTIN_XMULXHI, di_ftype_di_di);
  11587. + }
  11588. +
  11589. + if (TARGET_VIS4)
  11590. + {
  11591. + def_builtin_const ("__builtin_vis_fpadd8", CODE_FOR_addv8qi3,
  11592. + SPARC_BUILTIN_FPADD8, v8qi_ftype_v8qi_v8qi);
  11593. + def_builtin_const ("__builtin_vis_fpadds8", CODE_FOR_ssaddv8qi3,
  11594. + SPARC_BUILTIN_FPADDS8, v8qi_ftype_v8qi_v8qi);
  11595. + def_builtin_const ("__builtin_vis_fpaddus8", CODE_FOR_usaddv8qi3,
  11596. + SPARC_BUILTIN_FPADDUS8, v8qi_ftype_v8qi_v8qi);
  11597. + def_builtin_const ("__builtin_vis_fpaddus16", CODE_FOR_usaddv4hi3,
  11598. + SPARC_BUILTIN_FPADDUS16, v4hi_ftype_v4hi_v4hi);
11599. +
  11601. + if (TARGET_ARCH64)
  11602. + {
  11603. + def_builtin_const ("__builtin_vis_fpcmple8", CODE_FOR_fpcmple8di_vis,
  11604. + SPARC_BUILTIN_FPCMPLE8, di_ftype_v8qi_v8qi);
  11605. + def_builtin_const ("__builtin_vis_fpcmpgt8", CODE_FOR_fpcmpgt8di_vis,
  11606. + SPARC_BUILTIN_FPCMPGT8, di_ftype_v8qi_v8qi);
  11607. + def_builtin_const ("__builtin_vis_fpcmpule16", CODE_FOR_fpcmpule16di_vis,
  11608. + SPARC_BUILTIN_FPCMPULE16, di_ftype_v4hi_v4hi);
  11609. + def_builtin_const ("__builtin_vis_fpcmpugt16", CODE_FOR_fpcmpugt16di_vis,
  11610. + SPARC_BUILTIN_FPCMPUGT16, di_ftype_v4hi_v4hi);
  11611. + def_builtin_const ("__builtin_vis_fpcmpule32", CODE_FOR_fpcmpule32di_vis,
  11612. + SPARC_BUILTIN_FPCMPULE32, di_ftype_v2si_v2si);
  11613. + def_builtin_const ("__builtin_vis_fpcmpugt32", CODE_FOR_fpcmpugt32di_vis,
  11614. + SPARC_BUILTIN_FPCMPUGT32, di_ftype_v2si_v2si);
  11615. + }
  11616. + else
  11617. + {
  11618. + def_builtin_const ("__builtin_vis_fpcmple8", CODE_FOR_fpcmple8si_vis,
  11619. + SPARC_BUILTIN_FPCMPLE8, si_ftype_v8qi_v8qi);
  11620. + def_builtin_const ("__builtin_vis_fpcmpgt8", CODE_FOR_fpcmpgt8si_vis,
  11621. + SPARC_BUILTIN_FPCMPGT8, si_ftype_v8qi_v8qi);
  11622. + def_builtin_const ("__builtin_vis_fpcmpule16", CODE_FOR_fpcmpule16si_vis,
  11623. + SPARC_BUILTIN_FPCMPULE16, si_ftype_v4hi_v4hi);
  11624. + def_builtin_const ("__builtin_vis_fpcmpugt16", CODE_FOR_fpcmpugt16si_vis,
  11625. + SPARC_BUILTIN_FPCMPUGT16, si_ftype_v4hi_v4hi);
  11626. + def_builtin_const ("__builtin_vis_fpcmpule32", CODE_FOR_fpcmpule32si_vis,
  11627. + SPARC_BUILTIN_FPCMPULE32, di_ftype_v2si_v2si);
  11628. + def_builtin_const ("__builtin_vis_fpcmpugt32", CODE_FOR_fpcmpugt32si_vis,
  11629. + SPARC_BUILTIN_FPCMPUGT32, di_ftype_v2si_v2si);
  11630. + }
  11631. +
  11632. + def_builtin_const ("__builtin_vis_fpmax8", CODE_FOR_maxv8qi3,
  11633. + SPARC_BUILTIN_FPMAX8, v8qi_ftype_v8qi_v8qi);
  11634. + def_builtin_const ("__builtin_vis_fpmax16", CODE_FOR_maxv4hi3,
  11635. + SPARC_BUILTIN_FPMAX16, v4hi_ftype_v4hi_v4hi);
  11636. + def_builtin_const ("__builtin_vis_fpmax32", CODE_FOR_maxv2si3,
  11637. + SPARC_BUILTIN_FPMAX32, v2si_ftype_v2si_v2si);
  11638. + def_builtin_const ("__builtin_vis_fpmaxu8", CODE_FOR_maxuv8qi3,
  11639. + SPARC_BUILTIN_FPMAXU8, v8qi_ftype_v8qi_v8qi);
  11640. + def_builtin_const ("__builtin_vis_fpmaxu16", CODE_FOR_maxuv4hi3,
  11641. + SPARC_BUILTIN_FPMAXU16, v4hi_ftype_v4hi_v4hi);
  11642. + def_builtin_const ("__builtin_vis_fpmaxu32", CODE_FOR_maxuv2si3,
  11643. + SPARC_BUILTIN_FPMAXU32, v2si_ftype_v2si_v2si);
  11644. + def_builtin_const ("__builtin_vis_fpmin8", CODE_FOR_minv8qi3,
  11645. + SPARC_BUILTIN_FPMIN8, v8qi_ftype_v8qi_v8qi);
  11646. + def_builtin_const ("__builtin_vis_fpmin16", CODE_FOR_minv4hi3,
  11647. + SPARC_BUILTIN_FPMIN16, v4hi_ftype_v4hi_v4hi);
  11648. + def_builtin_const ("__builtin_vis_fpmin32", CODE_FOR_minv2si3,
  11649. + SPARC_BUILTIN_FPMIN32, v2si_ftype_v2si_v2si);
  11650. + def_builtin_const ("__builtin_vis_fpminu8", CODE_FOR_minuv8qi3,
  11651. + SPARC_BUILTIN_FPMINU8, v8qi_ftype_v8qi_v8qi);
  11652. + def_builtin_const ("__builtin_vis_fpminu16", CODE_FOR_minuv4hi3,
  11653. + SPARC_BUILTIN_FPMINU16, v4hi_ftype_v4hi_v4hi);
  11654. + def_builtin_const ("__builtin_vis_fpminu32", CODE_FOR_minuv2si3,
  11655. + SPARC_BUILTIN_FPMINU32, v2si_ftype_v2si_v2si);
  11656. + def_builtin_const ("__builtin_vis_fpsub8", CODE_FOR_subv8qi3,
  11657. + SPARC_BUILTIN_FPSUB8, v8qi_ftype_v8qi_v8qi);
  11658. + def_builtin_const ("__builtin_vis_fpsubs8", CODE_FOR_sssubv8qi3,
  11659. + SPARC_BUILTIN_FPSUBS8, v8qi_ftype_v8qi_v8qi);
  11660. + def_builtin_const ("__builtin_vis_fpsubus8", CODE_FOR_ussubv8qi3,
  11661. + SPARC_BUILTIN_FPSUBUS8, v8qi_ftype_v8qi_v8qi);
  11662. + def_builtin_const ("__builtin_vis_fpsubus16", CODE_FOR_ussubv4hi3,
  11663. + SPARC_BUILTIN_FPSUBUS16, v4hi_ftype_v4hi_v4hi);
  11664. + }
  11665. +
  11666. + if (TARGET_VIS4B)
  11667. + {
  11668. + def_builtin_const ("__builtin_vis_dictunpack8", CODE_FOR_dictunpack8,
  11669. + SPARC_BUILTIN_DICTUNPACK8, v8qi_ftype_df_si);
  11670. + def_builtin_const ("__builtin_vis_dictunpack16", CODE_FOR_dictunpack16,
  11671. + SPARC_BUILTIN_DICTUNPACK16, v4hi_ftype_df_si);
  11672. + def_builtin_const ("__builtin_vis_dictunpack32", CODE_FOR_dictunpack32,
  11673. + SPARC_BUILTIN_DICTUNPACK32, v2si_ftype_df_si);
  11674. +
  11675. + if (TARGET_ARCH64)
  11676. + {
  11677. + tree di_ftype_v8qi_v8qi_si = build_function_type_list (intDI_type_node,
  11678. + v8qi, v8qi,
  11679. + intSI_type_node, 0);
  11680. + tree di_ftype_v4hi_v4hi_si = build_function_type_list (intDI_type_node,
  11681. + v4hi, v4hi,
  11682. + intSI_type_node, 0);
  11683. + tree di_ftype_v2si_v2si_si = build_function_type_list (intDI_type_node,
  11684. + v2si, v2si,
  11685. + intSI_type_node, 0);
  11686. +
  11687. + def_builtin_const ("__builtin_vis_fpcmple8shl", CODE_FOR_fpcmple8dishl,
  11688. + SPARC_BUILTIN_FPCMPLE8SHL, di_ftype_v8qi_v8qi_si);
  11689. + def_builtin_const ("__builtin_vis_fpcmpgt8shl", CODE_FOR_fpcmpgt8dishl,
  11690. + SPARC_BUILTIN_FPCMPGT8SHL, di_ftype_v8qi_v8qi_si);
  11691. + def_builtin_const ("__builtin_vis_fpcmpeq8shl", CODE_FOR_fpcmpeq8dishl,
  11692. + SPARC_BUILTIN_FPCMPEQ8SHL, di_ftype_v8qi_v8qi_si);
  11693. + def_builtin_const ("__builtin_vis_fpcmpne8shl", CODE_FOR_fpcmpne8dishl,
  11694. + SPARC_BUILTIN_FPCMPNE8SHL, di_ftype_v8qi_v8qi_si);
  11695. +
  11696. + def_builtin_const ("__builtin_vis_fpcmple16shl", CODE_FOR_fpcmple16dishl,
  11697. + SPARC_BUILTIN_FPCMPLE16SHL, di_ftype_v4hi_v4hi_si);
  11698. + def_builtin_const ("__builtin_vis_fpcmpgt16shl", CODE_FOR_fpcmpgt16dishl,
  11699. + SPARC_BUILTIN_FPCMPGT16SHL, di_ftype_v4hi_v4hi_si);
  11700. + def_builtin_const ("__builtin_vis_fpcmpeq16shl", CODE_FOR_fpcmpeq16dishl,
  11701. + SPARC_BUILTIN_FPCMPEQ16SHL, di_ftype_v4hi_v4hi_si);
  11702. + def_builtin_const ("__builtin_vis_fpcmpne16shl", CODE_FOR_fpcmpne16dishl,
  11703. + SPARC_BUILTIN_FPCMPNE16SHL, di_ftype_v4hi_v4hi_si);
  11704. +
  11705. + def_builtin_const ("__builtin_vis_fpcmple32shl", CODE_FOR_fpcmple32dishl,
  11706. + SPARC_BUILTIN_FPCMPLE32SHL, di_ftype_v2si_v2si_si);
  11707. + def_builtin_const ("__builtin_vis_fpcmpgt32shl", CODE_FOR_fpcmpgt32dishl,
  11708. + SPARC_BUILTIN_FPCMPGT32SHL, di_ftype_v2si_v2si_si);
  11709. + def_builtin_const ("__builtin_vis_fpcmpeq32shl", CODE_FOR_fpcmpeq32dishl,
  11710. + SPARC_BUILTIN_FPCMPEQ32SHL, di_ftype_v2si_v2si_si);
  11711. + def_builtin_const ("__builtin_vis_fpcmpne32shl", CODE_FOR_fpcmpne32dishl,
  11712. + SPARC_BUILTIN_FPCMPNE32SHL, di_ftype_v2si_v2si_si);
11713. +
  11715. + def_builtin_const ("__builtin_vis_fpcmpule8shl", CODE_FOR_fpcmpule8dishl,
  11716. + SPARC_BUILTIN_FPCMPULE8SHL, di_ftype_v8qi_v8qi_si);
  11717. + def_builtin_const ("__builtin_vis_fpcmpugt8shl", CODE_FOR_fpcmpugt8dishl,
  11718. + SPARC_BUILTIN_FPCMPUGT8SHL, di_ftype_v8qi_v8qi_si);
  11719. +
  11720. + def_builtin_const ("__builtin_vis_fpcmpule16shl", CODE_FOR_fpcmpule16dishl,
  11721. + SPARC_BUILTIN_FPCMPULE16SHL, di_ftype_v4hi_v4hi_si);
  11722. + def_builtin_const ("__builtin_vis_fpcmpugt16shl", CODE_FOR_fpcmpugt16dishl,
  11723. + SPARC_BUILTIN_FPCMPUGT16SHL, di_ftype_v4hi_v4hi_si);
  11724. +
  11725. + def_builtin_const ("__builtin_vis_fpcmpule32shl", CODE_FOR_fpcmpule32dishl,
  11726. + SPARC_BUILTIN_FPCMPULE32SHL, di_ftype_v2si_v2si_si);
  11727. + def_builtin_const ("__builtin_vis_fpcmpugt32shl", CODE_FOR_fpcmpugt32dishl,
  11728. + SPARC_BUILTIN_FPCMPUGT32SHL, di_ftype_v2si_v2si_si);
  11729. +
  11730. + def_builtin_const ("__builtin_vis_fpcmpde8shl", CODE_FOR_fpcmpde8dishl,
  11731. + SPARC_BUILTIN_FPCMPDE8SHL, di_ftype_v8qi_v8qi_si);
  11732. + def_builtin_const ("__builtin_vis_fpcmpde16shl", CODE_FOR_fpcmpde16dishl,
  11733. + SPARC_BUILTIN_FPCMPDE16SHL, di_ftype_v4hi_v4hi_si);
  11734. + def_builtin_const ("__builtin_vis_fpcmpde32shl", CODE_FOR_fpcmpde32dishl,
  11735. + SPARC_BUILTIN_FPCMPDE32SHL, di_ftype_v2si_v2si_si);
  11736. +
  11737. + def_builtin_const ("__builtin_vis_fpcmpur8shl", CODE_FOR_fpcmpur8dishl,
  11738. + SPARC_BUILTIN_FPCMPUR8SHL, di_ftype_v8qi_v8qi_si);
  11739. + def_builtin_const ("__builtin_vis_fpcmpur16shl", CODE_FOR_fpcmpur16dishl,
  11740. + SPARC_BUILTIN_FPCMPUR16SHL, di_ftype_v4hi_v4hi_si);
  11741. + def_builtin_const ("__builtin_vis_fpcmpur32shl", CODE_FOR_fpcmpur32dishl,
  11742. + SPARC_BUILTIN_FPCMPUR32SHL, di_ftype_v2si_v2si_si);
11744. + }
  11745. + else
  11746. + {
  11747. + tree si_ftype_v8qi_v8qi_si = build_function_type_list (intSI_type_node,
  11748. + v8qi, v8qi,
  11749. + intSI_type_node, 0);
  11750. + tree si_ftype_v4hi_v4hi_si = build_function_type_list (intSI_type_node,
  11751. + v4hi, v4hi,
  11752. + intSI_type_node, 0);
  11753. + tree si_ftype_v2si_v2si_si = build_function_type_list (intSI_type_node,
  11754. + v2si, v2si,
  11755. + intSI_type_node, 0);
  11756. +
  11757. + def_builtin_const ("__builtin_vis_fpcmple8shl", CODE_FOR_fpcmple8sishl,
  11758. + SPARC_BUILTIN_FPCMPLE8SHL, si_ftype_v8qi_v8qi_si);
  11759. + def_builtin_const ("__builtin_vis_fpcmpgt8shl", CODE_FOR_fpcmpgt8sishl,
  11760. + SPARC_BUILTIN_FPCMPGT8SHL, si_ftype_v8qi_v8qi_si);
  11761. + def_builtin_const ("__builtin_vis_fpcmpeq8shl", CODE_FOR_fpcmpeq8sishl,
  11762. + SPARC_BUILTIN_FPCMPEQ8SHL, si_ftype_v8qi_v8qi_si);
  11763. + def_builtin_const ("__builtin_vis_fpcmpne8shl", CODE_FOR_fpcmpne8sishl,
  11764. + SPARC_BUILTIN_FPCMPNE8SHL, si_ftype_v8qi_v8qi_si);
  11765. +
  11766. + def_builtin_const ("__builtin_vis_fpcmple16shl", CODE_FOR_fpcmple16sishl,
  11767. + SPARC_BUILTIN_FPCMPLE16SHL, si_ftype_v4hi_v4hi_si);
  11768. + def_builtin_const ("__builtin_vis_fpcmpgt16shl", CODE_FOR_fpcmpgt16sishl,
  11769. + SPARC_BUILTIN_FPCMPGT16SHL, si_ftype_v4hi_v4hi_si);
  11770. + def_builtin_const ("__builtin_vis_fpcmpeq16shl", CODE_FOR_fpcmpeq16sishl,
  11771. + SPARC_BUILTIN_FPCMPEQ16SHL, si_ftype_v4hi_v4hi_si);
  11772. + def_builtin_const ("__builtin_vis_fpcmpne16shl", CODE_FOR_fpcmpne16sishl,
  11773. + SPARC_BUILTIN_FPCMPNE16SHL, si_ftype_v4hi_v4hi_si);
  11774. +
  11775. + def_builtin_const ("__builtin_vis_fpcmple32shl", CODE_FOR_fpcmple32sishl,
  11776. + SPARC_BUILTIN_FPCMPLE32SHL, si_ftype_v2si_v2si_si);
  11777. + def_builtin_const ("__builtin_vis_fpcmpgt32shl", CODE_FOR_fpcmpgt32sishl,
  11778. + SPARC_BUILTIN_FPCMPGT32SHL, si_ftype_v2si_v2si_si);
  11779. + def_builtin_const ("__builtin_vis_fpcmpeq32shl", CODE_FOR_fpcmpeq32sishl,
  11780. + SPARC_BUILTIN_FPCMPEQ32SHL, si_ftype_v2si_v2si_si);
  11781. + def_builtin_const ("__builtin_vis_fpcmpne32shl", CODE_FOR_fpcmpne32sishl,
  11782. + SPARC_BUILTIN_FPCMPNE32SHL, si_ftype_v2si_v2si_si);
11783. +
  11785. + def_builtin_const ("__builtin_vis_fpcmpule8shl", CODE_FOR_fpcmpule8sishl,
  11786. + SPARC_BUILTIN_FPCMPULE8SHL, si_ftype_v8qi_v8qi_si);
  11787. + def_builtin_const ("__builtin_vis_fpcmpugt8shl", CODE_FOR_fpcmpugt8sishl,
  11788. + SPARC_BUILTIN_FPCMPUGT8SHL, si_ftype_v8qi_v8qi_si);
  11789. +
  11790. + def_builtin_const ("__builtin_vis_fpcmpule16shl", CODE_FOR_fpcmpule16sishl,
  11791. + SPARC_BUILTIN_FPCMPULE16SHL, si_ftype_v4hi_v4hi_si);
  11792. + def_builtin_const ("__builtin_vis_fpcmpugt16shl", CODE_FOR_fpcmpugt16sishl,
  11793. + SPARC_BUILTIN_FPCMPUGT16SHL, si_ftype_v4hi_v4hi_si);
  11794. +
  11795. + def_builtin_const ("__builtin_vis_fpcmpule32shl", CODE_FOR_fpcmpule32sishl,
  11796. + SPARC_BUILTIN_FPCMPULE32SHL, si_ftype_v2si_v2si_si);
  11797. + def_builtin_const ("__builtin_vis_fpcmpugt32shl", CODE_FOR_fpcmpugt32sishl,
  11798. + SPARC_BUILTIN_FPCMPUGT32SHL, si_ftype_v2si_v2si_si);
  11799. +
  11800. + def_builtin_const ("__builtin_vis_fpcmpde8shl", CODE_FOR_fpcmpde8sishl,
  11801. + SPARC_BUILTIN_FPCMPDE8SHL, si_ftype_v8qi_v8qi_si);
  11802. + def_builtin_const ("__builtin_vis_fpcmpde16shl", CODE_FOR_fpcmpde16sishl,
  11803. + SPARC_BUILTIN_FPCMPDE16SHL, si_ftype_v4hi_v4hi_si);
  11804. + def_builtin_const ("__builtin_vis_fpcmpde32shl", CODE_FOR_fpcmpde32sishl,
  11805. + SPARC_BUILTIN_FPCMPDE32SHL, si_ftype_v2si_v2si_si);
  11806. +
  11807. + def_builtin_const ("__builtin_vis_fpcmpur8shl", CODE_FOR_fpcmpur8sishl,
  11808. + SPARC_BUILTIN_FPCMPUR8SHL, si_ftype_v8qi_v8qi_si);
  11809. + def_builtin_const ("__builtin_vis_fpcmpur16shl", CODE_FOR_fpcmpur16sishl,
  11810. + SPARC_BUILTIN_FPCMPUR16SHL, si_ftype_v4hi_v4hi_si);
  11811. + def_builtin_const ("__builtin_vis_fpcmpur32shl", CODE_FOR_fpcmpur32sishl,
  11812. + SPARC_BUILTIN_FPCMPUR32SHL, si_ftype_v2si_v2si_si);
  11813. + }
  11814. + }
  11815. +}
  11816. +
  11817. +/* Implement TARGET_BUILTIN_DECL hook. */
  11818. +
  11819. +static tree
  11820. +sparc_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
  11821. +{
  11822. + if (code >= SPARC_BUILTIN_MAX)
  11823. + return error_mark_node;
  11824. +
  11825. + return sparc_builtins[code];
  11826. +}
  11827. +
11828. +/* Implement TARGET_EXPAND_BUILTIN hook. */
  11829. +
  11830. +static rtx
  11831. +sparc_expand_builtin (tree exp, rtx target,
  11832. + rtx subtarget ATTRIBUTE_UNUSED,
  11833. + machine_mode tmode ATTRIBUTE_UNUSED,
  11834. + int ignore ATTRIBUTE_UNUSED)
  11835. +{
  11836. + tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  11837. + enum sparc_builtins code
  11838. + = (enum sparc_builtins) DECL_MD_FUNCTION_CODE (fndecl);
  11839. + enum insn_code icode = sparc_builtins_icode[code];
  11840. + bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
  11841. + call_expr_arg_iterator iter;
  11842. + int arg_count = 0;
  11843. + rtx pat, op[4];
  11844. + tree arg;
  11845. +
  11846. + if (nonvoid)
  11847. + {
  11848. + machine_mode tmode = insn_data[icode].operand[0].mode;
  11849. + if (!target
  11850. + || GET_MODE (target) != tmode
  11851. + || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
  11852. + op[0] = gen_reg_rtx (tmode);
  11853. + else
  11854. + op[0] = target;
  11855. + }
  11856. + else
  11857. + op[0] = NULL_RTX;
  11858. +
  11859. + FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
  11860. + {
  11861. + const struct insn_operand_data *insn_op;
  11862. + int idx;
  11863. +
  11864. + if (arg == error_mark_node)
  11865. + return NULL_RTX;
  11866. +
  11867. + arg_count++;
  11868. + idx = arg_count - !nonvoid;
  11869. + insn_op = &insn_data[icode].operand[idx];
  11870. + op[arg_count] = expand_normal (arg);
  11871. +
  11872. + /* Some of the builtins require constant arguments. We check
  11873. + for this here. */
  11874. + if ((code >= SPARC_BUILTIN_FIRST_FPCMPSHL
  11875. + && code <= SPARC_BUILTIN_LAST_FPCMPSHL
  11876. + && arg_count == 3)
  11877. + || (code >= SPARC_BUILTIN_FIRST_DICTUNPACK
  11878. + && code <= SPARC_BUILTIN_LAST_DICTUNPACK
  11879. + && arg_count == 2))
  11880. + {
  11881. + if (!check_constant_argument (icode, idx, op[arg_count]))
  11882. + return const0_rtx;
  11883. + }
  11884. +
  11885. + if (code == SPARC_BUILTIN_LDFSR || code == SPARC_BUILTIN_STFSR)
  11886. + {
  11887. + if (!address_operand (op[arg_count], SImode))
  11888. + {
  11889. + op[arg_count] = convert_memory_address (Pmode, op[arg_count]);
  11890. + op[arg_count] = copy_addr_to_reg (op[arg_count]);
  11891. + }
  11892. + op[arg_count] = gen_rtx_MEM (SImode, op[arg_count]);
  11893. + }
  11894. +
  11895. + else if (insn_op->mode == V1DImode
  11896. + && GET_MODE (op[arg_count]) == DImode)
  11897. + op[arg_count] = gen_lowpart (V1DImode, op[arg_count]);
  11898. +
  11899. + else if (insn_op->mode == V1SImode
  11900. + && GET_MODE (op[arg_count]) == SImode)
  11901. + op[arg_count] = gen_lowpart (V1SImode, op[arg_count]);
  11902. +
  11903. + if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
  11904. + insn_op->mode))
  11905. + op[arg_count] = copy_to_mode_reg (insn_op->mode, op[arg_count]);
  11906. + }
  11907. +
  11908. + switch (arg_count)
  11909. + {
  11910. + case 0:
  11911. + pat = GEN_FCN (icode) (op[0]);
  11912. + break;
  11913. + case 1:
  11914. + if (nonvoid)
  11915. + pat = GEN_FCN (icode) (op[0], op[1]);
  11916. + else
  11917. + pat = GEN_FCN (icode) (op[1]);
  11918. + break;
  11919. + case 2:
  11920. + pat = GEN_FCN (icode) (op[0], op[1], op[2]);
  11921. + break;
  11922. + case 3:
  11923. + pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
  11924. + break;
  11925. + default:
  11926. + gcc_unreachable ();
  11927. + }
  11928. +
  11929. + if (!pat)
  11930. + return NULL_RTX;
  11931. +
  11932. + emit_insn (pat);
  11933. +
  11934. + return (nonvoid ? op[0] : const0_rtx);
  11935. +}
  11936. +
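Note the gen_lowpart bridging above: builtins whose C-level type is plain DImode or SImode (e.g. __builtin_vis_faligndatadi, registered earlier with di_ftype_di_di) feed insns whose operands are V1DImode or V1SImode, so the expander retypes the RTL in place rather than spilling through memory. A usage sketch, assuming -mvis:

    /* long long at the C level, V1DImode inside the faligndata insn.  */
    long long
    align_ll (long long a, long long b)
    {
      return __builtin_vis_faligndatadi (a, b);
    }
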
  11937. +/* Return the upper 16 bits of the 8x16 multiplication. */
  11938. +
  11939. +static int
  11940. +sparc_vis_mul8x16 (int e8, int e16)
  11941. +{
  11942. + return (e8 * e16 + 128) / 256;
  11943. +}
  11944. +
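The +128 term biases the truncating division so that, for non-negative operands, the 24-bit product is rounded to nearest before the low 8 bits are dropped. A standalone copy of the formula with two worked values, as a sanity check:

    #include <assert.h>

    static int
    mul8x16 (int e8, int e16)  /* same formula as sparc_vis_mul8x16 */
    {
      return (e8 * e16 + 128) / 256;
    }

    int
    main (void)
    {
      assert (mul8x16 (100, 300) == 117);  /* 30128 / 256 */
      assert (mul8x16 (255, 256) == 255);  /* 65408 / 256 */
      return 0;
    }
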
  11945. +/* Multiply the VECTOR_CSTs CST0 and CST1 as specified by FNCODE and put
  11946. + the result into the array N_ELTS, whose elements are of INNER_TYPE. */
  11947. +
  11948. +static void
  11949. +sparc_handle_vis_mul8x16 (vec<tree> *n_elts, enum sparc_builtins fncode,
  11950. + tree inner_type, tree cst0, tree cst1)
  11951. +{
  11952. + unsigned i, num = VECTOR_CST_NELTS (cst0);
  11953. + int scale;
  11954. +
  11955. + switch (fncode)
  11956. + {
  11957. + case SPARC_BUILTIN_FMUL8X16:
  11958. + for (i = 0; i < num; ++i)
  11959. + {
  11960. + int val
  11961. + = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
  11962. + TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, i)));
  11963. + n_elts->quick_push (build_int_cst (inner_type, val));
  11964. + }
  11965. + break;
  11966. +
  11967. + case SPARC_BUILTIN_FMUL8X16AU:
  11968. + scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 0));
  11969. +
  11970. + for (i = 0; i < num; ++i)
  11971. + {
  11972. + int val
  11973. + = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
  11974. + scale);
  11975. + n_elts->quick_push (build_int_cst (inner_type, val));
  11976. + }
  11977. + break;
  11978. +
  11979. + case SPARC_BUILTIN_FMUL8X16AL:
  11980. + scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 1));
  11981. +
  11982. + for (i = 0; i < num; ++i)
  11983. + {
  11984. + int val
  11985. + = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
  11986. + scale);
  11987. + n_elts->quick_push (build_int_cst (inner_type, val));
  11988. + }
  11989. + break;
  11990. +
  11991. + default:
  11992. + gcc_unreachable ();
  11993. + }
  11994. +}
  11995. +
  11996. +/* Implement TARGET_FOLD_BUILTIN hook.
  11997. +
  11998. + Fold builtin functions for SPARC intrinsics. If IGNORE is true the
  11999. + result of the function call is ignored. NULL_TREE is returned if the
  12000. + function could not be folded. */
  12001. +
  12002. +static tree
  12003. +sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
  12004. + tree *args, bool ignore)
  12005. +{
  12006. + enum sparc_builtins code
  12007. + = (enum sparc_builtins) DECL_MD_FUNCTION_CODE (fndecl);
  12008. + tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
  12009. + tree arg0, arg1, arg2;
  12010. +
  12011. + if (ignore)
  12012. + switch (code)
  12013. + {
  12014. + case SPARC_BUILTIN_LDFSR:
  12015. + case SPARC_BUILTIN_STFSR:
  12016. + case SPARC_BUILTIN_ALIGNADDR:
  12017. + case SPARC_BUILTIN_WRGSR:
  12018. + case SPARC_BUILTIN_BMASK:
  12019. + case SPARC_BUILTIN_CMASK8:
  12020. + case SPARC_BUILTIN_CMASK16:
  12021. + case SPARC_BUILTIN_CMASK32:
  12022. + break;
  12023. +
  12024. + default:
  12025. + return build_zero_cst (rtype);
  12026. + }
  12027. +
  12028. + switch (code)
  12029. + {
  12030. + case SPARC_BUILTIN_FEXPAND:
  12031. + arg0 = args[0];
  12032. + STRIP_NOPS (arg0);
  12033. +
  12034. + if (TREE_CODE (arg0) == VECTOR_CST)
  12035. + {
  12036. + tree inner_type = TREE_TYPE (rtype);
  12037. + unsigned i;
  12038. +
  12039. + tree_vector_builder n_elts (rtype, VECTOR_CST_NELTS (arg0), 1);
  12040. + for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
  12041. + {
  12042. + unsigned HOST_WIDE_INT val
  12043. + = TREE_INT_CST_LOW (VECTOR_CST_ELT (arg0, i));
  12044. + n_elts.quick_push (build_int_cst (inner_type, val << 4));
  12045. + }
  12046. + return n_elts.build ();
  12047. + }
  12048. + break;
  12049. +
  12050. + case SPARC_BUILTIN_FMUL8X16:
  12051. + case SPARC_BUILTIN_FMUL8X16AU:
  12052. + case SPARC_BUILTIN_FMUL8X16AL:
  12053. + arg0 = args[0];
  12054. + arg1 = args[1];
  12055. + STRIP_NOPS (arg0);
  12056. + STRIP_NOPS (arg1);
  12057. +
  12058. + if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
  12059. + {
  12060. + tree inner_type = TREE_TYPE (rtype);
  12061. + tree_vector_builder n_elts (rtype, VECTOR_CST_NELTS (arg0), 1);
  12062. + sparc_handle_vis_mul8x16 (&n_elts, code, inner_type, arg0, arg1);
  12063. + return n_elts.build ();
  12064. + }
  12065. + break;
  12066. +
  12067. + case SPARC_BUILTIN_FPMERGE:
  12068. + arg0 = args[0];
  12069. + arg1 = args[1];
  12070. + STRIP_NOPS (arg0);
  12071. + STRIP_NOPS (arg1);
  12072. +
  12073. + if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
  12074. + {
  12075. + tree_vector_builder n_elts (rtype, 2 * VECTOR_CST_NELTS (arg0), 1);
  12076. + unsigned i;
  12077. + for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
  12078. + {
  12079. + n_elts.quick_push (VECTOR_CST_ELT (arg0, i));
  12080. + n_elts.quick_push (VECTOR_CST_ELT (arg1, i));
  12081. + }
  12082. +
  12083. + return n_elts.build ();
  12084. + }
  12085. + break;
  12086. +
  12087. + case SPARC_BUILTIN_PDIST:
  12088. + case SPARC_BUILTIN_PDISTN:
  12089. + arg0 = args[0];
  12090. + arg1 = args[1];
  12091. + STRIP_NOPS (arg0);
  12092. + STRIP_NOPS (arg1);
  12093. + if (code == SPARC_BUILTIN_PDIST)
  12094. + {
  12095. + arg2 = args[2];
  12096. + STRIP_NOPS (arg2);
  12097. + }
  12098. + else
  12099. + arg2 = integer_zero_node;
  12100. +
  12101. + if (TREE_CODE (arg0) == VECTOR_CST
  12102. + && TREE_CODE (arg1) == VECTOR_CST
  12103. + && TREE_CODE (arg2) == INTEGER_CST)
  12104. + {
  12105. + bool overflow = false;
  12106. + widest_int result = wi::to_widest (arg2);
  12107. + widest_int tmp;
  12108. + unsigned i;
  12109. +
  12110. + for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
  12111. + {
  12112. + tree e0 = VECTOR_CST_ELT (arg0, i);
  12113. + tree e1 = VECTOR_CST_ELT (arg1, i);
  12114. +
  12115. + wi::overflow_type neg1_ovf, neg2_ovf, add1_ovf, add2_ovf;
  12116. +
  12117. + tmp = wi::neg (wi::to_widest (e1), &neg1_ovf);
  12118. + tmp = wi::add (wi::to_widest (e0), tmp, SIGNED, &add1_ovf);
  12119. + if (wi::neg_p (tmp))
  12120. + tmp = wi::neg (tmp, &neg2_ovf);
  12121. + else
  12122. + neg2_ovf = wi::OVF_NONE;
  12123. + result = wi::add (result, tmp, SIGNED, &add2_ovf);
  12124. + overflow |= ((neg1_ovf != wi::OVF_NONE)
  12125. + | (neg2_ovf != wi::OVF_NONE)
  12126. + | (add1_ovf != wi::OVF_NONE)
  12127. + | (add2_ovf != wi::OVF_NONE));
  12128. + }
  12129. +
  12130. + gcc_assert (!overflow);
  12131. +
  12132. + return wide_int_to_tree (rtype, result);
  12133. + }
  12134. +
  12135. + default:
  12136. + break;
  12137. + }
  12138. +
  12139. + return NULL_TREE;
  12140. +}
  12141. +
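As a concrete effect of the folder above: with constant operands, FPMERGE's element interleave happens entirely at compile time. A sketch, assuming -mvis and the __v4qi/__v8qi typedefs from the GCC documentation:

    typedef unsigned char __v4qi __attribute__ ((vector_size (4)));
    typedef unsigned char __v8qi __attribute__ ((vector_size (8)));

    __v8qi
    merged (void)
    {
      __v4qi hi = { 1, 2, 3, 4 }, lo = { 5, 6, 7, 8 };
      /* Folds to the constant { 1, 5, 2, 6, 3, 7, 4, 8 }.  */
      return __builtin_vis_fpmerge (hi, lo);
    }
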
  12142. +/* ??? This duplicates information provided to the compiler by the
  12143. + ??? scheduler description. Some day, teach genautomata to output
  12144. + ??? the latencies and then CSE will just use that. */
  12145. +
  12146. +static bool
  12147. +sparc_rtx_costs (rtx x, machine_mode mode, int outer_code,
  12148. + int opno ATTRIBUTE_UNUSED,
  12149. + int *total, bool speed ATTRIBUTE_UNUSED)
  12150. +{
  12151. + int code = GET_CODE (x);
  12152. + bool float_mode_p = FLOAT_MODE_P (mode);
  12153. +
  12154. + switch (code)
  12155. + {
  12156. + case CONST_INT:
  12157. + if (SMALL_INT (x))
  12158. + *total = 0;
  12159. + else
  12160. + *total = 2;
  12161. + return true;
  12162. +
  12163. + case CONST_WIDE_INT:
  12164. + *total = 0;
  12165. + if (!SPARC_SIMM13_P (CONST_WIDE_INT_ELT (x, 0)))
  12166. + *total += 2;
  12167. + if (!SPARC_SIMM13_P (CONST_WIDE_INT_ELT (x, 1)))
  12168. + *total += 2;
  12169. + return true;
  12170. +
  12171. + case HIGH:
  12172. + *total = 2;
  12173. + return true;
  12174. +
  12175. + case CONST:
  12176. + case LABEL_REF:
  12177. + case SYMBOL_REF:
  12178. + *total = 4;
  12179. + return true;
  12180. +
  12181. + case CONST_DOUBLE:
  12182. + *total = 8;
  12183. + return true;
  12184. +
  12185. + case MEM:
  12186. + /* If outer-code was a sign or zero extension, a cost
  12187. + of COSTS_N_INSNS (1) was already added in. This is
  12188. + why we are subtracting it back out. */
  12189. + if (outer_code == ZERO_EXTEND)
  12190. + {
  12191. + *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
  12192. + }
  12193. + else if (outer_code == SIGN_EXTEND)
  12194. + {
  12195. + *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
  12196. + }
  12197. + else if (float_mode_p)
  12198. + {
  12199. + *total = sparc_costs->float_load;
  12200. + }
  12201. + else
  12202. + {
  12203. + *total = sparc_costs->int_load;
  12204. + }
  12205. +
  12206. + return true;
  12207. +
  12208. + case PLUS:
  12209. + case MINUS:
  12210. + if (float_mode_p)
  12211. + *total = sparc_costs->float_plusminus;
  12212. + else
  12213. + *total = COSTS_N_INSNS (1);
  12214. + return false;
  12215. +
  12216. + case FMA:
  12217. + {
  12218. + rtx sub;
  12219. +
  12220. + gcc_assert (float_mode_p);
  12221. + *total = sparc_costs->float_mul;
  12222. +
  12223. + sub = XEXP (x, 0);
  12224. + if (GET_CODE (sub) == NEG)
  12225. + sub = XEXP (sub, 0);
  12226. + *total += rtx_cost (sub, mode, FMA, 0, speed);
  12227. +
  12228. + sub = XEXP (x, 2);
  12229. + if (GET_CODE (sub) == NEG)
  12230. + sub = XEXP (sub, 0);
  12231. + *total += rtx_cost (sub, mode, FMA, 2, speed);
  12232. + return true;
  12233. + }
  12234. +
  12235. + case MULT:
  12236. + if (float_mode_p)
  12237. + *total = sparc_costs->float_mul;
  12238. + else if (TARGET_ARCH32 && !TARGET_HARD_MUL)
  12239. + *total = COSTS_N_INSNS (25);
  12240. + else
  12241. + {
  12242. + int bit_cost;
  12243. +
  12244. + bit_cost = 0;
  12245. + if (sparc_costs->int_mul_bit_factor)
  12246. + {
  12247. + int nbits;
  12248. +
  12249. + if (GET_CODE (XEXP (x, 1)) == CONST_INT)
  12250. + {
  12251. + unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
  12252. + for (nbits = 0; value != 0; value &= value - 1)
  12253. + nbits++;
  12254. + }
  12255. + else
  12256. + nbits = 7;
  12257. +
  12258. + if (nbits < 3)
  12259. + nbits = 3;
  12260. + bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
  12261. + bit_cost = COSTS_N_INSNS (bit_cost);
  12262. + }
  12263. +
  12264. + if (mode == DImode || !TARGET_HARD_MUL)
  12265. + *total = sparc_costs->int_mulX + bit_cost;
  12266. + else
  12267. + *total = sparc_costs->int_mul + bit_cost;
  12268. + }
  12269. + return false;
  12270. +
  12271. + case ASHIFT:
  12272. + case ASHIFTRT:
  12273. + case LSHIFTRT:
  12274. + *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
  12275. + return false;
  12276. +
  12277. + case DIV:
  12278. + case UDIV:
  12279. + case MOD:
  12280. + case UMOD:
  12281. + if (float_mode_p)
  12282. + {
  12283. + if (mode == DFmode)
  12284. + *total = sparc_costs->float_div_df;
  12285. + else
  12286. + *total = sparc_costs->float_div_sf;
  12287. + }
  12288. + else
  12289. + {
  12290. + if (mode == DImode)
  12291. + *total = sparc_costs->int_divX;
  12292. + else
  12293. + *total = sparc_costs->int_div;
  12294. + }
  12295. + return false;
  12296. +
  12297. + case NEG:
  12298. + if (! float_mode_p)
  12299. + {
  12300. + *total = COSTS_N_INSNS (1);
  12301. + return false;
  12302. + }
  12303. + /* FALLTHRU */
  12304. +
  12305. + case ABS:
  12306. + case FLOAT:
  12307. + case UNSIGNED_FLOAT:
  12308. + case FIX:
  12309. + case UNSIGNED_FIX:
  12310. + case FLOAT_EXTEND:
  12311. + case FLOAT_TRUNCATE:
  12312. + *total = sparc_costs->float_move;
  12313. + return false;
  12314. +
  12315. + case SQRT:
  12316. + if (mode == DFmode)
  12317. + *total = sparc_costs->float_sqrt_df;
  12318. + else
  12319. + *total = sparc_costs->float_sqrt_sf;
  12320. + return false;
  12321. +
  12322. + case COMPARE:
  12323. + if (float_mode_p)
  12324. + *total = sparc_costs->float_cmp;
  12325. + else
  12326. + *total = COSTS_N_INSNS (1);
  12327. + return false;
  12328. +
  12329. + case IF_THEN_ELSE:
  12330. + if (float_mode_p)
  12331. + *total = sparc_costs->float_cmove;
  12332. + else
  12333. + *total = sparc_costs->int_cmove;
  12334. + return false;
  12335. +
  12336. + case IOR:
  12337. + /* Handle the NAND vector patterns. */
  12338. + if (sparc_vector_mode_supported_p (mode)
  12339. + && GET_CODE (XEXP (x, 0)) == NOT
  12340. + && GET_CODE (XEXP (x, 1)) == NOT)
  12341. + {
  12342. + *total = COSTS_N_INSNS (1);
  12343. + return true;
  12344. + }
  12345. + else
  12346. + return false;
  12347. +
  12348. + default:
  12349. + return false;
  12350. + }
  12351. +}
  12352. +
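In the MULT case above, a constant multiplier is charged by its population count: the `value &= value - 1` loop clears one set bit per iteration, a non-constant multiplier is assumed to have 7 set bits, and the count is clamped below at 3 before being scaled by int_mul_bit_factor. A standalone sketch of the count, with an illustrative factor (the real one comes from the per-CPU cost tables):

    /* Kernighan-style popcount, as in the NBITS loop above.  */
    static int
    count_bits (unsigned long value)
    {
      int nbits = 0;
      for (; value != 0; value &= value - 1)
        nbits++;
      return nbits;
    }

    /* E.g. a multiplier of 0x50 has 2 set bits, clamped to 3, so with an
       assumed int_mul_bit_factor of 4 the extra cost is (3 - 3) / 4 = 0;
       an unknown multiplier adds (7 - 3) / 4 = 1 insn.  */
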
  12353. +/* Return true if CLASS is either GENERAL_REGS or I64_REGS. */
  12354. +
  12355. +static inline bool
  12356. +general_or_i64_p (reg_class_t rclass)
  12357. +{
  12358. + return (rclass == GENERAL_REGS || rclass == I64_REGS);
  12359. +}
  12360. +
  12361. +/* Implement TARGET_REGISTER_MOVE_COST. */
  12362. +
  12363. +static int
  12364. +sparc_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
  12365. + reg_class_t from, reg_class_t to)
  12366. +{
  12367. + bool need_memory = false;
  12368. +
  12369. + /* This helps postreload CSE to eliminate redundant comparisons. */
  12370. + if (from == NO_REGS || to == NO_REGS)
  12371. + return 100;
  12372. +
  12373. + if (from == FPCC_REGS || to == FPCC_REGS)
  12374. + need_memory = true;
  12375. + else if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
  12376. + || (general_or_i64_p (from) && FP_REG_CLASS_P (to)))
  12377. + {
  12378. + if (TARGET_VIS3)
  12379. + {
  12380. + int size = GET_MODE_SIZE (mode);
  12381. + if (size == 8 || size == 4)
  12382. + {
  12383. + if (! TARGET_ARCH32 || size == 4)
  12384. + return 4;
  12385. + else
  12386. + return 6;
  12387. + }
  12388. + }
  12389. + need_memory = true;
  12390. + }
  12391. +
  12392. + if (need_memory)
  12393. + {
  12394. + if (sparc_cpu == PROCESSOR_ULTRASPARC
  12395. + || sparc_cpu == PROCESSOR_ULTRASPARC3
  12396. + || sparc_cpu == PROCESSOR_NIAGARA
  12397. + || sparc_cpu == PROCESSOR_NIAGARA2
  12398. + || sparc_cpu == PROCESSOR_NIAGARA3
  12399. + || sparc_cpu == PROCESSOR_NIAGARA4
  12400. + || sparc_cpu == PROCESSOR_NIAGARA7
  12401. + || sparc_cpu == PROCESSOR_M8)
  12402. + return 12;
  12403. +
  12404. + return 6;
  12405. + }
  12406. +
  12407. + return 2;
  12408. +}
  12409. +
  12410. +/* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
  12411. + This is achieved by means of a manual dynamic stack space allocation in
  12412. + the current frame. We make the assumption that SEQ doesn't contain any
  12413. + function calls, with the possible exception of calls to the GOT helper. */
  12414. +
  12415. +static void
  12416. +emit_and_preserve (rtx seq, rtx reg, rtx reg2)
  12417. +{
  12418. + /* We must preserve the lowest 16 words for the register save area. */
  12419. + HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
  12420. + /* We really need only 2 words of fresh stack space. */
  12421. + HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
  12422. +
  12423. + rtx slot
  12424. + = gen_rtx_MEM (word_mode, plus_constant (Pmode, stack_pointer_rtx,
  12425. + SPARC_STACK_BIAS + offset));
  12426. +
  12427. + emit_insn (gen_stack_pointer_inc (GEN_INT (-size)));
  12428. + emit_insn (gen_rtx_SET (slot, reg));
  12429. + if (reg2)
  12430. + emit_insn (gen_rtx_SET (adjust_address (slot, word_mode, UNITS_PER_WORD),
  12431. + reg2));
  12432. + emit_insn (seq);
  12433. + if (reg2)
  12434. + emit_insn (gen_rtx_SET (reg2,
  12435. + adjust_address (slot, word_mode, UNITS_PER_WORD)));
  12436. + emit_insn (gen_rtx_SET (reg, slot));
  12437. + emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
  12438. +}
  12439. +
  12440. +/* Output the assembler code for a thunk function. THUNK_DECL is the
  12441. + declaration for the thunk function itself, FUNCTION is the decl for
  12442. + the target function. DELTA is an immediate constant offset to be
  12443. + added to THIS. If VCALL_OFFSET is nonzero, the word at address
  12444. + (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
  12445. +
  12446. +static void
  12447. +sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
  12448. + HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
  12449. + tree function)
  12450. +{
  12451. + const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
  12452. + rtx this_rtx, funexp;
  12453. + rtx_insn *insn;
  12454. + unsigned int int_arg_first;
  12455. +
  12456. + reload_completed = 1;
  12457. + epilogue_completed = 1;
  12458. +
  12459. + emit_note (NOTE_INSN_PROLOGUE_END);
  12460. +
  12461. + if (TARGET_FLAT)
  12462. + {
  12463. + sparc_leaf_function_p = 1;
  12464. +
  12465. + int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
  12466. + }
  12467. + else if (flag_delayed_branch)
  12468. + {
  12469. + /* We will emit a regular sibcall below, so we need to instruct
  12470. + output_sibcall that we are in a leaf function. */
  12471. + sparc_leaf_function_p = crtl->uses_only_leaf_regs = 1;
  12472. +
  12473. + /* This will cause final.c to invoke leaf_renumber_regs so we
  12474. + must behave as if we were in a not-yet-leafified function. */
  12475. + int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
  12476. + }
  12477. + else
  12478. + {
  12479. + /* We will emit the sibcall manually below, so we will need to
  12480. + manually spill non-leaf registers. */
  12481. + sparc_leaf_function_p = crtl->uses_only_leaf_regs = 0;
  12482. +
  12483. + /* We really are in a leaf function. */
  12484. + int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
  12485. + }
  12486. +
  12487. + /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
  12488. + returns a structure, the structure return pointer is there instead. */
  12489. + if (TARGET_ARCH64
  12490. + && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
  12491. + this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
  12492. + else
  12493. + this_rtx = gen_rtx_REG (Pmode, int_arg_first);
  12494. +
  12495. + /* Add DELTA. When possible use a plain add, otherwise load it into
  12496. + a register first. */
  12497. + if (delta)
  12498. + {
  12499. + rtx delta_rtx = GEN_INT (delta);
  12500. +
  12501. + if (! SPARC_SIMM13_P (delta))
  12502. + {
  12503. + rtx scratch = gen_rtx_REG (Pmode, 1);
  12504. + emit_move_insn (scratch, delta_rtx);
  12505. + delta_rtx = scratch;
  12506. + }
  12507. +
  12508. + /* THIS_RTX += DELTA. */
  12509. + emit_insn (gen_add2_insn (this_rtx, delta_rtx));
  12510. + }
  12511. +
  12512. + /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
  12513. + if (vcall_offset)
  12514. + {
  12515. + rtx vcall_offset_rtx = GEN_INT (vcall_offset);
  12516. + rtx scratch = gen_rtx_REG (Pmode, 1);
  12517. +
  12518. + gcc_assert (vcall_offset < 0);
  12519. +
  12520. + /* SCRATCH = *THIS_RTX. */
  12521. + emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
  12522. +
  12523. + /* Prepare for adding VCALL_OFFSET. The difficulty is that we
  12524. + may not have any available scratch register at this point. */
  12525. + if (SPARC_SIMM13_P (vcall_offset))
  12526. + ;
  12527. + /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
  12528. + else if (! fixed_regs[5]
  12529. + /* The below sequence is made up of at least 2 insns,
  12530. + while the default method may need only one. */
  12531. + && vcall_offset < -8192)
  12532. + {
  12533. + rtx scratch2 = gen_rtx_REG (Pmode, 5);
  12534. + emit_move_insn (scratch2, vcall_offset_rtx);
  12535. + vcall_offset_rtx = scratch2;
  12536. + }
  12537. + else
  12538. + {
  12539. + rtx increment = GEN_INT (-4096);
  12540. +
  12541. + /* VCALL_OFFSET is a negative number whose typical range can be
  12542. + estimated as -32768..0 in 32-bit mode. In almost all cases
  12543. + it is therefore cheaper to emit multiple add insns than
  12544. + spilling and loading the constant into a register (at least
  12545. + 6 insns). */
  12546. + while (! SPARC_SIMM13_P (vcall_offset))
  12547. + {
  12548. + emit_insn (gen_add2_insn (scratch, increment));
  12549. + vcall_offset += 4096;
  12550. + }
  12551. + vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
  12552. + }
  12553. +
  12554. + /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
  12555. + emit_move_insn (scratch, gen_rtx_MEM (Pmode,
  12556. + gen_rtx_PLUS (Pmode,
  12557. + scratch,
  12558. + vcall_offset_rtx)));
  12559. +
  12560. + /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
  12561. + emit_insn (gen_add2_insn (this_rtx, scratch));
  12562. + }
  12563. +
  12564. + /* Generate a tail call to the target function. */
  12565. + if (! TREE_USED (function))
  12566. + {
  12567. + assemble_external (function);
  12568. + TREE_USED (function) = 1;
  12569. + }
  12570. + funexp = XEXP (DECL_RTL (function), 0);
  12571. +
  12572. + if (flag_delayed_branch)
  12573. + {
  12574. + funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  12575. + insn = emit_call_insn (gen_sibcall (funexp));
  12576. + SIBLING_CALL_P (insn) = 1;
  12577. + }
  12578. + else
  12579. + {
  12580. + /* The hoops we have to jump through in order to generate a sibcall
  12581. + without using delay slots... */
  12582. + rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
  12583. +
  12584. + if (flag_pic)
  12585. + {
  12586. + spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
  12587. + start_sequence ();
  12588. + load_got_register (); /* clobbers %o7 */
  12589. + if (!TARGET_VXWORKS_RTP)
  12590. + pic_offset_table_rtx = got_register_rtx;
  12591. + scratch = sparc_legitimize_pic_address (funexp, scratch);
  12592. + seq = get_insns ();
  12593. + end_sequence ();
  12594. + emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
  12595. + }
  12596. + else if (TARGET_ARCH32)
  12597. + {
  12598. + emit_insn (gen_rtx_SET (scratch,
  12599. + gen_rtx_HIGH (SImode, funexp)));
  12600. + emit_insn (gen_rtx_SET (scratch,
  12601. + gen_rtx_LO_SUM (SImode, scratch, funexp)));
  12602. + }
  12603. + else /* TARGET_ARCH64 */
  12604. + {
  12605. + switch (sparc_code_model)
  12606. + {
  12607. + case CM_MEDLOW:
  12608. + case CM_MEDMID:
  12609. + /* The destination can serve as a temporary. */
  12610. + sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
  12611. + break;
  12612. +
  12613. + case CM_MEDANY:
  12614. + case CM_EMBMEDANY:
  12615. + /* The destination cannot serve as a temporary. */
  12616. + spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
  12617. + start_sequence ();
  12618. + sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
  12619. + seq = get_insns ();
  12620. + end_sequence ();
  12621. + emit_and_preserve (seq, spill_reg, 0);
  12622. + break;
  12623. +
  12624. + default:
  12625. + gcc_unreachable ();
  12626. + }
  12627. + }
  12628. +
  12629. + emit_jump_insn (gen_indirect_jump (scratch));
  12630. + }
  12631. +
  12632. + emit_barrier ();
  12633. +
  12634. + /* Run just enough of rest_of_compilation to get the insns emitted.
  12635. + There's not really enough bulk here to make other passes such as
12636. + instruction scheduling worthwhile. */
  12637. + insn = get_insns ();
  12638. + shorten_branches (insn);
  12639. + assemble_start_function (thunk_fndecl, fnname);
  12640. + final_start_function (insn, file, 1);
  12641. + final (insn, file, 1);
  12642. + final_end_function ();
  12643. + assemble_end_function (thunk_fndecl, fnname);
  12644. +
  12645. + reload_completed = 0;
  12646. + epilogue_completed = 0;
  12647. +}
  12648. +
  12649. +/* Return true if sparc_output_mi_thunk would be able to output the
  12650. + assembler code for the thunk function specified by the arguments
  12651. + it is passed, and false otherwise. */
  12652. +static bool
  12653. +sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
  12654. + HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
  12655. + HOST_WIDE_INT vcall_offset,
  12656. + const_tree function ATTRIBUTE_UNUSED)
  12657. +{
  12658. + /* Bound the loop used in the default method above. */
  12659. + return (vcall_offset >= -32768 || ! fixed_regs[5]);
  12660. +}
  12661. +
  12662. +/* How to allocate a 'struct machine_function'. */
  12663. +
  12664. +static struct machine_function *
  12665. +sparc_init_machine_status (void)
  12666. +{
  12667. + return ggc_cleared_alloc<machine_function> ();
  12668. +}
  12669. +
  12670. +/* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
  12671. +
  12672. +static unsigned HOST_WIDE_INT
  12673. +sparc_asan_shadow_offset (void)
  12674. +{
  12675. + return TARGET_ARCH64 ? (HOST_WIDE_INT_1 << 43) : (HOST_WIDE_INT_1 << 29);
  12676. +}
  12677. +
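These constants are the per-target half of the standard AddressSanitizer mapping, shadow = (addr >> 3) + offset; the scale of 3 (one shadow byte per 8 application bytes) is libasan's generic default and is assumed here for illustration, not something this hook controls:

    #include <stdint.h>

    /* uint64_t throughout so the ARCH64 constant fits on any host.  */
    static uint64_t
    shadow_of (uint64_t addr, int arch64)
    {
      uint64_t offset = arch64 ? ((uint64_t) 1 << 43) : ((uint64_t) 1 << 29);
      return (addr >> 3) + offset;
    }
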
  12678. +/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
  12679. + We need to emit DTP-relative relocations. */
  12680. +
  12681. +static void
  12682. +sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
  12683. +{
  12684. + switch (size)
  12685. + {
  12686. + case 4:
  12687. + fputs ("\t.word\t%r_tls_dtpoff32(", file);
  12688. + break;
  12689. + case 8:
  12690. + fputs ("\t.xword\t%r_tls_dtpoff64(", file);
  12691. + break;
  12692. + default:
  12693. + gcc_unreachable ();
  12694. + }
  12695. + output_addr_const (file, x);
  12696. + fputs (")", file);
  12697. +}
  12698. +
  12699. +/* Do whatever processing is required at the end of a file. */
  12700. +
  12701. +static void
  12702. +sparc_file_end (void)
  12703. +{
  12704. + /* If we need to emit the special GOT helper function, do so now. */
  12705. + if (got_helper_needed)
  12706. + {
  12707. + const char *name = XSTR (got_helper_rtx, 0);
  12708. +#ifdef DWARF2_UNWIND_INFO
  12709. + bool do_cfi;
  12710. +#endif
  12711. +
  12712. + if (USE_HIDDEN_LINKONCE)
  12713. + {
  12714. + tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
  12715. + get_identifier (name),
  12716. + build_function_type_list (void_type_node,
  12717. + NULL_TREE));
  12718. + DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
  12719. + NULL_TREE, void_type_node);
  12720. + TREE_PUBLIC (decl) = 1;
  12721. + TREE_STATIC (decl) = 1;
  12722. + make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
  12723. + DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
  12724. + DECL_VISIBILITY_SPECIFIED (decl) = 1;
  12725. + resolve_unique_section (decl, 0, flag_function_sections);
  12726. + allocate_struct_function (decl, true);
  12727. + cfun->is_thunk = 1;
  12728. + current_function_decl = decl;
  12729. + init_varasm_status ();
  12730. + assemble_start_function (decl, name);
  12731. + }
  12732. + else
  12733. + {
  12734. + const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
  12735. + switch_to_section (text_section);
  12736. + if (align > 0)
  12737. + ASM_OUTPUT_ALIGN (asm_out_file, align);
  12738. + ASM_OUTPUT_LABEL (asm_out_file, name);
  12739. + }
  12740. +
  12741. +#ifdef DWARF2_UNWIND_INFO
  12742. + do_cfi = dwarf2out_do_cfi_asm ();
  12743. + if (do_cfi)
  12744. + output_asm_insn (".cfi_startproc", NULL);
  12745. +#endif
  12746. + if (flag_delayed_branch)
  12747. + {
  12748. + output_asm_insn ("jmp\t%%o7+8", NULL);
  12749. + output_asm_insn (" add\t%%o7, %0, %0", &got_register_rtx);
  12750. + }
  12751. + else
  12752. + {
  12753. + output_asm_insn ("add\t%%o7, %0, %0", &got_register_rtx);
  12754. + output_asm_insn ("jmp\t%%o7+8", NULL);
  12755. + output_asm_insn (" nop", NULL);
  12756. + }
  12757. +#ifdef DWARF2_UNWIND_INFO
  12758. + if (do_cfi)
  12759. + output_asm_insn (".cfi_endproc", NULL);
  12760. +#endif
  12761. + }
  12762. +
  12763. + if (NEED_INDICATE_EXEC_STACK)
  12764. + file_end_indicate_exec_stack ();
  12765. +
  12766. +#ifdef TARGET_SOLARIS
  12767. + solaris_file_end ();
  12768. +#endif
  12769. +}
  12770. +
  12771. +#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
  12772. +/* Implement TARGET_MANGLE_TYPE. */
  12773. +
  12774. +static const char *
  12775. +sparc_mangle_type (const_tree type)
  12776. +{
  12777. + if (TARGET_ARCH32
  12778. + && TYPE_MAIN_VARIANT (type) == long_double_type_node
  12779. + && TARGET_LONG_DOUBLE_128)
  12780. + return "g";
  12781. +
  12782. + /* For all other types, use normal C++ mangling. */
  12783. + return NULL;
  12784. +}
  12785. +#endif
  12786. +
  12787. +/* Expand a membar instruction for various use cases. Both the LOAD_STORE
  12788. + and BEFORE_AFTER arguments are of the form X_Y. They are two-bit masks where
  12789. + bit 0 indicates that X is true, and bit 1 indicates Y is true. */
  12790. +
  12791. +void
  12792. +sparc_emit_membar_for_model (enum memmodel model,
  12793. + int load_store, int before_after)
  12794. +{
  12795. + /* Bits for the MEMBAR mmask field. */
  12796. + const int LoadLoad = 1;
  12797. + const int StoreLoad = 2;
  12798. + const int LoadStore = 4;
  12799. + const int StoreStore = 8;
  12800. +
  12801. + int mm = 0, implied = 0;
  12802. +
  12803. + switch (sparc_memory_model)
  12804. + {
  12805. + case SMM_SC:
  12806. + /* Sequential Consistency. All memory transactions are immediately
  12807. + visible in sequential execution order. No barriers needed. */
  12808. + implied = LoadLoad | StoreLoad | LoadStore | StoreStore;
  12809. + break;
  12810. +
  12811. + case SMM_TSO:
  12812. + /* Total Store Ordering: all memory transactions with store semantics
  12813. + are followed by an implied StoreStore. */
  12814. + implied |= StoreStore;
  12815. +
  12816. + /* If we're not looking for a raw barrier (before+after), then atomic
  12817. + operations get the benefit of being both load and store. */
  12818. + if (load_store == 3 && before_after == 1)
  12819. + implied |= StoreLoad;
  12820. + /* FALLTHRU */
  12821. +
  12822. + case SMM_PSO:
  12823. + /* Partial Store Ordering: all memory transactions with load semantics
  12824. + are followed by an implied LoadLoad | LoadStore. */
  12825. + implied |= LoadLoad | LoadStore;
  12826. +
  12827. + /* If we're not looking for a raw barrier (before+after), then atomic
  12828. + operations get the benefit of being both load and store. */
  12829. + if (load_store == 3 && before_after == 2)
  12830. + implied |= StoreLoad | StoreStore;
  12831. + /* FALLTHRU */
  12832. +
  12833. + case SMM_RMO:
  12834. + /* Relaxed Memory Ordering: no implicit bits. */
  12835. + break;
  12836. +
  12837. + default:
  12838. + gcc_unreachable ();
  12839. + }
  12840. +
  12841. + if (before_after & 1)
  12842. + {
  12843. + if (is_mm_release (model) || is_mm_acq_rel (model)
  12844. + || is_mm_seq_cst (model))
  12845. + {
  12846. + if (load_store & 1)
  12847. + mm |= LoadLoad | StoreLoad;
  12848. + if (load_store & 2)
  12849. + mm |= LoadStore | StoreStore;
  12850. + }
  12851. + }
  12852. + if (before_after & 2)
  12853. + {
  12854. + if (is_mm_acquire (model) || is_mm_acq_rel (model)
  12855. + || is_mm_seq_cst (model))
  12856. + {
  12857. + if (load_store & 1)
  12858. + mm |= LoadLoad | LoadStore;
  12859. + if (load_store & 2)
  12860. + mm |= StoreLoad | StoreStore;
  12861. + }
  12862. + }
  12863. +
  12864. + /* Remove the bits implied by the system memory model. */
  12865. + mm &= ~implied;
  12866. +
  12867. + /* For raw barriers (before+after), always emit a barrier.
  12868. + This will become a compile-time barrier if needed. */
  12869. + if (mm || before_after == 3)
  12870. + emit_insn (gen_membar (GEN_INT (mm)));
  12871. +}
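
A standalone sketch of the mask bookkeeping above for the TSO case, with the rtl emission and the is_mm_* memory-model predicates replaced by plain booleans (the names are invented; only the bit logic mirrors the function, and the SC/PSO/RMO cases are omitted):

    /* MEMBAR mmask bits, as in sparc_emit_membar_for_model.  */
    enum { LoadLoad = 1, StoreLoad = 2, LoadStore = 4, StoreStore = 8 };

    /* Compute the residual mmask under TSO.  IS_RELEASE gates the
       "before" barrier and IS_ACQUIRE the "after" barrier.  */
    static int
    membar_mask_tso (int load_store, int before_after,
                     int is_release, int is_acquire)
    {
      int implied = StoreStore;                /* TSO: stores stay ordered. */
      if (load_store == 3 && before_after == 1)
        implied |= StoreLoad;                  /* atomics count as both.    */
      implied |= LoadLoad | LoadStore;         /* inherited from PSO.       */
      if (load_store == 3 && before_after == 2)
        implied |= StoreLoad | StoreStore;

      int mm = 0;
      if ((before_after & 1) && is_release)
        {
          if (load_store & 1) mm |= LoadLoad | StoreLoad;
          if (load_store & 2) mm |= LoadStore | StoreStore;
        }
      if ((before_after & 2) && is_acquire)
        {
          if (load_store & 1) mm |= LoadLoad | LoadStore;
          if (load_store & 2) mm |= StoreLoad | StoreStore;
        }
      return mm & ~implied;  /* bits the memory model does not already give */
    }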
  12872. +
  12873. + /* Expand code to perform an 8-bit or 16-bit compare-and-swap by doing a
  12874. + 32-bit compare-and-swap on the word containing the byte or half-word. */
  12875. +
  12876. +static void
  12877. +sparc_expand_compare_and_swap_12 (rtx bool_result, rtx result, rtx mem,
  12878. + rtx oldval, rtx newval)
  12879. +{
  12880. + rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
  12881. + rtx addr = gen_reg_rtx (Pmode);
  12882. + rtx off = gen_reg_rtx (SImode);
  12883. + rtx oldv = gen_reg_rtx (SImode);
  12884. + rtx newv = gen_reg_rtx (SImode);
  12885. + rtx oldvalue = gen_reg_rtx (SImode);
  12886. + rtx newvalue = gen_reg_rtx (SImode);
  12887. + rtx res = gen_reg_rtx (SImode);
  12888. + rtx resv = gen_reg_rtx (SImode);
  12889. + rtx memsi, val, mask, cc;
  12890. +
  12891. + emit_insn (gen_rtx_SET (addr, gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
  12892. +
  12893. + if (Pmode != SImode)
  12894. + addr1 = gen_lowpart (SImode, addr1);
  12895. + emit_insn (gen_rtx_SET (off, gen_rtx_AND (SImode, addr1, GEN_INT (3))));
  12896. +
  12897. + memsi = gen_rtx_MEM (SImode, addr);
  12898. + set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
  12899. + MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
  12900. +
  12901. + val = copy_to_reg (memsi);
  12902. +
  12903. + emit_insn (gen_rtx_SET (off,
  12904. + gen_rtx_XOR (SImode, off,
  12905. + GEN_INT (GET_MODE (mem) == QImode
  12906. + ? 3 : 2))));
  12907. +
  12908. + emit_insn (gen_rtx_SET (off, gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
  12909. +
  12910. + if (GET_MODE (mem) == QImode)
  12911. + mask = force_reg (SImode, GEN_INT (0xff));
  12912. + else
  12913. + mask = force_reg (SImode, GEN_INT (0xffff));
  12914. +
  12915. + emit_insn (gen_rtx_SET (mask, gen_rtx_ASHIFT (SImode, mask, off)));
  12916. +
  12917. + emit_insn (gen_rtx_SET (val,
  12918. + gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
  12919. + val)));
  12920. +
  12921. + oldval = gen_lowpart (SImode, oldval);
  12922. + emit_insn (gen_rtx_SET (oldv, gen_rtx_ASHIFT (SImode, oldval, off)));
  12923. +
  12924. + newval = gen_lowpart_common (SImode, newval);
  12925. + emit_insn (gen_rtx_SET (newv, gen_rtx_ASHIFT (SImode, newval, off)));
  12926. +
  12927. + emit_insn (gen_rtx_SET (oldv, gen_rtx_AND (SImode, oldv, mask)));
  12928. +
  12929. + emit_insn (gen_rtx_SET (newv, gen_rtx_AND (SImode, newv, mask)));
  12930. +
  12931. + rtx_code_label *end_label = gen_label_rtx ();
  12932. + rtx_code_label *loop_label = gen_label_rtx ();
  12933. + emit_label (loop_label);
  12934. +
  12935. + emit_insn (gen_rtx_SET (oldvalue, gen_rtx_IOR (SImode, oldv, val)));
  12936. +
  12937. + emit_insn (gen_rtx_SET (newvalue, gen_rtx_IOR (SImode, newv, val)));
  12938. +
  12939. + emit_move_insn (bool_result, const1_rtx);
  12940. +
  12941. + emit_insn (gen_atomic_compare_and_swapsi_1 (res, memsi, oldvalue, newvalue));
  12942. +
  12943. + emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
  12944. +
  12945. + emit_insn (gen_rtx_SET (resv,
  12946. + gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
  12947. + res)));
  12948. +
  12949. + emit_move_insn (bool_result, const0_rtx);
  12950. +
  12951. + cc = gen_compare_reg_1 (NE, resv, val);
  12952. + emit_insn (gen_rtx_SET (val, resv));
  12953. +
  12954. + /* Use cbranchcc4 to separate the compare and branch! */
  12955. + emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
  12956. + cc, const0_rtx, loop_label));
  12957. +
  12958. + emit_label (end_label);
  12959. +
  12960. + emit_insn (gen_rtx_SET (res, gen_rtx_AND (SImode, res, mask)));
  12961. +
  12962. + emit_insn (gen_rtx_SET (res, gen_rtx_LSHIFTRT (SImode, res, off)));
  12963. +
  12964. + emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
  12965. +}
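
The same word-masking scheme, written as a standalone byte-sized CAS built on GCC's 32-bit __atomic_compare_exchange_n in place of the rtl-level atomic_compare_and_swapsi_1 (a sketch; the function name is invented, and the XOR with 3 converts the byte's address into its position within a big-endian word, exactly as in the expander above):

    #include <stdint.h>
    #include <stdbool.h>

    static bool
    byte_cas (uint8_t *p, uint8_t oldval, uint8_t newval)
    {
      uint32_t *wp = (uint32_t *) ((uintptr_t) p & ~(uintptr_t) 3);
      unsigned shift = (((uintptr_t) p & 3) ^ 3) * 8;   /* big-endian lane */
      uint32_t mask = (uint32_t) 0xff << shift;
      uint32_t oldw = (uint32_t) oldval << shift;
      uint32_t neww = (uint32_t) newval << shift;

      /* The bytes surrounding the one we operate on.  */
      uint32_t word = __atomic_load_n (wp, __ATOMIC_RELAXED) & ~mask;
      for (;;)
        {
          uint32_t expected = word | oldw;
          uint32_t desired = word | neww;
          if (__atomic_compare_exchange_n (wp, &expected, desired, false,
                                           __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
            return true;                 /* byte matched and was swapped */
          if ((expected & ~mask) == word)
            return false;                /* our byte differed: CAS fails */
          word = expected & ~mask;       /* other bytes changed: retry   */
        }
    }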
  12966. +
  12967. +/* Expand code to perform a compare-and-swap. */
  12968. +
  12969. +void
  12970. +sparc_expand_compare_and_swap (rtx operands[])
  12971. +{
  12972. + rtx bval, retval, mem, oldval, newval;
  12973. + machine_mode mode;
  12974. + enum memmodel model;
  12975. +
  12976. + bval = operands[0];
  12977. + retval = operands[1];
  12978. + mem = operands[2];
  12979. + oldval = operands[3];
  12980. + newval = operands[4];
  12981. + model = (enum memmodel) INTVAL (operands[6]);
  12982. + mode = GET_MODE (mem);
  12983. +
  12984. + sparc_emit_membar_for_model (model, 3, 1);
  12985. +
  12986. + if (reg_overlap_mentioned_p (retval, oldval))
  12987. + oldval = copy_to_reg (oldval);
  12988. +
  12989. + if (mode == QImode || mode == HImode)
  12990. + sparc_expand_compare_and_swap_12 (bval, retval, mem, oldval, newval);
  12991. + else
  12992. + {
  12993. + rtx (*gen) (rtx, rtx, rtx, rtx);
  12994. + rtx x;
  12995. +
  12996. + if (mode == SImode)
  12997. + gen = gen_atomic_compare_and_swapsi_1;
  12998. + else
  12999. + gen = gen_atomic_compare_and_swapdi_1;
  13000. + emit_insn (gen (retval, mem, oldval, newval));
  13001. +
  13002. + x = emit_store_flag (bval, EQ, retval, oldval, mode, 1, 1);
  13003. + if (x != bval)
  13004. + convert_move (bval, x, 1);
  13005. + }
  13006. +
  13007. + sparc_emit_membar_for_model (model, 3, 2);
  13008. +}
  13009. +
  13010. +void
  13011. +sparc_expand_vec_perm_bmask (machine_mode vmode, rtx sel)
  13012. +{
  13013. + rtx t_1, t_2, t_3;
  13014. +
  13015. + sel = gen_lowpart (DImode, sel);
  13016. + switch (vmode)
  13017. + {
  13018. + case E_V2SImode:
  13019. + /* inp = xxxxxxxAxxxxxxxB */
  13020. + t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
  13021. + NULL_RTX, 1, OPTAB_DIRECT);
  13022. + /* t_1 = ....xxxxxxxAxxx. */
  13023. + sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
  13024. + GEN_INT (3), NULL_RTX, 1, OPTAB_DIRECT);
  13025. + t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
  13026. + GEN_INT (0x30000), NULL_RTX, 1, OPTAB_DIRECT);
  13027. + /* sel = .......B */
  13028. + /* t_1 = ...A.... */
  13029. + sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
  13030. + /* sel = ...A...B */
  13031. + sel = expand_mult (SImode, sel, GEN_INT (0x4444), sel, 1);
  13032. + /* sel = AAAABBBB * 4 */
  13033. + t_1 = force_reg (SImode, GEN_INT (0x01230123));
  13034. + /* sel = { A*4, A*4+1, A*4+2, ... } */
  13035. + break;
  13036. +
  13037. + case E_V4HImode:
  13038. + /* inp = xxxAxxxBxxxCxxxD */
  13039. + t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
  13040. + NULL_RTX, 1, OPTAB_DIRECT);
  13041. + t_2 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
  13042. + NULL_RTX, 1, OPTAB_DIRECT);
  13043. + t_3 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (24),
  13044. + NULL_RTX, 1, OPTAB_DIRECT);
  13045. + /* t_1 = ..xxxAxxxBxxxCxx */
  13046. + /* t_2 = ....xxxAxxxBxxxC */
  13047. + /* t_3 = ......xxxAxxxBxx */
  13048. + sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
  13049. + GEN_INT (0x07),
  13050. + NULL_RTX, 1, OPTAB_DIRECT);
  13051. + t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
  13052. + GEN_INT (0x0700),
  13053. + NULL_RTX, 1, OPTAB_DIRECT);
  13054. + t_2 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_2),
  13055. + GEN_INT (0x070000),
  13056. + NULL_RTX, 1, OPTAB_DIRECT);
  13057. + t_3 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_3),
  13058. + GEN_INT (0x07000000),
  13059. + NULL_RTX, 1, OPTAB_DIRECT);
  13060. + /* sel = .......D */
  13061. + /* t_1 = .....C.. */
  13062. + /* t_2 = ...B.... */
  13063. + /* t_3 = .A...... */
  13064. + sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
  13065. + t_2 = expand_simple_binop (SImode, IOR, t_2, t_3, t_2, 1, OPTAB_DIRECT);
  13066. + sel = expand_simple_binop (SImode, IOR, sel, t_2, sel, 1, OPTAB_DIRECT);
  13067. + /* sel = .A.B.C.D */
  13068. + sel = expand_mult (SImode, sel, GEN_INT (0x22), sel, 1);
  13069. + /* sel = AABBCCDD * 2 */
  13070. + t_1 = force_reg (SImode, GEN_INT (0x01010101));
  13071. + /* sel = { A*2, A*2+1, B*2, B*2+1, ... } */
  13072. + break;
  13073. +
  13074. + case E_V8QImode:
  13075. + /* input = xAxBxCxDxExFxGxH */
  13076. + sel = expand_simple_binop (DImode, AND, sel,
  13077. + GEN_INT ((HOST_WIDE_INT)0x0f0f0f0f << 32
  13078. + | 0x0f0f0f0f),
  13079. + NULL_RTX, 1, OPTAB_DIRECT);
  13080. + /* sel = .A.B.C.D.E.F.G.H */
  13081. + t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (4),
  13082. + NULL_RTX, 1, OPTAB_DIRECT);
  13083. + /* t_1 = ..A.B.C.D.E.F.G. */
  13084. + sel = expand_simple_binop (DImode, IOR, sel, t_1,
  13085. + NULL_RTX, 1, OPTAB_DIRECT);
  13086. + /* sel = .AABBCCDDEEFFGGH */
  13087. + sel = expand_simple_binop (DImode, AND, sel,
  13088. + GEN_INT ((HOST_WIDE_INT)0xff00ff << 32
  13089. + | 0xff00ff),
  13090. + NULL_RTX, 1, OPTAB_DIRECT);
  13091. + /* sel = ..AB..CD..EF..GH */
  13092. + t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
  13093. + NULL_RTX, 1, OPTAB_DIRECT);
  13094. + /* t_1 = ....AB..CD..EF.. */
  13095. + sel = expand_simple_binop (DImode, IOR, sel, t_1,
  13096. + NULL_RTX, 1, OPTAB_DIRECT);
  13097. + /* sel = ..ABABCDCDEFEFGH */
  13098. + sel = expand_simple_binop (DImode, AND, sel,
  13099. + GEN_INT ((HOST_WIDE_INT)0xffff << 32 | 0xffff),
  13100. + NULL_RTX, 1, OPTAB_DIRECT);
  13101. + /* sel = ....ABCD....EFGH */
  13102. + t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
  13103. + NULL_RTX, 1, OPTAB_DIRECT);
  13104. + /* t_1 = ........ABCD.... */
  13105. + sel = gen_lowpart (SImode, sel);
  13106. + t_1 = gen_lowpart (SImode, t_1);
  13107. + break;
  13108. +
  13109. + default:
  13110. + gcc_unreachable ();
  13111. + }
  13112. +
  13113. + /* Always perform the final addition/merge within the bmask insn. */
  13114. + emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, t_1));
  13115. +}
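
The V8QImode path is easiest to follow as plain integer arithmetic. A standalone sketch of the same shift/OR/AND folding, with the final addition done in C instead of by the BMASK instruction (the function name is invented; each letter in the comments is one selector nibble):

    #include <stdint.h>

    /* Squeeze the low nibble of each of the eight selector bytes into
       one contiguous 32-bit mask, as the expansion above does.  */
    static uint32_t
    compress_sel_v8qi (uint64_t sel)
    {
      sel &= 0x0f0f0f0f0f0f0f0fULL;                   /* .A.B.C.D.E.F.G.H */
      sel |= sel >> 4;                                /* .AABBCCDDEEFFGGH */
      sel &= 0x00ff00ff00ff00ffULL;                   /* ..AB..CD..EF..GH */
      sel |= sel >> 8;                                /* ..ABABCDCDEFEFGH */
      sel &= 0x0000ffff0000ffffULL;                   /* ....ABCD....EFGH */
      return (uint32_t) sel + (uint32_t) (sel >> 16); /* ABCDEFGH */
    }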
  13116. +
  13117. + /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
  13118. +
  13119. +static bool
  13120. +sparc_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
  13121. + rtx op1, const vec_perm_indices &sel)
  13122. +{
  13123. + if (!TARGET_VIS2)
  13124. + return false;
  13125. +
  13126. + /* All permutes are supported. */
  13127. + if (!target)
  13128. + return true;
  13129. +
  13130. + /* Force target-independent code to convert constant permutations on other
  13131. + modes down to V8QI. Rely on this to avoid the complexity of the byte
  13132. + order of the permutation. */
  13133. + if (vmode != V8QImode)
  13134. + return false;
  13135. +
  13136. + unsigned int i, mask;
  13137. + for (i = mask = 0; i < 8; ++i)
  13138. + mask |= (sel[i] & 0xf) << (28 - i*4);
  13139. + rtx mask_rtx = force_reg (SImode, gen_int_mode (mask, SImode));
  13140. +
  13141. + emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), mask_rtx, const0_rtx));
  13142. + emit_insn (gen_bshufflev8qi_vis (target, op0, op1));
  13143. + return true;
  13144. +}
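
A standalone sketch of the constant-mask packing loop above (the function name is invented): each 4-bit selector element lands in one nibble of the BMASK operand, most significant first, so the identity permutation {0,1,2,3,4,5,6,7} packs to 0x01234567:

    #include <stdint.h>

    static uint32_t
    pack_perm_mask (const unsigned sel[8])
    {
      uint32_t mask = 0;
      for (unsigned i = 0; i < 8; i++)
        mask |= (sel[i] & 0xf) << (28 - i * 4);  /* element i -> nibble i */
      return mask;
    }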
  13145. +
  13146. +/* Implement TARGET_FRAME_POINTER_REQUIRED. */
  13147. +
  13148. +static bool
  13149. +sparc_frame_pointer_required (void)
  13150. +{
  13151. + /* If the stack pointer is dynamically modified in the function, it cannot
  13152. + serve as the frame pointer. */
  13153. + if (cfun->calls_alloca)
  13154. + return true;
  13155. +
  13156. + /* If the function receives nonlocal gotos, it needs to save the frame
  13157. + pointer in the nonlocal_goto_save_area object. */
  13158. + if (cfun->has_nonlocal_label)
  13159. + return true;
  13160. +
  13161. + /* In flat mode, that's it. */
  13162. + if (TARGET_FLAT)
  13163. + return false;
  13164. +
  13165. + /* Otherwise, the frame pointer is required if the function isn't leaf, but
  13166. + we cannot use sparc_leaf_function_p since it hasn't been computed yet. */
  13167. + return !(optimize > 0 && crtl->is_leaf && only_leaf_regs_used ());
  13168. +}
  13169. +
  13170. +/* The way this is structured, we can't eliminate SFP in favor of SP
  13171. + if the frame pointer is required: we want to use the SFP->HFP elimination
  13172. + in that case. But the test in update_eliminables doesn't know we are
  13173. + assuming below that we only do the former elimination. */
  13174. +
  13175. +static bool
  13176. +sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
  13177. +{
  13178. + return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
  13179. +}
  13180. +
  13181. +/* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
  13182. + they won't be allocated. */
  13183. +
  13184. +static void
  13185. +sparc_conditional_register_usage (void)
  13186. +{
  13187. + if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
  13188. + fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
  13189. + /* If the user has passed -f{fixed,call-{used,saved}}-g5,
  13190. + then honor it. */
  13191. + if (TARGET_ARCH32 && fixed_regs[5])
  13192. + fixed_regs[5] = 1;
  13193. + else if (TARGET_ARCH64 && fixed_regs[5] == 2)
  13194. + fixed_regs[5] = 0;
  13195. + if (! TARGET_V9)
  13196. + {
  13197. + int regno;
  13198. + for (regno = SPARC_FIRST_V9_FP_REG;
  13199. + regno <= SPARC_LAST_V9_FP_REG;
  13200. + regno++)
  13201. + fixed_regs[regno] = 1;
  13202. + /* %fcc0 is used by v8 and v9. */
  13203. + for (regno = SPARC_FIRST_V9_FCC_REG + 1;
  13204. + regno <= SPARC_LAST_V9_FCC_REG;
  13205. + regno++)
  13206. + fixed_regs[regno] = 1;
  13207. + }
  13208. + if (! TARGET_FPU)
  13209. + {
  13210. + int regno;
  13211. + for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
  13212. + fixed_regs[regno] = 1;
  13213. + }
  13214. + /* If the user has passed -f{fixed,call-{used,saved}}-g2,
  13215. + then honor it. Likewise with g3 and g4. */
  13216. + if (fixed_regs[2] == 2)
  13217. + fixed_regs[2] = ! TARGET_APP_REGS;
  13218. + if (fixed_regs[3] == 2)
  13219. + fixed_regs[3] = ! TARGET_APP_REGS;
  13220. + if (TARGET_ARCH32 && fixed_regs[4] == 2)
  13221. + fixed_regs[4] = ! TARGET_APP_REGS;
  13222. + else if (TARGET_CM_EMBMEDANY)
  13223. + fixed_regs[4] = 1;
  13224. + else if (fixed_regs[4] == 2)
  13225. + fixed_regs[4] = 0;
  13226. + if (TARGET_FLAT)
  13227. + {
  13228. + int regno;
  13229. + /* Disable leaf functions. */
  13230. + memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
  13231. + for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
  13232. + leaf_reg_remap [regno] = regno;
  13233. + }
  13234. + if (TARGET_VIS)
  13235. + global_regs[SPARC_GSR_REG] = 1;
  13236. +}
  13237. +
  13238. +/* Implement TARGET_USE_PSEUDO_PIC_REG. */
  13239. +
  13240. +static bool
  13241. +sparc_use_pseudo_pic_reg (void)
  13242. +{
  13243. + return !TARGET_VXWORKS_RTP && flag_pic;
  13244. +}
  13245. +
  13246. +/* Implement TARGET_INIT_PIC_REG. */
  13247. +
  13248. +static void
  13249. +sparc_init_pic_reg (void)
  13250. +{
  13251. + edge entry_edge;
  13252. + rtx_insn *seq;
  13253. +
  13254. + /* In PIC mode, we need to always initialize the PIC register if optimization
  13255. + is enabled, because we are called from IRA and LRA may later force things
  13256. + to the constant pool for optimization purposes. */
  13257. + if (!flag_pic || (!crtl->uses_pic_offset_table && !optimize))
  13258. + return;
  13259. +
  13260. + start_sequence ();
  13261. + load_got_register ();
  13262. + if (!TARGET_VXWORKS_RTP)
  13263. + emit_move_insn (pic_offset_table_rtx, got_register_rtx);
  13264. + seq = get_insns ();
  13265. + end_sequence ();
  13266. +
  13267. + entry_edge = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
  13268. + insert_insn_on_edge (seq, entry_edge);
  13269. + commit_one_edge_insertion (entry_edge);
  13270. +}
  13271. +
  13272. +/* Implement TARGET_PREFERRED_RELOAD_CLASS:
  13273. +
  13274. + - We can't load constants into FP registers.
  13275. + - We can't load FP constants into integer registers when soft-float,
  13276. + because there is no soft-float pattern with a r/F constraint.
  13277. + - We can't load FP constants into integer registers for TFmode unless
  13278. + it is 0.0L, because there is no movtf pattern with a r/F constraint.
  13279. + - Try to reload integer constants (symbolic or otherwise) back into
  13280. + registers directly, rather than having them dumped to memory. */
  13281. +
  13282. +static reg_class_t
  13283. +sparc_preferred_reload_class (rtx x, reg_class_t rclass)
  13284. +{
  13285. + machine_mode mode = GET_MODE (x);
  13286. + if (CONSTANT_P (x))
  13287. + {
  13288. + if (FP_REG_CLASS_P (rclass)
  13289. + || rclass == GENERAL_OR_FP_REGS
  13290. + || rclass == GENERAL_OR_EXTRA_FP_REGS
  13291. + || (GET_MODE_CLASS (mode) == MODE_FLOAT && ! TARGET_FPU)
  13292. + || (mode == TFmode && ! const_zero_operand (x, mode)))
  13293. + return NO_REGS;
  13294. +
  13295. + if (GET_MODE_CLASS (mode) == MODE_INT)
  13296. + return GENERAL_REGS;
  13297. +
  13298. + if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
  13299. + {
  13300. + if (! FP_REG_CLASS_P (rclass)
  13301. + || !(const_zero_operand (x, mode)
  13302. + || const_all_ones_operand (x, mode)))
  13303. + return NO_REGS;
  13304. + }
  13305. + }
  13306. +
  13307. + if (TARGET_VIS3
  13308. + && ! TARGET_ARCH64
  13309. + && (rclass == EXTRA_FP_REGS
  13310. + || rclass == GENERAL_OR_EXTRA_FP_REGS))
  13311. + {
  13312. + int regno = true_regnum (x);
  13313. +
  13314. + if (SPARC_INT_REG_P (regno))
  13315. + return (rclass == EXTRA_FP_REGS
  13316. + ? FP_REGS : GENERAL_OR_FP_REGS);
  13317. + }
  13318. +
  13319. + return rclass;
  13320. +}
  13321. +
  13322. + /* Return true if we use LRA instead of the reload pass. */
  13323. +
  13324. +static bool
  13325. +sparc_lra_p (void)
  13326. +{
  13327. + return TARGET_LRA;
  13328. +}
  13329. +
  13330. +/* Output a wide multiply instruction in V8+ mode. INSN is the instruction,
  13331. + OPERANDS are its operands and OPCODE is the mnemonic to be used. */
  13332. +
  13333. +const char *
  13334. +output_v8plus_mult (rtx_insn *insn, rtx *operands, const char *opcode)
  13335. +{
  13336. + char mulstr[32];
  13337. +
  13338. + gcc_assert (! TARGET_ARCH64);
  13339. +
  13340. + if (sparc_check_64 (operands[1], insn) <= 0)
  13341. + output_asm_insn ("srl\t%L1, 0, %L1", operands);
  13342. + if (which_alternative == 1)
  13343. + output_asm_insn ("sllx\t%H1, 32, %H1", operands);
  13344. + if (GET_CODE (operands[2]) == CONST_INT)
  13345. + {
  13346. + if (which_alternative == 1)
  13347. + {
  13348. + output_asm_insn ("or\t%L1, %H1, %H1", operands);
  13349. + sprintf (mulstr, "%s\t%%H1, %%2, %%L0", opcode);
  13350. + output_asm_insn (mulstr, operands);
  13351. + return "srlx\t%L0, 32, %H0";
  13352. + }
  13353. + else
  13354. + {
  13355. + output_asm_insn ("sllx\t%H1, 32, %3", operands);
  13356. + output_asm_insn ("or\t%L1, %3, %3", operands);
  13357. + sprintf (mulstr, "%s\t%%3, %%2, %%3", opcode);
  13358. + output_asm_insn (mulstr, operands);
  13359. + output_asm_insn ("srlx\t%3, 32, %H0", operands);
  13360. + return "mov\t%3, %L0";
  13361. + }
  13362. + }
  13363. + else if (rtx_equal_p (operands[1], operands[2]))
  13364. + {
  13365. + if (which_alternative == 1)
  13366. + {
  13367. + output_asm_insn ("or\t%L1, %H1, %H1", operands);
  13368. + sprintf (mulstr, "%s\t%%H1, %%H1, %%L0", opcode);
  13369. + output_asm_insn (mulstr, operands);
  13370. + return "srlx\t%L0, 32, %H0";
  13371. + }
  13372. + else
  13373. + {
  13374. + output_asm_insn ("sllx\t%H1, 32, %3", operands);
  13375. + output_asm_insn ("or\t%L1, %3, %3", operands);
  13376. + sprintf (mulstr, "%s\t%%3, %%3, %%3", opcode);
  13377. + output_asm_insn (mulstr, operands);
  13378. + output_asm_insn ("srlx\t%3, 32, %H0", operands);
  13379. + return "mov\t%3, %L0";
  13380. + }
  13381. + }
  13382. + if (sparc_check_64 (operands[2], insn) <= 0)
  13383. + output_asm_insn ("srl\t%L2, 0, %L2", operands);
  13384. + if (which_alternative == 1)
  13385. + {
  13386. + output_asm_insn ("or\t%L1, %H1, %H1", operands);
  13387. + output_asm_insn ("sllx\t%H2, 32, %L1", operands);
  13388. + output_asm_insn ("or\t%L2, %L1, %L1", operands);
  13389. + sprintf (mulstr, "%s\t%%H1, %%L1, %%L0", opcode);
  13390. + output_asm_insn (mulstr, operands);
  13391. + return "srlx\t%L0, 32, %H0";
  13392. + }
  13393. + else
  13394. + {
  13395. + output_asm_insn ("sllx\t%H1, 32, %3", operands);
  13396. + output_asm_insn ("sllx\t%H2, 32, %4", operands);
  13397. + output_asm_insn ("or\t%L1, %3, %3", operands);
  13398. + output_asm_insn ("or\t%L2, %4, %4", operands);
  13399. + sprintf (mulstr, "%s\t%%3, %%4, %%3", opcode);
  13400. + output_asm_insn (mulstr, operands);
  13401. + output_asm_insn ("srlx\t%3, 32, %H0", operands);
  13402. + return "mov\t%3, %L0";
  13403. + }
  13404. +}
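
What the scratch-register path boils down to, modeled in plain C (a sketch with invented names; under the V8+ convention a 64-bit value lives in a high/low pair of 32-bit registers, and the steps below correspond one-to-one to the emitted sllx/or/mulx/srlx/mov sequence):

    #include <stdint.h>

    static void
    v8plus_mult (uint32_t h1, uint32_t l1, uint32_t h2, uint32_t l2,
                 uint32_t *h0, uint32_t *l0)
    {
      uint64_t op1 = ((uint64_t) h1 << 32) | l1;  /* sllx + or */
      uint64_t op2 = ((uint64_t) h2 << 32) | l2;  /* sllx + or */
      uint64_t res = op1 * op2;                   /* mulx      */
      *h0 = (uint32_t) (res >> 32);               /* srlx      */
      *l0 = (uint32_t) res;                       /* mov       */
    }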
  13405. +
  13406. +/* Subroutine of sparc_expand_vector_init. Emit code to initialize
  13407. + all fields of TARGET to ELT by means of the VIS2 BSHUFFLE insn. MODE
  13408. + and INNER_MODE are the modes describing TARGET. */
  13409. +
  13410. +static void
  13411. +vector_init_bshuffle (rtx target, rtx elt, machine_mode mode,
  13412. + machine_mode inner_mode)
  13413. +{
  13414. + rtx t1, final_insn, sel;
  13415. + int bmask;
  13416. +
  13417. + t1 = gen_reg_rtx (mode);
  13418. +
  13419. + elt = convert_modes (SImode, inner_mode, elt, true);
  13420. + emit_move_insn (gen_lowpart(SImode, t1), elt);
  13421. +
  13422. + switch (mode)
  13423. + {
  13424. + case E_V2SImode:
  13425. + final_insn = gen_bshufflev2si_vis (target, t1, t1);
  13426. + bmask = 0x45674567;
  13427. + break;
  13428. + case E_V4HImode:
  13429. + final_insn = gen_bshufflev4hi_vis (target, t1, t1);
  13430. + bmask = 0x67676767;
  13431. + break;
  13432. + case E_V8QImode:
  13433. + final_insn = gen_bshufflev8qi_vis (target, t1, t1);
  13434. + bmask = 0x77777777;
  13435. + break;
  13436. + default:
  13437. + gcc_unreachable ();
  13438. + }
  13439. +
  13440. + sel = force_reg (SImode, GEN_INT (bmask));
  13441. + emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, const0_rtx));
  13442. + emit_insn (final_insn);
  13443. +}
  13444. +
  13445. +/* Subroutine of sparc_expand_vector_init. Emit code to initialize
  13446. + all fields of TARGET to ELT in V8QI by means of the VIS FPMERGE insn. */
  13447. +
  13448. +static void
  13449. +vector_init_fpmerge (rtx target, rtx elt)
  13450. +{
  13451. + rtx t1, t2, t2_low, t3, t3_low;
  13452. +
  13453. + t1 = gen_reg_rtx (V4QImode);
  13454. + elt = convert_modes (SImode, QImode, elt, true);
  13455. + emit_move_insn (gen_lowpart (SImode, t1), elt);
  13456. +
  13457. + t2 = gen_reg_rtx (V8QImode);
  13458. + t2_low = gen_lowpart (V4QImode, t2);
  13459. + emit_insn (gen_fpmerge_vis (t2, t1, t1));
  13460. +
  13461. + t3 = gen_reg_rtx (V8QImode);
  13462. + t3_low = gen_lowpart (V4QImode, t3);
  13463. + emit_insn (gen_fpmerge_vis (t3, t2_low, t2_low));
  13464. +
  13465. + emit_insn (gen_fpmerge_vis (target, t3_low, t3_low));
  13466. +}
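
The three FPMERGE steps implement the splat by repeated doubling: each merge interleaves a value with itself, so one copy of the element becomes two, then four, then eight. A standalone C analogue of that doubling (illustrative only, not the VIS semantics themselves):

    #include <stdint.h>

    static uint64_t
    splat_byte (uint8_t elt)
    {
      uint64_t v = elt;
      v |= v << 8;    /* 2 copies */
      v |= v << 16;   /* 4 copies */
      v |= v << 32;   /* 8 copies */
      return v;
    }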
  13467. +
  13468. +/* Subroutine of sparc_expand_vector_init. Emit code to initialize
  13469. + all fields of TARGET to ELT in V4HI by means of the VIS FALIGNDATA insn. */
  13470. +
  13471. +static void
  13472. +vector_init_faligndata (rtx target, rtx elt)
  13473. +{
  13474. + rtx t1 = gen_reg_rtx (V4HImode);
  13475. + int i;
  13476. +
  13477. + elt = convert_modes (SImode, HImode, elt, true);
  13478. + emit_move_insn (gen_lowpart (SImode, t1), elt);
  13479. +
  13480. + emit_insn (gen_alignaddrsi_vis (gen_reg_rtx (SImode),
  13481. + force_reg (SImode, GEN_INT (6)),
  13482. + const0_rtx));
  13483. +
  13484. + for (i = 0; i < 4; i++)
  13485. + emit_insn (gen_faligndatav4hi_vis (target, t1, target));
  13486. +}
  13487. +
  13488. +/* Emit code to initialize TARGET to values for individual fields VALS. */
  13489. +
  13490. +void
  13491. +sparc_expand_vector_init (rtx target, rtx vals)
  13492. +{
  13493. + const machine_mode mode = GET_MODE (target);
  13494. + const machine_mode inner_mode = GET_MODE_INNER (mode);
  13495. + const int n_elts = GET_MODE_NUNITS (mode);
  13496. + int i, n_var = 0;
  13497. + bool all_same = true;
  13498. + rtx mem;
  13499. +
  13500. + for (i = 0; i < n_elts; i++)
  13501. + {
  13502. + rtx x = XVECEXP (vals, 0, i);
  13503. + if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
  13504. + n_var++;
  13505. +
  13506. + if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
  13507. + all_same = false;
  13508. + }
  13509. +
  13510. + if (n_var == 0)
  13511. + {
  13512. + emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
  13513. + return;
  13514. + }
  13515. +
  13516. + if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (mode))
  13517. + {
  13518. + if (GET_MODE_SIZE (inner_mode) == 4)
  13519. + {
  13520. + emit_move_insn (gen_lowpart (SImode, target),
  13521. + gen_lowpart (SImode, XVECEXP (vals, 0, 0)));
  13522. + return;
  13523. + }
  13524. + else if (GET_MODE_SIZE (inner_mode) == 8)
  13525. + {
  13526. + emit_move_insn (gen_lowpart (DImode, target),
  13527. + gen_lowpart (DImode, XVECEXP (vals, 0, 0)));
  13528. + return;
  13529. + }
  13530. + }
  13531. + else if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (word_mode)
  13532. + && GET_MODE_SIZE (mode) == 2 * GET_MODE_SIZE (word_mode))
  13533. + {
  13534. + emit_move_insn (gen_highpart (word_mode, target),
  13535. + gen_lowpart (word_mode, XVECEXP (vals, 0, 0)));
  13536. + emit_move_insn (gen_lowpart (word_mode, target),
  13537. + gen_lowpart (word_mode, XVECEXP (vals, 0, 1)));
  13538. + return;
  13539. + }
  13540. +
  13541. + if (all_same && GET_MODE_SIZE (mode) == 8)
  13542. + {
  13543. + if (TARGET_VIS2)
  13544. + {
  13545. + vector_init_bshuffle (target, XVECEXP (vals, 0, 0), mode, inner_mode);
  13546. + return;
  13547. + }
  13548. + if (mode == V8QImode)
  13549. + {
  13550. + vector_init_fpmerge (target, XVECEXP (vals, 0, 0));
  13551. + return;
  13552. + }
  13553. + if (mode == V4HImode)
  13554. + {
  13555. + vector_init_faligndata (target, XVECEXP (vals, 0, 0));
  13556. + return;
  13557. + }
  13558. + }
  13559. +
  13560. + mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  13561. + for (i = 0; i < n_elts; i++)
  13562. + emit_move_insn (adjust_address_nv (mem, inner_mode,
  13563. + i * GET_MODE_SIZE (inner_mode)),
  13564. + XVECEXP (vals, 0, i));
  13565. + emit_move_insn (target, mem);
  13566. +}
  13567. +
  13568. +/* Implement TARGET_SECONDARY_RELOAD. */
  13569. +
  13570. +static reg_class_t
  13571. +sparc_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
  13572. + machine_mode mode, secondary_reload_info *sri)
  13573. +{
  13574. + enum reg_class rclass = (enum reg_class) rclass_i;
  13575. +
  13576. + sri->icode = CODE_FOR_nothing;
  13577. + sri->extra_cost = 0;
  13578. +
  13579. + /* We need a temporary when loading/storing an HImode/QImode value
  13580. + between memory and the FPU registers. This can happen when combine puts
  13581. + a paradoxical subreg in a float/fix conversion insn. */
  13582. + if (FP_REG_CLASS_P (rclass)
  13583. + && (mode == HImode || mode == QImode)
  13584. + && (GET_CODE (x) == MEM
  13585. + || ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
  13586. + && true_regnum (x) == -1)))
  13587. + return GENERAL_REGS;
  13588. +
  13589. + /* On 32-bit we need a temporary when loading/storing a DFmode value
  13590. + between unaligned memory and the upper FPU registers. */
  13591. + if (TARGET_ARCH32
  13592. + && rclass == EXTRA_FP_REGS
  13593. + && mode == DFmode
  13594. + && GET_CODE (x) == MEM
  13595. + && ! mem_min_alignment (x, 8))
  13596. + return FP_REGS;
  13597. +
  13598. + if (((TARGET_CM_MEDANY
  13599. + && symbolic_operand (x, mode))
  13600. + || (TARGET_CM_EMBMEDANY
  13601. + && text_segment_operand (x, mode)))
  13602. + && ! flag_pic)
  13603. + {
  13604. + if (in_p)
  13605. + sri->icode = direct_optab_handler (reload_in_optab, mode);
  13606. + else
  13607. + sri->icode = direct_optab_handler (reload_out_optab, mode);
  13608. + return NO_REGS;
  13609. + }
  13610. +
  13611. + if (TARGET_VIS3 && TARGET_ARCH32)
  13612. + {
  13613. + int regno = true_regnum (x);
  13614. +
  13615. + /* When using VIS3 fp<-->int register moves, on 32-bit we have
  13616. + to move 8-byte values in 4-byte pieces. This only works via
  13617. + FP_REGS, and not via EXTRA_FP_REGS. Therefore if we try to
  13618. + move between EXTRA_FP_REGS and GENERAL_REGS, we will need
  13619. + an FP_REGS intermediate move. */
  13620. + if ((rclass == EXTRA_FP_REGS && SPARC_INT_REG_P (regno))
  13621. + || ((general_or_i64_p (rclass)
  13622. + || rclass == GENERAL_OR_FP_REGS)
  13623. + && SPARC_FP_REG_P (regno)))
  13624. + {
  13625. + sri->extra_cost = 2;
  13626. + return FP_REGS;
  13627. + }
  13628. + }
  13629. +
  13630. + return NO_REGS;
  13631. +}
  13632. +
  13633. +/* Implement TARGET_SECONDARY_MEMORY_NEEDED.
  13634. +
  13635. + On SPARC, when VIS3 is not available, it is not possible to move data
  13636. + directly between GENERAL_REGS and FP_REGS. */
  13637. +
  13638. +static bool
  13639. +sparc_secondary_memory_needed (machine_mode mode, reg_class_t class1,
  13640. + reg_class_t class2)
  13641. +{
  13642. + return ((FP_REG_CLASS_P (class1) != FP_REG_CLASS_P (class2))
  13643. + && (! TARGET_VIS3
  13644. + || GET_MODE_SIZE (mode) > 8
  13645. + || GET_MODE_SIZE (mode) < 4));
  13646. +}
  13647. +
  13648. +/* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.
  13649. +
  13650. + get_secondary_mem widens its argument to BITS_PER_WORD which loses on v9
  13651. + because the movsi and movsf patterns don't handle r/f moves.
  13652. + For v8 we copy the default definition. */
  13653. +
  13654. +static machine_mode
  13655. +sparc_secondary_memory_needed_mode (machine_mode mode)
  13656. +{
  13657. + if (TARGET_ARCH64)
  13658. + {
  13659. + if (GET_MODE_BITSIZE (mode) < 32)
  13660. + return mode_for_size (32, GET_MODE_CLASS (mode), 0).require ();
  13661. + return mode;
  13662. + }
  13663. + else
  13664. + {
  13665. + if (GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
  13666. + return mode_for_size (BITS_PER_WORD,
  13667. + GET_MODE_CLASS (mode), 0).require ();
  13668. + return mode;
  13669. + }
  13670. +}
  13671. +
  13672. +/* Emit code to conditionally move either OPERANDS[2] or OPERANDS[3] into
  13673. + OPERANDS[0] in MODE. OPERANDS[1] is the operator of the condition. */
  13674. +
  13675. +bool
  13676. +sparc_expand_conditional_move (machine_mode mode, rtx *operands)
  13677. +{
  13678. + enum rtx_code rc = GET_CODE (operands[1]);
  13679. + machine_mode cmp_mode;
  13680. + rtx cc_reg, dst, cmp;
  13681. +
  13682. + cmp = operands[1];
  13683. + if (GET_MODE (XEXP (cmp, 0)) == DImode && !TARGET_ARCH64)
  13684. + return false;
  13685. +
  13686. + if (GET_MODE (XEXP (cmp, 0)) == TFmode && !TARGET_HARD_QUAD)
  13687. + cmp = sparc_emit_float_lib_cmp (XEXP (cmp, 0), XEXP (cmp, 1), rc);
  13688. +
  13689. + cmp_mode = GET_MODE (XEXP (cmp, 0));
  13690. + rc = GET_CODE (cmp);
  13691. +
  13692. + dst = operands[0];
  13693. + if (! rtx_equal_p (operands[2], dst)
  13694. + && ! rtx_equal_p (operands[3], dst))
  13695. + {
  13696. + if (reg_overlap_mentioned_p (dst, cmp))
  13697. + dst = gen_reg_rtx (mode);
  13698. +
  13699. + emit_move_insn (dst, operands[3]);
  13700. + }
  13701. + else if (operands[2] == dst)
  13702. + {
  13703. + operands[2] = operands[3];
  13704. +
  13705. + if (GET_MODE_CLASS (cmp_mode) == MODE_FLOAT)
  13706. + rc = reverse_condition_maybe_unordered (rc);
  13707. + else
  13708. + rc = reverse_condition (rc);
  13709. + }
  13710. +
  13711. + if (XEXP (cmp, 1) == const0_rtx
  13712. + && GET_CODE (XEXP (cmp, 0)) == REG
  13713. + && cmp_mode == DImode
  13714. + && v9_regcmp_p (rc))
  13715. + cc_reg = XEXP (cmp, 0);
  13716. + else
  13717. + cc_reg = gen_compare_reg_1 (rc, XEXP (cmp, 0), XEXP (cmp, 1));
  13718. +
  13719. + cmp = gen_rtx_fmt_ee (rc, GET_MODE (cc_reg), cc_reg, const0_rtx);
  13720. +
  13721. + emit_insn (gen_rtx_SET (dst,
  13722. + gen_rtx_IF_THEN_ELSE (mode, cmp, operands[2], dst)));
  13723. +
  13724. + if (dst != operands[0])
  13725. + emit_move_insn (operands[0], dst);
  13726. +
  13727. + return true;
  13728. +}
  13729. +
  13730. +/* Emit code to conditionally move a combination of OPERANDS[1] and OPERANDS[2]
  13731. + into OPERANDS[0] in MODE, depending on the outcome of the comparison of
  13732. + OPERANDS[4] and OPERANDS[5]. OPERANDS[3] is the operator of the condition.
  13733. + FCODE is the machine code to be used for OPERANDS[3] and CCODE the machine
  13734. + code to be used for the condition mask. */
  13735. +
  13736. +void
  13737. +sparc_expand_vcond (machine_mode mode, rtx *operands, int ccode, int fcode)
  13738. +{
  13739. + rtx mask, cop0, cop1, fcmp, cmask, bshuf, gsr;
  13740. + enum rtx_code code = GET_CODE (operands[3]);
  13741. +
  13742. + mask = gen_reg_rtx (Pmode);
  13743. + cop0 = operands[4];
  13744. + cop1 = operands[5];
  13745. + if (code == LT || code == GE)
  13746. + {
  13747. + rtx t;
  13748. +
  13749. + code = swap_condition (code);
  13750. + t = cop0; cop0 = cop1; cop1 = t;
  13751. + }
  13752. +
  13753. + gsr = gen_rtx_REG (DImode, SPARC_GSR_REG);
  13754. +
  13755. + fcmp = gen_rtx_UNSPEC (Pmode,
  13756. + gen_rtvec (1, gen_rtx_fmt_ee (code, mode, cop0, cop1)),
  13757. + fcode);
  13758. +
  13759. + cmask = gen_rtx_UNSPEC (DImode,
  13760. + gen_rtvec (2, mask, gsr),
  13761. + ccode);
  13762. +
  13763. + bshuf = gen_rtx_UNSPEC (mode,
  13764. + gen_rtvec (3, operands[1], operands[2], gsr),
  13765. + UNSPEC_BSHUFFLE);
  13766. +
  13767. + emit_insn (gen_rtx_SET (mask, fcmp));
  13768. + emit_insn (gen_rtx_SET (gsr, cmask));
  13769. +
  13770. + emit_insn (gen_rtx_SET (operands[0], bshuf));
  13771. +}
  13772. +
  13773. + /* On SPARC, any mode that naturally allocates into the float
  13774. + registers should return 4 here. */
  13775. +
  13776. +unsigned int
  13777. +sparc_regmode_natural_size (machine_mode mode)
  13778. +{
  13779. + int size = UNITS_PER_WORD;
  13780. +
  13781. + if (TARGET_ARCH64)
  13782. + {
  13783. + enum mode_class mclass = GET_MODE_CLASS (mode);
  13784. +
  13785. + if (mclass == MODE_FLOAT || mclass == MODE_VECTOR_INT)
  13786. + size = 4;
  13787. + }
  13788. +
  13789. + return size;
  13790. +}
  13791. +
  13792. +/* Implement TARGET_HARD_REGNO_NREGS.
  13793. +
  13794. + On SPARC, ordinary registers hold 32 bits worth; this means both
  13795. + integer and floating point registers. On v9, integer regs hold 64
  13796. + bits worth; floating point regs hold 32 bits worth (this includes the
  13797. + new fp regs as even the odd ones are included in the hard register
  13798. + count). */
  13799. +
  13800. +static unsigned int
  13801. +sparc_hard_regno_nregs (unsigned int regno, machine_mode mode)
  13802. +{
  13803. + if (regno == SPARC_GSR_REG)
  13804. + return 1;
  13805. + if (TARGET_ARCH64)
  13806. + {
  13807. + if (SPARC_INT_REG_P (regno) || regno == FRAME_POINTER_REGNUM)
  13808. + return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
  13809. + return CEIL (GET_MODE_SIZE (mode), 4);
  13810. + }
  13811. + return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
  13812. +}
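
A plain-C restatement of the arch64 rule above (a sketch with invented names): integer registers hold UNITS_PER_WORD = 8 bytes while FP registers hold 4, so an 8-byte DFmode value occupies one integer register but two FP registers:

    #define CEIL(x, y)  (((x) + (y) - 1) / (y))

    static unsigned int
    nregs_arch64 (int is_int_reg, unsigned int mode_size)
    {
      return is_int_reg ? CEIL (mode_size, 8)   /* UNITS_PER_WORD == 8 */
                        : CEIL (mode_size, 4);  /* FP regs are 32-bit  */
    }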
  13813. +
  13814. +/* Implement TARGET_HARD_REGNO_MODE_OK.
  13815. +
  13816. + ??? Because of the funny way we pass parameters we should allow certain
  13817. + ??? types of float/complex values to be in integer registers during
  13818. + ??? RTL generation. This only matters on arch32. */
  13819. +
  13820. +static bool
  13821. +sparc_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
  13822. +{
  13823. + return (hard_regno_mode_classes[regno] & sparc_mode_class[mode]) != 0;
  13824. +}
  13825. +
  13826. +/* Implement TARGET_MODES_TIEABLE_P.
  13827. +
  13828. + For V9 we have to deal with the fact that only the lower 32 floating
  13829. + point registers are 32-bit addressable. */
  13830. +
  13831. +static bool
  13832. +sparc_modes_tieable_p (machine_mode mode1, machine_mode mode2)
  13833. +{
  13834. + enum mode_class mclass1, mclass2;
  13835. + unsigned short size1, size2;
  13836. +
  13837. + if (mode1 == mode2)
  13838. + return true;
  13839. +
  13840. + mclass1 = GET_MODE_CLASS (mode1);
  13841. + mclass2 = GET_MODE_CLASS (mode2);
  13842. + if (mclass1 != mclass2)
  13843. + return false;
  13844. +
  13845. + if (! TARGET_V9)
  13846. + return true;
  13847. +
  13848. + /* Classes are the same and we are V9 so we have to deal with upper
  13849. + vs. lower floating point registers. If one of the modes is a
  13850. + 4-byte mode, and the other is not, we have to mark them as not
  13851. + tieable because only the lower 32 floating point registers are
  13852. + addressable 32 bits at a time.
  13853. +
  13854. + We can't just test explicitly for SFmode, otherwise we won't
  13855. + cover the vector mode cases properly. */
  13856. +
  13857. + if (mclass1 != MODE_FLOAT && mclass1 != MODE_VECTOR_INT)
  13858. + return true;
  13859. +
  13860. + size1 = GET_MODE_SIZE (mode1);
  13861. + size2 = GET_MODE_SIZE (mode2);
  13862. + if ((size1 > 4 && size2 == 4)
  13863. + || (size2 > 4 && size1 == 4))
  13864. + return false;
  13865. +
  13866. + return true;
  13867. +}
  13868. +
  13869. +/* Implement TARGET_CSTORE_MODE. */
  13870. +
  13871. +static scalar_int_mode
  13872. +sparc_cstore_mode (enum insn_code icode ATTRIBUTE_UNUSED)
  13873. +{
  13874. + return (TARGET_ARCH64 ? DImode : SImode);
  13875. +}
  13876. +
  13877. +/* Return the compound expression made of T1 and T2. */
  13878. +
  13879. +static inline tree
  13880. +compound_expr (tree t1, tree t2)
  13881. +{
  13882. + return build2 (COMPOUND_EXPR, void_type_node, t1, t2);
  13883. +}
  13884. +
  13885. +/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
  13886. +
  13887. +static void
  13888. +sparc_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
  13889. +{
  13890. + if (!TARGET_FPU)
  13891. + return;
  13892. +
  13893. + const unsigned HOST_WIDE_INT accrued_exception_mask = 0x1f << 5;
  13894. + const unsigned HOST_WIDE_INT trap_enable_mask = 0x1f << 23;
  13895. +
  13896. + /* We generate the equivalent of feholdexcept (&fenv_var):
  13897. +
  13898. + unsigned int fenv_var;
  13899. + __builtin_store_fsr (&fenv_var);
  13900. +
  13901. + unsigned int tmp1_var;
  13902. + tmp1_var = fenv_var & ~(accrued_exception_mask | trap_enable_mask);
  13903. +
  13904. + __builtin_load_fsr (&tmp1_var); */
  13905. +
  13906. + tree fenv_var = create_tmp_var_raw (unsigned_type_node);
  13907. + TREE_ADDRESSABLE (fenv_var) = 1;
  13908. + tree fenv_addr = build_fold_addr_expr (fenv_var);
  13909. + tree stfsr = sparc_builtins[SPARC_BUILTIN_STFSR];
  13910. + tree hold_stfsr
  13911. + = build4 (TARGET_EXPR, unsigned_type_node, fenv_var,
  13912. + build_call_expr (stfsr, 1, fenv_addr), NULL_TREE, NULL_TREE);
  13913. +
  13914. + tree tmp1_var = create_tmp_var_raw (unsigned_type_node);
  13915. + TREE_ADDRESSABLE (tmp1_var) = 1;
  13916. + tree masked_fenv_var
  13917. + = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
  13918. + build_int_cst (unsigned_type_node,
  13919. + ~(accrued_exception_mask | trap_enable_mask)));
  13920. + tree hold_mask
  13921. + = build4 (TARGET_EXPR, unsigned_type_node, tmp1_var, masked_fenv_var,
  13922. + NULL_TREE, NULL_TREE);
  13923. +
  13924. + tree tmp1_addr = build_fold_addr_expr (tmp1_var);
  13925. + tree ldfsr = sparc_builtins[SPARC_BUILTIN_LDFSR];
  13926. + tree hold_ldfsr = build_call_expr (ldfsr, 1, tmp1_addr);
  13927. +
  13928. + *hold = compound_expr (compound_expr (hold_stfsr, hold_mask), hold_ldfsr);
  13929. +
  13930. + /* We reload the value of tmp1_var to clear the exceptions:
  13931. +
  13932. + __builtin_load_fsr (&tmp1_var); */
  13933. +
  13934. + *clear = build_call_expr (ldfsr, 1, tmp1_addr);
  13935. +
  13936. + /* We generate the equivalent of feupdateenv (&fenv_var):
  13937. +
  13938. + unsigned int tmp2_var;
  13939. + __builtin_store_fsr (&tmp2_var);
  13940. +
  13941. + __builtin_load_fsr (&fenv_var);
  13942. +
  13943. + if (SPARC_LOW_FE_EXCEPT_VALUES)
  13944. + tmp2_var >>= 5;
  13945. + __atomic_feraiseexcept ((int) tmp2_var); */
  13946. +
  13947. + tree tmp2_var = create_tmp_var_raw (unsigned_type_node);
  13948. + TREE_ADDRESSABLE (tmp2_var) = 1;
  13949. + tree tmp2_addr = build_fold_addr_expr (tmp2_var);
  13950. + tree update_stfsr
  13951. + = build4 (TARGET_EXPR, unsigned_type_node, tmp2_var,
  13952. + build_call_expr (stfsr, 1, tmp2_addr), NULL_TREE, NULL_TREE);
  13953. +
  13954. + tree update_ldfsr = build_call_expr (ldfsr, 1, fenv_addr);
  13955. +
  13956. + tree atomic_feraiseexcept
  13957. + = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
  13958. + tree update_call
  13959. + = build_call_expr (atomic_feraiseexcept, 1,
  13960. + fold_convert (integer_type_node, tmp2_var));
  13961. +
  13962. + if (SPARC_LOW_FE_EXCEPT_VALUES)
  13963. + {
  13964. + tree shifted_tmp2_var
  13965. + = build2 (RSHIFT_EXPR, unsigned_type_node, tmp2_var,
  13966. + build_int_cst (unsigned_type_node, 5));
  13967. + tree update_shift
  13968. + = build2 (MODIFY_EXPR, void_type_node, tmp2_var, shifted_tmp2_var);
  13969. + update_call = compound_expr (update_shift, update_call);
  13970. + }
  13971. +
  13972. + *update
  13973. + = compound_expr (compound_expr (update_stfsr, update_ldfsr), update_call);
  13974. +}
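
Pulling the three comment fragments above together, the generated code is equivalent to the following C (a sketch, not compilable off SPARC: __builtin_store_fsr and __builtin_load_fsr stand for the STFSR/LDFSR builtins the function calls, SPARC_LOW_FE_EXCEPT_VALUES is the target macro it tests, and __atomic_feraiseexcept is the libatomic helper behind BUILT_IN_ATOMIC_FERAISEEXCEPT):

    unsigned int fenv_var, tmp1_var, tmp2_var;

    /* hold: save the FSR, then clear accrued exceptions (bits 9:5)
       and trap enables (bits 27:23).  */
    __builtin_store_fsr (&fenv_var);
    tmp1_var = fenv_var & ~((0x1f << 5) | (0x1f << 23));
    __builtin_load_fsr (&tmp1_var);

    /* ...protected code runs here...  */

    /* clear: reload the masked value to discard new exceptions.  */
    __builtin_load_fsr (&tmp1_var);

    /* update: capture what happened, restore the saved environment,
       and re-raise the captured exceptions.  */
    __builtin_store_fsr (&tmp2_var);
    __builtin_load_fsr (&fenv_var);
    if (SPARC_LOW_FE_EXCEPT_VALUES)
      tmp2_var >>= 5;
    __atomic_feraiseexcept ((int) tmp2_var);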
  13975. +
  13976. +/* Implement TARGET_CAN_CHANGE_MODE_CLASS. Borrowed from the PA port.
  13977. +
  13978. + SImode loads to floating-point registers are not zero-extended.
  13979. + The definition for LOAD_EXTEND_OP specifies that integer loads
  13980. + narrower than BITS_PER_WORD will be zero-extended. As a result,
  13981. + we inhibit changes from SImode unless they are to a mode that is
  13982. + identical in size.
  13983. +
  13984. + Likewise for SFmode, since word-mode paradoxical subregs are
  13985. + problematic on big-endian architectures. */
  13986. +
  13987. +static bool
  13988. +sparc_can_change_mode_class (machine_mode from, machine_mode to,
  13989. + reg_class_t rclass)
  13990. +{
  13991. + if (TARGET_ARCH64
  13992. + && GET_MODE_SIZE (from) == 4
  13993. + && GET_MODE_SIZE (to) != 4)
  13994. + return !reg_classes_intersect_p (rclass, FP_REGS);
  13995. + return true;
  13996. +}
  13997. +
  13998. +/* Implement TARGET_CONSTANT_ALIGNMENT. */
  13999. +
  14000. +static HOST_WIDE_INT
  14001. +sparc_constant_alignment (const_tree exp, HOST_WIDE_INT align)
  14002. +{
  14003. + if (TREE_CODE (exp) == STRING_CST)
  14004. + return MAX (align, FASTEST_ALIGNMENT);
  14005. + return align;
  14006. +}
  14007. +
  14008. +#include "gt-sparc.h"
  14009. diff -Nur gcc-10.3.0.orig/gcc/config/sparc/sparc.md gcc-10.3.0/gcc/config/sparc/sparc.md
  14010. --- gcc-10.3.0.orig/gcc/config/sparc/sparc.md 2021-04-08 13:56:28.205742322 +0200
  14011. +++ gcc-10.3.0/gcc/config/sparc/sparc.md 2021-04-09 07:51:37.936504607 +0200
  14012. @@ -1601,7 +1601,10 @@
  14013. (clobber (reg:P O7_REG))]
  14014. "REGNO (operands[0]) == INTVAL (operands[3])"
  14015. {
  14016. - return output_load_pcrel_sym (operands);
  14017. + if (flag_delayed_branch)
  14018. + return "sethi\t%%hi(%a1-4), %0\n\tcall\t%a2\n\t add\t%0, %%lo(%a1+4), %0";
  14019. + else
  14020. + return "sethi\t%%hi(%a1-8), %0\n\tadd\t%0, %%lo(%a1-4), %0\n\tcall\t%a2\n\t nop";
  14021. }
  14022. [(set (attr "type") (const_string "multi"))
  14023. (set (attr "length")
  14024. diff -Nur gcc-10.3.0.orig/gcc/config/sparc/sparc.md.orig gcc-10.3.0/gcc/config/sparc/sparc.md.orig
  14025. --- gcc-10.3.0.orig/gcc/config/sparc/sparc.md.orig 1970-01-01 01:00:00.000000000 +0100
  14026. +++ gcc-10.3.0/gcc/config/sparc/sparc.md.orig 2021-04-08 13:56:28.205742322 +0200
  14027. @@ -0,0 +1,9524 @@
  14028. +;; Machine description for SPARC.
  14029. +;; Copyright (C) 1987-2020 Free Software Foundation, Inc.
  14030. +;; Contributed by Michael Tiemann (tiemann@cygnus.com)
  14031. +;; 64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
  14032. +;; at Cygnus Support.
  14033. +
  14034. +;; This file is part of GCC.
  14035. +
  14036. +;; GCC is free software; you can redistribute it and/or modify
  14037. +;; it under the terms of the GNU General Public License as published by
  14038. +;; the Free Software Foundation; either version 3, or (at your option)
  14039. +;; any later version.
  14040. +
  14041. +;; GCC is distributed in the hope that it will be useful,
  14042. +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
  14043. +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14044. +;; GNU General Public License for more details.
  14045. +
  14046. +;; You should have received a copy of the GNU General Public License
  14047. +;; along with GCC; see the file COPYING3. If not see
  14048. +;; <http://www.gnu.org/licenses/>.
  14049. +
  14050. +(define_c_enum "unspec" [
  14051. + UNSPEC_MOVE_PIC
  14052. + UNSPEC_UPDATE_RETURN
  14053. + UNSPEC_LOAD_PCREL_SYM
  14054. + UNSPEC_FRAME_BLOCKAGE
  14055. + UNSPEC_MOVE_PIC_LABEL
  14056. + UNSPEC_SETH44
  14057. + UNSPEC_SETM44
  14058. + UNSPEC_SETHH
  14059. + UNSPEC_SETLM
  14060. + UNSPEC_EMB_HISUM
  14061. + UNSPEC_EMB_TEXTUHI
  14062. + UNSPEC_EMB_TEXTHI
  14063. + UNSPEC_EMB_TEXTULO
  14064. + UNSPEC_EMB_SETHM
  14065. + UNSPEC_MOVE_GOTDATA
  14066. +
  14067. + UNSPEC_MEMBAR
  14068. + UNSPEC_ATOMIC
  14069. +
  14070. + UNSPEC_TLSGD
  14071. + UNSPEC_TLSLDM
  14072. + UNSPEC_TLSLDO
  14073. + UNSPEC_TLSIE
  14074. + UNSPEC_TLSLE
  14075. + UNSPEC_TLSLD_BASE
  14076. +
  14077. + UNSPEC_FPACK16
  14078. + UNSPEC_FPACK32
  14079. + UNSPEC_FPACKFIX
  14080. + UNSPEC_FEXPAND
  14081. + UNSPEC_MUL16AU
  14082. + UNSPEC_MUL16AL
  14083. + UNSPEC_MUL8UL
  14084. + UNSPEC_MULDUL
  14085. + UNSPEC_ALIGNDATA
  14086. + UNSPEC_FCMP
  14087. + UNSPEC_PDIST
  14088. + UNSPEC_EDGE8
  14089. + UNSPEC_EDGE8L
  14090. + UNSPEC_EDGE16
  14091. + UNSPEC_EDGE16L
  14092. + UNSPEC_EDGE32
  14093. + UNSPEC_EDGE32L
  14094. + UNSPEC_ARRAY8
  14095. + UNSPEC_ARRAY16
  14096. + UNSPEC_ARRAY32
  14097. +
  14098. + UNSPEC_SP_SET
  14099. + UNSPEC_SP_TEST
  14100. +
  14101. + UNSPEC_EDGE8N
  14102. + UNSPEC_EDGE8LN
  14103. + UNSPEC_EDGE16N
  14104. + UNSPEC_EDGE16LN
  14105. + UNSPEC_EDGE32N
  14106. + UNSPEC_EDGE32LN
  14107. + UNSPEC_BSHUFFLE
  14108. + UNSPEC_CMASK8
  14109. + UNSPEC_CMASK16
  14110. + UNSPEC_CMASK32
  14111. + UNSPEC_FCHKSM16
  14112. + UNSPEC_PDISTN
  14113. + UNSPEC_FUCMP
  14114. + UNSPEC_FHADD
  14115. + UNSPEC_FHSUB
  14116. + UNSPEC_XMUL
  14117. + UNSPEC_MUL8
  14118. + UNSPEC_MUL8SU
  14119. + UNSPEC_MULDSU
  14120. +
  14121. + UNSPEC_ADDV
  14122. + UNSPEC_SUBV
  14123. + UNSPEC_NEGV
  14124. +
  14125. + UNSPEC_DICTUNPACK
  14126. + UNSPEC_FPCMPSHL
  14127. + UNSPEC_FPUCMPSHL
  14128. + UNSPEC_FPCMPDESHL
  14129. + UNSPEC_FPCMPURSHL
  14130. +])
  14131. +
  14132. +(define_c_enum "unspecv" [
  14133. + UNSPECV_BLOCKAGE
  14134. +
  14135. + UNSPECV_SPECULATION_BARRIER
  14136. +
  14137. + UNSPECV_PROBE_STACK_RANGE
  14138. +
  14139. + UNSPECV_FLUSHW
  14140. + UNSPECV_SAVEW
  14141. +
  14142. + UNSPECV_FLUSH
  14143. +
  14144. + UNSPECV_LDSTUB
  14145. + UNSPECV_SWAP
  14146. + UNSPECV_CAS
  14147. +
  14148. + UNSPECV_LDFSR
  14149. + UNSPECV_STFSR
  14150. +])
  14151. +
  14152. +(define_constants
  14153. + [(G0_REG 0)
  14154. + (G1_REG 1)
  14155. + (G2_REG 2)
  14156. + (G3_REG 3)
  14157. + (G4_REG 4)
  14158. + (G5_REG 5)
  14159. + (G6_REG 6)
  14160. + (G7_REG 7)
  14161. + (O0_REG 8)
  14162. + (O1_REG 9)
  14163. + (O2_REG 10)
  14164. + (O3_REG 11)
  14165. + (O4_REG 12)
  14166. + (O5_REG 13)
  14167. + (O6_REG 14)
  14168. + (O7_REG 15)
  14169. + (L0_REG 16)
  14170. + (L1_REG 17)
  14171. + (L2_REG 18)
  14172. + (L3_REG 19)
  14173. + (L4_REG 20)
  14174. + (L5_REG 21)
  14175. + (L6_REG 22)
  14176. + (L7_REG 23)
  14177. + (I0_REG 24)
  14178. + (I1_REG 25)
  14179. + (I2_REG 26)
  14180. + (I3_REG 27)
  14181. + (I4_REG 28)
  14182. + (I5_REG 29)
  14183. + (I6_REG 30)
  14184. + (I7_REG 31)
  14185. + (F0_REG 32)
  14186. + (F1_REG 33)
  14187. + (F2_REG 34)
  14188. + (F3_REG 35)
  14189. + (F4_REG 36)
  14190. + (F5_REG 37)
  14191. + (F6_REG 38)
  14192. + (F7_REG 39)
  14193. + (F8_REG 40)
  14194. + (F9_REG 41)
  14195. + (F10_REG 42)
  14196. + (F11_REG 43)
  14197. + (F12_REG 44)
  14198. + (F13_REG 45)
  14199. + (F14_REG 46)
  14200. + (F15_REG 47)
  14201. + (F16_REG 48)
  14202. + (F17_REG 49)
  14203. + (F18_REG 50)
  14204. + (F19_REG 51)
  14205. + (F20_REG 52)
  14206. + (F21_REG 53)
  14207. + (F22_REG 54)
  14208. + (F23_REG 55)
  14209. + (F24_REG 56)
  14210. + (F25_REG 57)
  14211. + (F26_REG 58)
  14212. + (F27_REG 59)
  14213. + (F28_REG 60)
  14214. + (F29_REG 61)
  14215. + (F30_REG 62)
  14216. + (F31_REG 63)
  14217. + (F32_REG 64)
  14218. + (F34_REG 66)
  14219. + (F36_REG 68)
  14220. + (F38_REG 70)
  14221. + (F40_REG 72)
  14222. + (F42_REG 74)
  14223. + (F44_REG 76)
  14224. + (F46_REG 78)
  14225. + (F48_REG 80)
  14226. + (F50_REG 82)
  14227. + (F52_REG 84)
  14228. + (F54_REG 86)
  14229. + (F56_REG 88)
  14230. + (F58_REG 90)
  14231. + (F60_REG 92)
  14232. + (F62_REG 94)
  14233. + (FCC0_REG 96)
  14234. + (FCC1_REG 97)
  14235. + (FCC2_REG 98)
  14236. + (FCC3_REG 99)
  14237. + (CC_REG 100)
  14238. + (SFP_REG 101)
  14239. + (GSR_REG 102)
  14240. + ])
  14241. +
  14242. +(define_mode_iterator I [QI HI SI DI])
  14243. +(define_mode_iterator P [(SI "TARGET_ARCH32") (DI "TARGET_ARCH64")])
  14244. +(define_mode_iterator W [SI (DI "TARGET_ARCH64")])
  14245. +(define_mode_iterator F [SF DF TF])
  14246. +
  14247. +;; The upper 32 fp regs on the v9 can't hold SFmode values. To deal with this
  14248. +;; a second register class, EXTRA_FP_REGS, exists for the v9 chip. The name
  14249. +;; is a bit of a misnomer as it covers all 64 fp regs. The corresponding
  14250. +;; constraint letter is 'e'. To avoid any confusion, 'e' is used instead of
  14251. +;; 'f' for all DF/TFmode values, including those that are specific to the v8.
+
+;; Attribute for cpu type.
+;; These must match the values of enum sparc_processor_type in sparc-opts.h.
+(define_attr "cpu"
+ "v7,
+ cypress,
+ v8,
+ supersparc,
+ hypersparc,
+ leon,
+ leon3,
+ leon3v7,
+ sparclite,
+ f930,
+ f934,
+ sparclite86x,
+ sparclet,
+ tsc701,
+ v9,
+ ultrasparc,
+ ultrasparc3,
+ niagara,
+ niagara2,
+ niagara3,
+ niagara4,
+ niagara7,
+ m8"
+ (const (symbol_ref "sparc_cpu_attr")))
+
+;; Attribute for the instruction set.
+;; At present we only need to distinguish v9/!v9, but for clarity we
+;; test TARGET_V8 too.
+(define_attr "isa" "v7,v8,v9,sparclet"
+ (const
+ (cond [(symbol_ref "TARGET_V9") (const_string "v9")
+ (symbol_ref "TARGET_V8") (const_string "v8")
+ (symbol_ref "TARGET_SPARCLET") (const_string "sparclet")]
+ (const_string "v7"))))
+
+(define_attr "cpu_feature" "none,fpu,fpunotv9,v9,vis,vis3,vis4,vis4b"
+ (const_string "none"))
+
+(define_attr "lra" "disabled,enabled"
+ (const_string "enabled"))
+
+(define_attr "enabled" ""
+ (cond [(eq_attr "cpu_feature" "none")
+ (cond [(eq_attr "lra" "disabled") (symbol_ref "!TARGET_LRA")] (const_int 1))
+ (eq_attr "cpu_feature" "fpu") (symbol_ref "TARGET_FPU")
+ (eq_attr "cpu_feature" "fpunotv9") (symbol_ref "TARGET_FPU && !TARGET_V9")
+ (eq_attr "cpu_feature" "v9") (symbol_ref "TARGET_V9")
+ (eq_attr "cpu_feature" "vis") (symbol_ref "TARGET_VIS")
+ (eq_attr "cpu_feature" "vis3") (symbol_ref "TARGET_VIS3")
+ (eq_attr "cpu_feature" "vis4") (symbol_ref "TARGET_VIS4")
+ (eq_attr "cpu_feature" "vis4b") (symbol_ref "TARGET_VIS4B")]
+ (const_int 0)))
+
+;; The SPARC instructions used by the backend are organized into a
+;; hierarchy using the insn attributes "type" and "subtype".
+;;
+;; The mnemonics used in the list below are the architectural names
+;; used in the Oracle SPARC Architecture specs. A / character
+;; separates the type from the subtype where appropriate. For
+;; brevity, text enclosed in {} denotes alternatives, while text
+;; enclosed in [] is optional.
+;;
+;; Please keep this list updated. It is of great help in keeping the DFA
+;; schedulers correct and coherent; a classification sketch follows the list.
+;;
+;; ialu: <empty>
+;; ialuX: ADD[X]C SUB[X]C
+;; shift: SLL[X] SRL[X] SRA[X]
+;; cmove: MOV{A,N,NE,E,G,LE,GE,L,GU,LEU,CC,CS,POS,NEG,VC,VS}
+;; MOVF{A,N,U,G,UG,L,UL,LG,NE,E,UE,GE,UGE,LE,ULE,O}
+;; MOVR{Z,LEZ,LZ,NZ,GZ,GEZ}
+;; compare: ADDcc ADDCcc ANDcc ORcc SUBcc SUBCcc XORcc XNORcc
+;; imul: MULX SMUL[cc] UMUL UMULXHI XMULX XMULXHI
+;; idiv: UDIVX SDIVX
+;; flush: FLUSH
+;; load/regular: LD{UB,UH,UW} LDFSR
+;; load/prefetch: PREFETCH
+;; fpload: LDF LDDF LDQF
+;; sload: LD{SB,SH,SW}
+;; store: ST{B,H,W,X} STFSR
+;; fpstore: STF STDF STQF
+;; cbcond: CWB{NE,E,G,LE,GE,L,GU,LEU,CC,CS,POS,NEG,VC,VS}
+;; CXB{NE,E,G,LE,GE,L,GU,LEU,CC,CS,POS,NEG,VC,VS}
+;; uncond_branch: BA BPA JMPL
+;; branch: B{NE,E,G,LE,GE,L,GU,LEU,CC,CS,POS,NEG,VC,VS}
+;; BP{NE,E,G,LE,GE,L,GU,LEU,CC,CS,POS,NEG,VC,VS}
+;; FB{U,G,UG,L,UL,LG,NE,BE,UE,GE,UGE,LE,ULE,O}
+;; call: CALL
+;; return: RESTORE RETURN
+;; fpmove: FABS{s,d,q} FMOV{s,d,q} FNEG{s,d,q}
+;; fpcmove: FMOV{S,D,Q}{icc,xcc,fcc}
+;; fpcrmove: FMOVR{s,d,q}{Z,LEZ,LZ,NZ,GZ,GEZ}
+;; fp: FADD{s,d,q} FSUB{s,d,q} FHSUB{s,d} FNHADD{s,d} FNADD{s,d}
+;; FiTO{s,d,q} FsTO{i,x,d,q} FdTO{i,x,s,q} FxTO{d,s,q} FqTO{i,x,s,d}
+;; fpcmp: FCMP{s,d,q} FCMPE{s,d,q}
+;; fpmul: FMADD{s,d} FMSUB{s,d} FMUL{s,d,q} FNMADD{s,d}
+;; FNMSUB{s,d} FNMUL{s,d} FNsMULd FsMULd
+;; FdMULq
+;; array: ARRAY{8,16,32}
+;; bmask: BMASK
+;; edge: EDGE{8,16,32}[L]cc
+;; edgen: EDGE{8,16,32}[L]n
+;; fpdivs: FDIV{s,q}
+;; fpsqrts: FSQRT{s,q}
+;; fpdivd: FDIVd
+;; fpsqrtd: FSQRTd
+;; lzd: LZCNT
+;; fga/addsub64: FP{ADD,SUB}64
+;; fga/fpu: FCHKSM16 FEXPANd FMEAN16 FPMERGE
+;; FS{LL,RA,RL}{16,32}
+;; fga/maxmin: FP{MAX,MIN}[U]{8,16,32}
+;; fga/cmask: CMASK{8,16,32}
+;; fga/other: BSHUFFLE FALIGNDATAg FP{ADD,SUB}[S]{8,16,32}
+;; FP{ADD,SUB}US{8,16} DICTUNPACK
+;; gsr/reg: RDGSR WRGSR
+;; gsr/alignaddr: ALIGNADDRESS[_LITTLE]
+;; vismv/double: FSRC2d
+;; vismv/single: MOVwTOs FSRC2s
+;; vismv/movstouw: MOVsTOuw
+;; vismv/movxtod: MOVxTOd
+;; vismv/movdtox: MOVdTOx
+;; visl/single: F{AND,NAND,NOR,OR,NOT1}s
+;; F{AND,OR}NOT{1,2}s
+;; FONEs F{ZERO,XNOR,XOR}s FNOT2s
+;; visl/double: FONEd FZEROd FNOT1d F{OR,AND,XOR}d F{NOR,NAND,XNOR}d
+;; F{OR,AND}NOT1d F{OR,AND}NOT2d
+;; viscmp: FPCMP{LE,GT,NE,EQ}{8,16,32} FPCMPU{LE,GT,NE,EQ}{8,16,32}
+;; FPCMP{LE,GT,EQ,NE}{8,16,32}SHL FPCMPU{LE,GT,EQ,NE}{8,16,32}SHL
+;; FPCMPDE{8,16,32}SHL FPCMPUR{8,16,32}SHL
+;; fgm_pack: FPACKFIX FPACK{8,16,32}
+;; fgm_mul: FMUL8SUx16 FMUL8ULx16 FMUL8x16 FMUL8x16AL
+;; FMUL8x16AU FMULD8SUx16 FMULD8ULx16
+;; pdist: PDIST
+;; pdistn: PDISTN
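+;;
+;; As a classification sketch (illustrative only), a VIS3 64-bit partitioned
+;; add of the fga/addsub64 kind listed above would be tagged in its
+;; define_insn as:
+;;
+;; [(set_attr "type" "fga")
+;;  (set_attr "subtype" "addsub64")]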
+
+(define_attr "type"
+ "ialu,compare,shift,
+ load,sload,store,
+ uncond_branch,branch,call,sibcall,call_no_delay_slot,return,
+ cbcond,uncond_cbcond,
+ imul,idiv,
+ fpload,fpstore,
+ fp,fpmove,
+ fpcmove,fpcrmove,
+ fpcmp,
+ fpmul,fpdivs,fpdivd,
+ fpsqrts,fpsqrtd,
+ fga,visl,vismv,viscmp,
+ fgm_pack,fgm_mul,pdist,pdistn,edge,edgen,gsr,array,bmask,
+ cmove,
+ ialuX,
+ multi,savew,flushw,iflush,trap,lzd"
+ (const_string "ialu"))
+
+(define_attr "subtype"
+ "single,double,movstouw,movxtod,movdtox,
+ addsub64,cmask,fpu,maxmin,other,
+ reg,alignaddr,
+ prefetch,regular"
+ (const_string "single"))
+
+;; True if branch/call has empty delay slot and will emit a nop in it
+(define_attr "empty_delay_slot" "false,true"
+ (symbol_ref "(empty_delay_slot (insn)
+ ? EMPTY_DELAY_SLOT_TRUE : EMPTY_DELAY_SLOT_FALSE)"))
+
+;; True if we should emit a nop after a cbcond (compare-and-branch)
+;; instruction
+(define_attr "emit_cbcond_nop" "false,true"
+ (symbol_ref "(emit_cbcond_nop (insn)
+ ? EMIT_CBCOND_NOP_TRUE : EMIT_CBCOND_NOP_FALSE)"))
+
+(define_attr "branch_type" "none,icc,fcc,reg"
+ (const_string "none"))
+
+(define_attr "pic" "false,true"
+ (symbol_ref "(flag_pic != 0
+ ? PIC_TRUE : PIC_FALSE)"))
+
+(define_attr "calls_alloca" "false,true"
+ (symbol_ref "(cfun->calls_alloca != 0
+ ? CALLS_ALLOCA_TRUE : CALLS_ALLOCA_FALSE)"))
+
+(define_attr "calls_eh_return" "false,true"
+ (symbol_ref "(crtl->calls_eh_return != 0
+ ? CALLS_EH_RETURN_TRUE : CALLS_EH_RETURN_FALSE)"))
+
+(define_attr "leaf_function" "false,true"
+ (symbol_ref "(crtl->uses_only_leaf_regs != 0
+ ? LEAF_FUNCTION_TRUE : LEAF_FUNCTION_FALSE)"))
+
+(define_attr "delayed_branch" "false,true"
+ (symbol_ref "(flag_delayed_branch != 0
+ ? DELAYED_BRANCH_TRUE : DELAYED_BRANCH_FALSE)"))
+
+(define_attr "flat" "false,true"
+ (symbol_ref "(TARGET_FLAT != 0
+ ? FLAT_TRUE : FLAT_FALSE)"))
+
+(define_attr "fix_ut699" "false,true"
+ (symbol_ref "(sparc_fix_ut699 != 0
+ ? FIX_UT699_TRUE : FIX_UT699_FALSE)"))
+
+(define_attr "fix_b2bst" "false,true"
+ (symbol_ref "(sparc_fix_b2bst != 0
+ ? FIX_B2BST_TRUE : FIX_B2BST_FALSE)"))
+
+(define_attr "fix_lost_divsqrt" "false,true"
+ (symbol_ref "(sparc_fix_lost_divsqrt != 0
+ ? FIX_LOST_DIVSQRT_TRUE : FIX_LOST_DIVSQRT_FALSE)"))
+
+(define_attr "fix_gr712rc" "false,true"
+ (symbol_ref "(sparc_fix_gr712rc != 0
+ ? FIX_GR712RC_TRUE : FIX_GR712RC_FALSE)"))
+
+;; Length (in # of insns).
+;; Beware that setting a length greater than or equal to 3 for conditional branches
+;; has a side-effect (see output_cbranch and output_v9branch).
+(define_attr "length" ""
+ (cond [(eq_attr "type" "uncond_branch,call")
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 2)
+ (const_int 1))
+ (eq_attr "type" "sibcall")
+ (if_then_else (ior (eq_attr "leaf_function" "true")
+ (eq_attr "flat" "true"))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 3)
+ (const_int 2))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 2)
+ (const_int 1)))
+ (eq_attr "branch_type" "icc")
+ (if_then_else (match_operand 0 "v9_comparison_operator" "")
+ (if_then_else (lt (pc) (match_dup 1))
+ (if_then_else (lt (minus (match_dup 1) (pc)) (const_int 260000))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 2)
+ (const_int 1))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 4)
+ (const_int 3)))
+ (if_then_else (lt (minus (pc) (match_dup 1)) (const_int 260000))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 2)
+ (const_int 1))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 4)
+ (const_int 3))))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 2)
+ (const_int 1)))
+ (eq_attr "branch_type" "fcc")
+ (if_then_else (match_operand 0 "fcc0_register_operand" "")
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (if_then_else (not (match_test "TARGET_V9"))
+ (const_int 3)
+ (const_int 2))
+ (if_then_else (not (match_test "TARGET_V9"))
+ (const_int 2)
+ (const_int 1)))
+ (if_then_else (lt (pc) (match_dup 2))
+ (if_then_else (lt (minus (match_dup 2) (pc)) (const_int 260000))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 2)
+ (const_int 1))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 4)
+ (const_int 3)))
+ (if_then_else (lt (minus (pc) (match_dup 2)) (const_int 260000))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 2)
+ (const_int 1))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 4)
+ (const_int 3)))))
+ (eq_attr "branch_type" "reg")
+ (if_then_else (lt (pc) (match_dup 2))
+ (if_then_else (lt (minus (match_dup 2) (pc)) (const_int 32000))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 2)
+ (const_int 1))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 4)
+ (const_int 3)))
+ (if_then_else (lt (minus (pc) (match_dup 2)) (const_int 32000))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 2)
+ (const_int 1))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 4)
+ (const_int 3))))
+ (eq_attr "type" "cbcond")
+ (if_then_else (lt (pc) (match_dup 3))
+ (if_then_else (lt (minus (match_dup 3) (pc)) (const_int 500))
+ (if_then_else (eq_attr "emit_cbcond_nop" "true")
+ (const_int 2)
+ (const_int 1))
+ (const_int 4))
+ (if_then_else (lt (minus (pc) (match_dup 3)) (const_int 500))
+ (if_then_else (eq_attr "emit_cbcond_nop" "true")
+ (const_int 2)
+ (const_int 1))
+ (const_int 4)))
+ (eq_attr "type" "uncond_cbcond")
+ (if_then_else (lt (pc) (match_dup 0))
+ (if_then_else (lt (minus (match_dup 0) (pc)) (const_int 500))
+ (if_then_else (eq_attr "emit_cbcond_nop" "true")
+ (const_int 2)
+ (const_int 1))
+ (const_int 1))
+ (if_then_else (lt (minus (pc) (match_dup 0)) (const_int 500))
+ (if_then_else (eq_attr "emit_cbcond_nop" "true")
+ (const_int 2)
+ (const_int 1))
+ (const_int 1)))
+ ] (const_int 1)))
+
+;; FP precision.
+(define_attr "fptype" "single,double"
+ (const_string "single"))
+
+;; FP precision specific to the UT699.
+(define_attr "fptype_ut699" "none,single"
+ (const_string "none"))
+
+;; UltraSPARC-III integer load type.
+(define_attr "us3load_type" "2cycle,3cycle"
+ (const_string "2cycle"))
+
+(define_asm_attributes
+ [(set_attr "length" "2")
+ (set_attr "type" "multi")])
+
+;; Attributes for branch scheduling
+(define_attr "tls_delay_slot" "false,true"
+ (symbol_ref "((TARGET_GNU_TLS && HAVE_GNU_LD) != 0
+ ? TLS_DELAY_SLOT_TRUE : TLS_DELAY_SLOT_FALSE)"))
+
+(define_attr "in_sibcall_delay" "false,true"
+ (symbol_ref "(eligible_for_sibcall_delay (insn)
+ ? IN_SIBCALL_DELAY_TRUE : IN_SIBCALL_DELAY_FALSE)"))
+
+(define_attr "in_return_delay" "false,true"
+ (symbol_ref "(eligible_for_return_delay (insn)
+ ? IN_RETURN_DELAY_TRUE : IN_RETURN_DELAY_FALSE)"))
+
+;; ??? !v9: Should implement the notion of predelay slots for floating-point
+;; branches. This would allow us to remove the nop always inserted before
+;; a floating-point branch.
+
+;; ??? It is OK for fill_simple_delay_slots to put load/store instructions
+;; in a delay slot, but it is not OK for fill_eager_delay_slots to do so.
+;; This is because doing so will add several pipeline stalls to the path
+;; that the load/store did not come from. Unfortunately, there is no way
+;; to prevent fill_eager_delay_slots from using load/store without completely
+;; disabling them. For the SPEC benchmark set, this is a serious loss,
+;; because it prevents us from moving back the final store of inner loops.
+
+(define_attr "in_branch_delay" "false,true"
+ (cond [(eq_attr "type" "uncond_branch,branch,cbcond,uncond_cbcond,call,sibcall,call_no_delay_slot,multi")
+ (const_string "false")
+ (and (eq_attr "fix_lost_divsqrt" "true")
+ (eq_attr "type" "fpdivs,fpsqrts,fpdivd,fpsqrtd"))
+ (const_string "false")
+ (and (eq_attr "fix_b2bst" "true") (eq_attr "type" "store,fpstore"))
+ (const_string "false")
+ (and (eq_attr "fix_ut699" "true") (eq_attr "type" "load,sload"))
+ (const_string "false")
+ (and (eq_attr "fix_ut699" "true")
+ (and (eq_attr "type" "fpload,fp,fpmove,fpmul,fpdivs,fpsqrts")
+ (ior (eq_attr "fptype" "single")
+ (eq_attr "fptype_ut699" "single"))))
+ (const_string "false")
+ (eq_attr "length" "1")
+ (const_string "true")
+ ] (const_string "false")))
+
+(define_attr "in_integer_branch_annul_delay" "false,true"
+ (cond [(and (eq_attr "fix_gr712rc" "true")
+ (eq_attr "type" "fp,fpcmp,fpmove,fpcmove,fpmul,
+ fpdivs,fpsqrts,fpdivd,fpsqrtd"))
+ (const_string "false")
+ (eq_attr "in_branch_delay" "true")
+ (const_string "true")
+ ] (const_string "false")))
+
+(define_delay (eq_attr "type" "sibcall")
+ [(eq_attr "in_sibcall_delay" "true") (nil) (nil)])
+
+(define_delay (eq_attr "type" "return")
+ [(eq_attr "in_return_delay" "true") (nil) (nil)])
+
+(define_delay (ior (eq_attr "type" "call") (eq_attr "type" "uncond_branch"))
+ [(eq_attr "in_branch_delay" "true") (nil) (nil)])
+
+(define_delay (and (eq_attr "type" "branch") (not (eq_attr "branch_type" "icc")))
+ [(eq_attr "in_branch_delay" "true")
+ (nil)
+ (eq_attr "in_branch_delay" "true")])
+
+(define_delay (and (eq_attr "type" "branch") (eq_attr "branch_type" "icc"))
+ [(eq_attr "in_branch_delay" "true")
+ (nil)
+ (eq_attr "in_integer_branch_annul_delay" "true")])
+
+;; Include SPARC DFA schedulers
+
+(include "cypress.md")
+(include "supersparc.md")
+(include "hypersparc.md")
+(include "leon.md")
+(include "sparclet.md")
+(include "ultra1_2.md")
+(include "ultra3.md")
+(include "niagara.md")
+(include "niagara2.md")
+(include "niagara4.md")
+(include "niagara7.md")
+(include "m8.md")
+
+
+;; Operand and operator predicates and constraints
+
+(include "predicates.md")
+(include "constraints.md")
+
+
+;; Compare instructions.
+
+;; These are just the DEFINE_INSNs to match the patterns and the
+;; DEFINE_SPLITs for some of the scc insns that actually require
+;; more than one machine instruction. DEFINE_EXPANDs are further down.
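+;;
+;; For orientation, a source-level test such as "a != b" is expected to
+;; expand into one of the compares below followed by a conditional branch,
+;; e.g. in 32-bit code (assumed output, registers arbitrary):
+;;
+;; cmp   %o0, %o1
+;; bne   .L1
+;;  nop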
+
+(define_insn "*cmpsi_insn"
+ [(set (reg:CC CC_REG)
+ (compare:CC (match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "arith_operand" "rI")))]
+ ""
+ "cmp\t%0, %1"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmpdi_sp64"
+ [(set (reg:CCX CC_REG)
+ (compare:CCX (match_operand:DI 0 "register_operand" "r")
+ (match_operand:DI 1 "arith_operand" "rI")))]
+ "TARGET_ARCH64"
+ "cmp\t%0, %1"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmpsi_sne"
+ [(set (reg:CCC CC_REG)
+ (compare:CCC (not:SI (match_operand:SI 0 "arith_operand" "rI"))
+ (const_int -1)))]
+ ""
+ "cmp\t%%g0, %0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmpdi_sne"
+ [(set (reg:CCXC CC_REG)
+ (compare:CCXC (not:DI (match_operand:DI 0 "arith_operand" "rI"))
+ (const_int -1)))]
+ "TARGET_ARCH64"
+ "cmp\t%%g0, %0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmpsf_fpe"
+ [(set (match_operand:CCFPE 0 "fcc_register_operand" "=c")
+ (compare:CCFPE (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_FPU"
+{
+ if (TARGET_V9)
+ return "fcmpes\t%0, %1, %2";
+ return "fcmpes\t%1, %2";
+}
+ [(set_attr "type" "fpcmp")])
+
+(define_insn "*cmpdf_fpe"
+ [(set (match_operand:CCFPE 0 "fcc_register_operand" "=c")
+ (compare:CCFPE (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU"
+{
+ if (TARGET_V9)
+ return "fcmped\t%0, %1, %2";
+ return "fcmped\t%1, %2";
+}
+ [(set_attr "type" "fpcmp")
+ (set_attr "fptype" "double")])
+
+(define_insn "*cmptf_fpe"
+ [(set (match_operand:CCFPE 0 "fcc_register_operand" "=c")
+ (compare:CCFPE (match_operand:TF 1 "register_operand" "e")
+ (match_operand:TF 2 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+{
+ if (TARGET_V9)
+ return "fcmpeq\t%0, %1, %2";
+ return "fcmpeq\t%1, %2";
+}
+ [(set_attr "type" "fpcmp")])
+
+(define_insn "*cmpsf_fp"
+ [(set (match_operand:CCFP 0 "fcc_register_operand" "=c")
+ (compare:CCFP (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_FPU"
+{
+ if (TARGET_V9)
+ return "fcmps\t%0, %1, %2";
+ return "fcmps\t%1, %2";
+}
+ [(set_attr "type" "fpcmp")])
+
+(define_insn "*cmpdf_fp"
+ [(set (match_operand:CCFP 0 "fcc_register_operand" "=c")
+ (compare:CCFP (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU"
+{
+ if (TARGET_V9)
+ return "fcmpd\t%0, %1, %2";
+ return "fcmpd\t%1, %2";
+}
+ [(set_attr "type" "fpcmp")
+ (set_attr "fptype" "double")])
+
+(define_insn "*cmptf_fp"
+ [(set (match_operand:CCFP 0 "fcc_register_operand" "=c")
+ (compare:CCFP (match_operand:TF 1 "register_operand" "e")
+ (match_operand:TF 2 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+{
+ if (TARGET_V9)
+ return "fcmpq\t%0, %1, %2";
+ return "fcmpq\t%1, %2";
+}
+ [(set_attr "type" "fpcmp")])
+
+;; Next come the scc insns.
+
+;; Note that the boolean result (operand 0) takes on DImode
+;; (not SImode) when TARGET_ARCH64.
+
+(define_expand "cstoresi4"
+ [(use (match_operator 1 "comparison_operator"
+ [(match_operand:SI 2 "compare_operand" "")
+ (match_operand:SI 3 "arith_operand" "")]))
+ (clobber (match_operand:SI 0 "cstore_result_operand"))]
+ ""
+{
+ if (GET_CODE (operands[2]) == ZERO_EXTRACT && operands[3] != const0_rtx)
+ operands[2] = force_reg (SImode, operands[2]);
+ if (emit_scc_insn (operands)) DONE; else FAIL;
+})
+
+(define_expand "cstoredi4"
+ [(use (match_operator 1 "comparison_operator"
+ [(match_operand:DI 2 "compare_operand" "")
+ (match_operand:DI 3 "arith_operand" "")]))
+ (clobber (match_operand:SI 0 "cstore_result_operand"))]
+ "TARGET_ARCH64"
+{
+ if (GET_CODE (operands[2]) == ZERO_EXTRACT && operands[3] != const0_rtx)
+ operands[2] = force_reg (DImode, operands[2]);
+ if (emit_scc_insn (operands)) DONE; else FAIL;
+})
+
+(define_expand "cstore<F:mode>4"
+ [(use (match_operator 1 "comparison_operator"
+ [(match_operand:F 2 "register_operand" "")
+ (match_operand:F 3 "register_operand" "")]))
+ (clobber (match_operand:SI 0 "cstore_result_operand"))]
+ "TARGET_FPU"
+{
+ if (emit_scc_insn (operands)) DONE; else FAIL;
+})
+
+;; The SNE and SEQ patterns are special because they can be done
+;; without any branching and do not involve a COMPARE.
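+;;
+;; For example, "(x != 0)" in 32-bit mode is expected to come out as two
+;; data insns and no branch (assumed output, registers arbitrary):
+;;
+;; cmp   %g0, %o0      ! carry := (%o0 != 0)
+;; addx  %g0, 0, %o1   ! %o1 := carry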
+
+(define_insn_and_split "*snesi<W:mode>_zero"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (ne:W (match_operand:SI 1 "register_operand" "r")
+ (const_int 0)))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ ""
+ [(set (reg:CCC CC_REG) (compare:CCC (not:SI (match_dup 1)) (const_int -1)))
+ (set (match_dup 0) (ltu:W (reg:CCC CC_REG) (const_int 0)))]
+ ""
+ [(set_attr "length" "2")])
+
+(define_insn_and_split "*neg_snesi<W:mode>_zero"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (neg:W (ne:W (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ ""
+ [(set (reg:CCC CC_REG) (compare:CCC (not:SI (match_dup 1)) (const_int -1)))
+ (set (match_dup 0) (neg:W (ltu:W (reg:CCC CC_REG) (const_int 0))))]
+ ""
+ [(set_attr "length" "2")])
+
+(define_insn_and_split "*snedi<W:mode>_zero"
+ [(set (match_operand:W 0 "register_operand" "=&r")
+ (ne:W (match_operand:DI 1 "register_operand" "r")
+ (const_int 0)))]
+ "TARGET_ARCH64 && !TARGET_VIS3"
+ "#"
+ "&& !reg_overlap_mentioned_p (operands[1], operands[0])"
+ [(set (match_dup 0) (const_int 0))
+ (set (match_dup 0) (if_then_else:W (ne:DI (match_dup 1) (const_int 0))
+ (const_int 1)
+ (match_dup 0)))]
+ ""
+ [(set_attr "length" "2")])
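+;;
+;; Without VIS3, the split above is expected to yield a clear followed by a
+;; conditional move on register contents (assumed output, registers
+;; arbitrary):
+;;
+;; mov     0, %o1
+;; movrnz  %o0, 1, %o1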
+
+(define_insn_and_split "*snedi<W:mode>_zero_vis3"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (ne:W (match_operand:DI 1 "register_operand" "r")
+ (const_int 0)))
+ (clobber (reg:CCX CC_REG))]
+ "TARGET_ARCH64 && TARGET_VIS3"
+ "#"
+ ""
+ [(set (reg:CCXC CC_REG) (compare:CCXC (not:DI (match_dup 1)) (const_int -1)))
+ (set (match_dup 0) (ltu:W (reg:CCXC CC_REG) (const_int 0)))]
+ ""
+ [(set_attr "length" "2")])
+
+(define_insn_and_split "*neg_snedi<W:mode>_zero"
+ [(set (match_operand:W 0 "register_operand" "=&r")
+ (neg:W (ne:W (match_operand:DI 1 "register_operand" "r")
+ (const_int 0))))]
+ "TARGET_ARCH64 && !TARGET_SUBXC"
+ "#"
+ "&& !reg_overlap_mentioned_p (operands[1], operands[0])"
+ [(set (match_dup 0) (const_int 0))
+ (set (match_dup 0) (if_then_else:W (ne:DI (match_dup 1) (const_int 0))
+ (const_int -1)
+ (match_dup 0)))]
+ ""
+ [(set_attr "length" "2")])
+
+(define_insn_and_split "*neg_snedi<W:mode>_zero_subxc"
+ [(set (match_operand:W 0 "register_operand" "=&r")
+ (neg:W (ne:W (match_operand:DI 1 "register_operand" "r")
+ (const_int 0))))
+ (clobber (reg:CCX CC_REG))]
+ "TARGET_ARCH64 && TARGET_SUBXC"
+ "#"
+ ""
+ [(set (reg:CCXC CC_REG) (compare:CCXC (not:DI (match_dup 1)) (const_int -1)))
+ (set (match_dup 0) (neg:W (ltu:W (reg:CCXC CC_REG) (const_int 0))))]
+ ""
+ [(set_attr "length" "2")])
+
+(define_insn_and_split "*seqsi<W:mode>_zero"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (eq:W (match_operand:SI 1 "register_operand" "r")
+ (const_int 0)))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ ""
+ [(set (reg:CCC CC_REG) (compare:CCC (not:SI (match_dup 1)) (const_int -1)))
+ (set (match_dup 0) (geu:W (reg:CCC CC_REG) (const_int 0)))]
+ ""
+ [(set_attr "length" "2")])
+
+(define_insn_and_split "*neg_seqsi<W:mode>_zero"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (neg:W (eq:W (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ ""
+ [(set (reg:CCC CC_REG) (compare:CCC (not:SI (match_dup 1)) (const_int -1)))
+ (set (match_dup 0) (neg:W (geu:W (reg:CCC CC_REG) (const_int 0))))]
+ ""
+ [(set_attr "length" "2")])
+
+(define_insn_and_split "*seqdi<W:mode>_zero"
+ [(set (match_operand:W 0 "register_operand" "=&r")
+ (eq:W (match_operand:DI 1 "register_operand" "r")
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "#"
+ "&& !reg_overlap_mentioned_p (operands[1], operands[0])"
+ [(set (match_dup 0) (const_int 0))
+ (set (match_dup 0) (if_then_else:W (eq:DI (match_dup 1) (const_int 0))
+ (const_int 1)
+ (match_dup 0)))]
+ ""
+ [(set_attr "length" "2")])
+
+(define_insn_and_split "*neg_seqdi<W:mode>_zero"
+ [(set (match_operand:W 0 "register_operand" "=&r")
+ (neg:W (eq:W (match_operand:DI 1 "register_operand" "r")
+ (const_int 0))))]
+ "TARGET_ARCH64"
+ "#"
+ "&& !reg_overlap_mentioned_p (operands[1], operands[0])"
+ [(set (match_dup 0) (const_int 0))
+ (set (match_dup 0) (if_then_else:W (eq:DI (match_dup 1) (const_int 0))
+ (const_int -1)
+ (match_dup 0)))]
+ ""
+ [(set_attr "length" "2")])
+
+;; We can also do (x + (i == 0)) and related, so put them in.
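+;;
+;; E.g. "x + (i == 0)" in 32-bit mode (assumed output; %o0 holds x and
+;; %o1 holds i):
+;;
+;; cmp   %g0, %o1      ! carry := (i != 0)
+;; subx  %o0, -1, %o0  ! x - (-1) - carry = x + (i == 0)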
+
+(define_insn_and_split "*plus_snesi<W:mode>_zero"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (plus:W (ne:W (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))
+ (match_operand:W 2 "register_operand" "r")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ ""
+ [(set (reg:CCC CC_REG) (compare:CCC (not:SI (match_dup 1)) (const_int -1)))
+ (set (match_dup 0) (plus:W (ltu:W (reg:CCC CC_REG) (const_int 0))
+ (match_dup 2)))]
+ ""
+ [(set_attr "length" "2")])
+
+(define_insn_and_split "*plus_plus_snesi<W:mode>_zero"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (plus:W (plus:W (ne:W (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))
+ (match_operand:W 2 "register_operand" "r"))
+ (match_operand:W 3 "register_operand" "r")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ ""
+ [(set (reg:CCC CC_REG) (compare:CCC (not:SI (match_dup 1)) (const_int -1)))
+ (set (match_dup 0) (plus:W (plus:W (ltu:W (reg:CCC CC_REG) (const_int 0))
+ (match_dup 2))
+ (match_dup 3)))]
+ ""
+ [(set_attr "length" "2")])
+
+(define_insn_and_split "*plus_snedi<W:mode>_zero"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (plus:W (ne:W (match_operand:DI 1 "register_operand" "r")
+ (const_int 0))
+ (match_operand:W 2 "register_operand" "r")))
+ (clobber (reg:CCX CC_REG))]
+ "TARGET_ARCH64 && TARGET_VIS3"
+ "#"
+ ""
+ [(set (reg:CCXC CC_REG) (compare:CCXC (not:DI (match_dup 1)) (const_int -1)))
+ (set (match_dup 0) (plus:W (ltu:W (reg:CCXC CC_REG) (const_int 0))
+ (match_dup 2)))]
+ ""
+ [(set_attr "length" "2")])
+
+(define_insn_and_split "*plus_plus_snedi<W:mode>_zero"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (plus:W (plus:W (ne:W (match_operand:DI 1 "register_operand" "r")
+ (const_int 0))
+ (match_operand:W 2 "register_operand" "r"))
+ (match_operand:W 3 "register_operand" "r")))
+ (clobber (reg:CCX CC_REG))]
+ "TARGET_ARCH64 && TARGET_VIS3"
+ "#"
+ ""
+ [(set (reg:CCXC CC_REG) (compare:CCXC (not:DI (match_dup 1)) (const_int -1)))
+ (set (match_dup 0) (plus:W (plus:W (ltu:W (reg:CCXC CC_REG) (const_int 0))
+ (match_dup 2))
+ (match_dup 3)))]
+ ""
+ [(set_attr "length" "2")])
+
+(define_insn_and_split "*minus_snesi<W:mode>_zero"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (minus:W (match_operand:W 2 "register_operand" "r")
+ (ne:W (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ ""
+ [(set (reg:CCC CC_REG) (compare:CCC (not:SI (match_dup 1)) (const_int -1)))
+ (set (match_dup 0) (minus:W (match_dup 2)
+ (ltu:W (reg:CCC CC_REG) (const_int 0))))]
+ ""
+ [(set_attr "length" "2")])
+
+(define_insn_and_split "*minus_minus_snesi<W:mode>_zero"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (minus:W (minus:W (match_operand:W 2 "register_operand" "r")
+ (ne:W (match_operand:SI 1 "register_operand" "r")
+ (const_int 0)))
+ (match_operand:W 3 "register_operand" "r")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ ""
+ [(set (reg:CCC CC_REG) (compare:CCC (not:SI (match_dup 1)) (const_int -1)))
+ (set (match_dup 0) (minus:W (minus:W (match_dup 2)
+ (ltu:W (reg:CCC CC_REG) (const_int 0)))
+ (match_dup 3)))]
+ ""
+ [(set_attr "length" "2")])
+
+(define_insn_and_split "*minus_snedi<W:mode>_zero"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (minus:W (match_operand:W 2 "register_operand" "r")
+ (ne:W (match_operand:DI 1 "register_operand" "r")
+ (const_int 0))))
+ (clobber (reg:CCX CC_REG))]
+ "TARGET_ARCH64 && TARGET_SUBXC"
+ "#"
+ ""
+ [(set (reg:CCXC CC_REG) (compare:CCXC (not:DI (match_dup 1)) (const_int -1)))
+ (set (match_dup 0) (minus:W (match_dup 2)
+ (ltu:W (reg:CCXC CC_REG) (const_int 0))))]
+ ""
+ [(set_attr "length" "2")])
+
+(define_insn_and_split "*minus_minus_snedi<W:mode>_zero"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (minus:W (minus:W (match_operand:W 2 "register_operand" "r")
+ (ne:W (match_operand:DI 1 "register_operand" "r")
+ (const_int 0)))
+ (match_operand:W 3 "register_operand" "r")))
+ (clobber (reg:CCX CC_REG))]
+ "TARGET_ARCH64 && TARGET_SUBXC"
+ "#"
+ ""
+ [(set (reg:CCXC CC_REG) (compare:CCXC (not:DI (match_dup 1)) (const_int -1)))
+ (set (match_dup 0) (minus:W (minus:W (match_dup 2)
+ (ltu:W (reg:CCXC CC_REG) (const_int 0)))
+ (match_dup 3)))]
+ ""
+ [(set_attr "length" "2")])
+
+(define_insn_and_split "*plus_seqsi<W:mode>_zero"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (plus:W (eq:W (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))
+ (match_operand:W 2 "register_operand" "r")))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ ""
+ [(set (reg:CCC CC_REG) (compare:CCC (not:SI (match_dup 1)) (const_int -1)))
+ (set (match_dup 0) (plus:W (geu:W (reg:CCC CC_REG) (const_int 0))
+ (match_dup 2)))]
+ ""
+ [(set_attr "length" "2")])
+
+(define_insn_and_split "*minus_seqsi<W:mode>_zero"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (minus:W (match_operand:W 2 "register_operand" "r")
+ (eq:W (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "#"
+ ""
+ [(set (reg:CCC CC_REG) (compare:CCC (not:SI (match_dup 1)) (const_int -1)))
+ (set (match_dup 0) (minus:W (match_dup 2)
+ (geu:W (reg:CCC CC_REG) (const_int 0))))]
+ ""
+ [(set_attr "length" "2")])
+
+;; We can also do GEU and LTU directly, but these operate after a compare.
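+;;
+;; E.g. an unsigned "(a < b)" (assumed output, registers arbitrary):
+;;
+;; cmp   %o0, %o1      ! carry := (a <u b)
+;; addx  %g0, 0, %o0   ! %o0 := carry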
+
+(define_insn "*sltu<W:mode>_insn"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (ltu:W (match_operand 1 "icc_register_operand" "X") (const_int 0)))]
+ "GET_MODE (operands[1]) == CCmode || GET_MODE (operands[1]) == CCCmode"
+ "addx\t%%g0, 0, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*plus_sltu<W:mode>"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (plus:W (ltu:W (match_operand 2 "icc_register_operand" "X")
+ (const_int 0))
+ (match_operand:W 1 "arith_operand" "rI")))]
+ "GET_MODE (operands[2]) == CCmode || GET_MODE (operands[2]) == CCCmode"
+ "addx\t%%g0, %1, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*plus_plus_sltu<W:mode>"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (plus:W (plus:W (ltu:W (match_operand 3 "icc_register_operand" "X")
+ (const_int 0))
+ (match_operand:W 1 "register_operand" "%r"))
+ (match_operand:W 2 "arith_operand" "rI")))]
+ "GET_MODE (operands[3]) == CCmode || GET_MODE (operands[3]) == CCCmode"
+ "addx\t%1, %2, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*neg_sgeu<W:mode>"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (neg:W (geu:W (match_operand 1 "icc_register_operand" "X")
+ (const_int 0))))]
+ "GET_MODE (operands[1]) == CCmode || GET_MODE (operands[1]) == CCCmode"
+ "addx\t%%g0, -1, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*neg_sgeusidi"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (neg:SI (geu:SI (match_operand 1 "icc_register_operand" "X")
+ (const_int 0)))))]
+ "TARGET_ARCH64
+ && (GET_MODE (operands[1]) == CCmode || GET_MODE (operands[1]) == CCCmode)"
+ "addx\t%%g0, -1, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*minus_sgeu<W:mode>"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (minus:W (match_operand:W 1 "register_operand" "r")
+ (geu:W (match_operand 2 "icc_register_operand" "X")
+ (const_int 0))))]
+ "GET_MODE (operands[2]) == CCmode || GET_MODE (operands[2]) == CCCmode"
+ "addx\t%1, -1, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*addx<W:mode>"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (plus:W (plus:W (match_operand:W 1 "register_operand" "%r")
+ (match_operand:W 2 "arith_operand" "rI"))
+ (ltu:W (match_operand 3 "icc_register_operand" "X")
+ (const_int 0))))]
+ "GET_MODE (operands[3]) == CCmode || GET_MODE (operands[3]) == CCCmode"
+ "addx\t%1, %2, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*sltu<W:mode>_insn_vis3"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (ltu:W (match_operand 1 "icc_register_operand" "X") (const_int 0)))]
+ "TARGET_ARCH64 && TARGET_VIS3
+ && (GET_MODE (operands[1]) == CCXmode || GET_MODE (operands[1]) == CCXCmode)"
+ "addxc\t%%g0, %%g0, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*plus_sltu<W:mode>_vis3"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (plus:W (ltu:W (match_operand 2 "icc_register_operand" "X")
+ (const_int 0))
+ (match_operand:W 1 "register_operand" "r")))]
+ "TARGET_ARCH64 && TARGET_VIS3
+ && (GET_MODE (operands[2]) == CCXmode || GET_MODE (operands[2]) == CCXCmode)"
+ "addxc\t%%g0, %1, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*plus_plus_sltu<W:mode>_vis3"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (plus:W (plus:W (ltu:W (match_operand 3 "icc_register_operand" "X")
+ (const_int 0))
+ (match_operand:W 1 "register_operand" "%r"))
+ (match_operand:W 2 "register_operand" "r")))]
+ "TARGET_ARCH64 && TARGET_VIS3
+ && (GET_MODE (operands[3]) == CCXmode || GET_MODE (operands[3]) == CCXCmode)"
+ "addxc\t%1, %2, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*addxc<W:mode>"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (plus:W (plus:W (match_operand:W 1 "register_operand" "%r")
+ (match_operand:W 2 "register_operand" "r"))
+ (ltu:W (match_operand 3 "icc_register_operand" "X")
+ (const_int 0))))]
+ "TARGET_ARCH64 && TARGET_VIS3
+ && (GET_MODE (operands[3]) == CCXmode || GET_MODE (operands[3]) == CCXCmode)"
+ "addxc\t%1, %2, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*neg_sltu<W:mode>"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (neg:W (ltu:W (match_operand 1 "icc_register_operand" "X")
+ (const_int 0))))]
+ "GET_MODE (operands[1]) == CCmode || GET_MODE (operands[1]) == CCCmode"
+ "subx\t%%g0, 0, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*neg_sltusidi"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (neg:SI (ltu:SI (match_operand 1 "icc_register_operand" "X")
+ (const_int 0)))))]
+ "TARGET_ARCH64
+ && (GET_MODE (operands[1]) == CCmode || GET_MODE (operands[1]) == CCCmode)"
+ "subx\t%%g0, 0, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*minus_neg_sltu<W:mode>"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (minus:W (neg:W (ltu:W (match_operand 2 "icc_register_operand" "X")
+ (const_int 0)))
+ (match_operand:W 1 "arith_operand" "rI")))]
+ "GET_MODE (operands[2]) == CCmode || GET_MODE (operands[2]) == CCCmode"
+ "subx\t%%g0, %1, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*neg_plus_sltu<W:mode>"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (neg:W (plus:W (ltu:W (match_operand 2 "icc_register_operand" "X")
+ (const_int 0))
+ (match_operand:W 1 "arith_operand" "rI"))))]
+ "GET_MODE (operands[2]) == CCmode || GET_MODE (operands[2]) == CCCmode"
+ "subx\t%%g0, %1, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*minus_sltu<W:mode>"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (minus:W (match_operand:W 1 "register_operand" "r")
+ (ltu:W (match_operand 2 "icc_register_operand" "X")
+ (const_int 0))))]
+ "GET_MODE (operands[2]) == CCmode || GET_MODE (operands[2]) == CCCmode"
+ "subx\t%1, 0, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*minus_minus_sltu<W:mode>"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (minus:W (minus:W (match_operand:W 1 "register_or_zero_operand" "rJ")
+ (ltu:W (match_operand 3 "icc_register_operand" "X")
+ (const_int 0)))
+ (match_operand:W 2 "arith_operand" "rI")))]
+ "GET_MODE (operands[3]) == CCmode || GET_MODE (operands[3]) == CCCmode"
+ "subx\t%r1, %2, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*sgeu<W:mode>_insn"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (geu:W (match_operand 1 "icc_register_operand" "X") (const_int 0)))]
+ "GET_MODE (operands[1]) == CCmode || GET_MODE (operands[1]) == CCCmode"
+ "subx\t%%g0, -1, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*plus_sgeu<W:mode>"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (plus:W (geu:W (match_operand 2 "icc_register_operand" "X")
+ (const_int 0))
+ (match_operand:W 1 "register_operand" "r")))]
+ "GET_MODE (operands[2]) == CCmode || GET_MODE (operands[2]) == CCCmode"
+ "subx\t%1, -1, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*subx<W:mode>"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (minus:W (minus:W (match_operand:W 1 "register_or_zero_operand" "rJ")
+ (match_operand:W 2 "arith_operand" "rI"))
+ (ltu:W (match_operand 3 "icc_register_operand" "X")
+ (const_int 0))))]
+ "GET_MODE (operands[3]) == CCmode || GET_MODE (operands[3]) == CCCmode"
+ "subx\t%r1, %2, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*neg_sltu<W:mode>_subxc"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (neg:W (ltu:W (match_operand 1 "icc_register_operand" "X")
+ (const_int 0))))]
+ "TARGET_ARCH64 && TARGET_SUBXC
+ && (GET_MODE (operands[1]) == CCXmode || GET_MODE (operands[1]) == CCXCmode)"
+ "subxc\t%%g0, %%g0, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*minus_neg_sltu<W:mode>_subxc"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (minus:W (neg:W (ltu:W (match_operand 2 "icc_register_operand" "X")
+ (const_int 0)))
+ (match_operand:W 1 "register_operand" "r")))]
+ "TARGET_ARCH64 && TARGET_SUBXC
+ && (GET_MODE (operands[2]) == CCXmode || GET_MODE (operands[2]) == CCXCmode)"
+ "subxc\t%%g0, %1, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*neg_plus_sltu<W:mode>_subxc"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (neg:W (plus:W (ltu:W (match_operand 2 "icc_register_operand" "X")
+ (const_int 0))
+ (match_operand:W 1 "register_operand" "r"))))]
+ "TARGET_ARCH64 && TARGET_SUBXC
+ && (GET_MODE (operands[2]) == CCXmode || GET_MODE (operands[2]) == CCXCmode)"
+ "subxc\t%%g0, %1, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*minus_sltu<W:mode>_subxc"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (minus:W (match_operand:W 1 "register_operand" "r")
+ (ltu:W (match_operand 2 "icc_register_operand" "X")
+ (const_int 0))))]
+ "TARGET_ARCH64 && TARGET_SUBXC
+ && (GET_MODE (operands[2]) == CCXmode || GET_MODE (operands[2]) == CCXCmode)"
+ "subxc\t%1, %%g0, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*minus_minus_sltu<W:mode>_subxc"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (minus:W (minus:W (match_operand:W 1 "register_or_zero_operand" "rJ")
+ (ltu:W (match_operand 3 "icc_register_operand" "X")
+ (const_int 0)))
+ (match_operand:W 2 "register_operand" "r")))]
+ "TARGET_ARCH64 && TARGET_SUBXC
+ && (GET_MODE (operands[3]) == CCXmode || GET_MODE (operands[3]) == CCXCmode)"
+ "subxc\t%r1, %2, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_insn "*subxc<W:mode>"
+ [(set (match_operand:W 0 "register_operand" "=r")
+ (minus:W (minus:W (match_operand:W 1 "register_or_zero_operand" "rJ")
+ (match_operand:W 2 "register_operand" "r"))
+ (ltu:W (match_operand 3 "icc_register_operand" "X")
+ (const_int 0))))]
+ "TARGET_ARCH64 && TARGET_SUBXC
+ && (GET_MODE (operands[3]) == CCXmode || GET_MODE (operands[3]) == CCXCmode)"
+ "subxc\t%r1, %2, %0"
+ [(set_attr "type" "ialuX")])
+
+(define_split
+ [(set (match_operand:W 0 "register_operand" "")
+ (match_operator:W 1 "icc_comparison_operator"
+ [(match_operand 2 "icc_register_operand" "") (const_int 0)]))]
+ "TARGET_V9
+ /* 64-bit LTU is better implemented using addxc with VIS3. */
+ && !(GET_CODE (operands[1]) == LTU
+ && (GET_MODE (operands[2]) == CCXmode
+ || GET_MODE (operands[2]) == CCXCmode)
+ && TARGET_VIS3)
+ /* 32-bit LTU/GEU are better implemented using addx/subx. */
+ && !((GET_CODE (operands[1]) == LTU || GET_CODE (operands[1]) == GEU)
+ && (GET_MODE (operands[2]) == CCmode
+ || GET_MODE (operands[2]) == CCCmode))"
+ [(set (match_dup 0) (const_int 0))
+ (set (match_dup 0)
+ (if_then_else:SI (match_op_dup:W 1 [(match_dup 2) (const_int 0)])
+ (const_int 1)
+ (match_dup 0)))]
+ "")
+
+;; These control RTL generation for conditional jump insns
+
+(define_expand "cbranchcc4"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "compare_operand" "")
+ (match_operand 2 "const_zero_operand" "")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "cbranchsi4"
+ [(use (match_operator 0 "comparison_operator"
+ [(match_operand:SI 1 "compare_operand" "")
+ (match_operand:SI 2 "arith_operand" "")]))
+ (use (match_operand 3 ""))]
+ ""
+{
+ if (GET_CODE (operands[1]) == ZERO_EXTRACT && operands[2] != const0_rtx)
+ operands[1] = force_reg (SImode, operands[1]);
+ emit_conditional_branch_insn (operands);
+ DONE;
+})
+
+(define_expand "cbranchdi4"
+ [(use (match_operator 0 "comparison_operator"
+ [(match_operand:DI 1 "compare_operand" "")
+ (match_operand:DI 2 "arith_operand" "")]))
+ (use (match_operand 3 ""))]
+ "TARGET_ARCH64"
+{
+ if (GET_CODE (operands[1]) == ZERO_EXTRACT && operands[2] != const0_rtx)
+ operands[1] = force_reg (DImode, operands[1]);
+ emit_conditional_branch_insn (operands);
+ DONE;
+})
+
+(define_expand "cbranch<F:mode>4"
+ [(use (match_operator 0 "comparison_operator"
+ [(match_operand:F 1 "register_operand" "")
+ (match_operand:F 2 "register_operand" "")]))
+ (use (match_operand 3 ""))]
+ "TARGET_FPU"
+{
+ emit_conditional_branch_insn (operands);
+ DONE;
+})
+
+
+;; Now match both normal and inverted jump.
+
+;; XXX fpcmp nop braindamage
+(define_insn "*normal_branch"
+ [(set (pc)
+ (if_then_else (match_operator 0 "icc_comparison_operator"
+ [(reg CC_REG) (const_int 0)])
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+{
+ return output_cbranch (operands[0], operands[1], 1, 0,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ insn);
+}
+ [(set_attr "type" "branch")
+ (set_attr "branch_type" "icc")])
+
+;; XXX fpcmp nop braindamage
+(define_insn "*inverted_branch"
+ [(set (pc)
+ (if_then_else (match_operator 0 "icc_comparison_operator"
+ [(reg CC_REG) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 1 "" ""))))]
+ ""
+{
+ return output_cbranch (operands[0], operands[1], 1, 1,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ insn);
+}
+ [(set_attr "type" "branch")
+ (set_attr "branch_type" "icc")])
+
+;; XXX fpcmp nop braindamage
+(define_insn "*normal_fp_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand:CCFP 0 "fcc_register_operand" "c")
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+{
+ return output_cbranch (operands[1], operands[2], 2, 0,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ insn);
+}
+ [(set_attr "type" "branch")
+ (set_attr "branch_type" "fcc")])
+
+;; XXX fpcmp nop braindamage
+(define_insn "*inverted_fp_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand:CCFP 0 "fcc_register_operand" "c")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 2 "" ""))))]
+ ""
+{
+ return output_cbranch (operands[1], operands[2], 2, 1,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ insn);
+}
+ [(set_attr "type" "branch")
+ (set_attr "branch_type" "fcc")])
+
+;; XXX fpcmp nop braindamage
+(define_insn "*normal_fpe_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand:CCFPE 0 "fcc_register_operand" "c")
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+{
+ return output_cbranch (operands[1], operands[2], 2, 0,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ insn);
+}
+ [(set_attr "type" "branch")
+ (set_attr "branch_type" "fcc")])
+
+;; XXX fpcmp nop braindamage
+(define_insn "*inverted_fpe_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand:CCFPE 0 "fcc_register_operand" "c")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 2 "" ""))))]
+ ""
+{
+ return output_cbranch (operands[1], operands[2], 2, 1,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ insn);
+}
+ [(set_attr "type" "branch")
+ (set_attr "branch_type" "fcc")])
+
+;; SPARC V9-specific jump insns. None of these are guaranteed to be
+;; in the architecture.
+
+(define_insn "*cbcond_sp32"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith5_operand" "rA")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "TARGET_CBCOND"
+{
+ return output_cbcond (operands[0], operands[3], insn);
+}
+ [(set_attr "type" "cbcond")])
+
+(define_insn "*cbcond_sp64"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "arith5_operand" "rA")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "TARGET_ARCH64 && TARGET_CBCOND"
+{
+ return output_cbcond (operands[0], operands[3], insn);
+}
+ [(set_attr "type" "cbcond")])
+
+;; There are no 32-bit brreg insns.
+
+(define_insn "*normal_int_branch_sp64"
+ [(set (pc)
+ (if_then_else (match_operator 0 "v9_register_comparison_operator"
+ [(match_operand:DI 1 "register_operand" "r")
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ "TARGET_ARCH64"
+{
+ return output_v9branch (operands[0], operands[2], 1, 2, 0,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ insn);
+}
+ [(set_attr "type" "branch")
+ (set_attr "branch_type" "reg")])
+
+(define_insn "*inverted_int_branch_sp64"
+ [(set (pc)
+ (if_then_else (match_operator 0 "v9_register_comparison_operator"
+ [(match_operand:DI 1 "register_operand" "r")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 2 "" ""))))]
+ "TARGET_ARCH64"
+{
+ return output_v9branch (operands[0], operands[2], 1, 2, 1,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ insn);
+}
+ [(set_attr "type" "branch")
+ (set_attr "branch_type" "reg")])
+
+
+;; Load into operand 0 the (absolute) address of operand 1, which is a symbolic
+;; value subject to a PC-relative relocation. Operand 2 is a helper function
+;; that adds the PC value at the call point to register #(operand 3).
+;;
+;; Even on V9 we use this call sequence with a stub, instead of "rd %pc, ..."
+;; because the RDPC instruction is extremely expensive and incurs a complete
+;; instruction pipeline flush.
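+;;
+;; The emitted sequence has the following shape (an illustrative sketch of
+;; the delayed-branch case, with _GLOBAL_OFFSET_TABLE_ and the usual
+;; __sparc_get_pc_thunk.l7 helper standing in for the operands):
+;;
+;; sethi  %hi(_GLOBAL_OFFSET_TABLE_-4), %l7
+;; call   __sparc_get_pc_thunk.l7
+;;  add   %l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7
+;;
+;; where the helper simply executes "retl; add %o7, %l7, %l7".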
  15621. +
  15622. +(define_insn "load_pcrel_sym<P:mode>"
  15623. + [(set (match_operand:P 0 "register_operand" "=r")
  15624. + (unspec:P [(match_operand:P 1 "symbolic_operand" "")
  15625. + (match_operand:P 2 "call_address_operand" "")
  15626. + (match_operand:P 3 "const_int_operand" "")]
  15627. + UNSPEC_LOAD_PCREL_SYM))
  15628. + (clobber (reg:P O7_REG))]
  15629. + "REGNO (operands[0]) == INTVAL (operands[3])"
  15630. +{
  15631. + return output_load_pcrel_sym (operands);
  15632. +}
  15633. + [(set (attr "type") (const_string "multi"))
  15634. + (set (attr "length")
  15635. + (if_then_else (eq_attr "delayed_branch" "true")
  15636. + (const_int 3)
  15637. + (const_int 4)))])
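+
+;; The classic use of this pattern is the PIC register setup, which
+;; comes out roughly as the sequence below (the usual thunk name and
+;; register are assumed here, purely for illustration):
+;;
+;;	sethi	%hi(_GLOBAL_OFFSET_TABLE_-4), %l7
+;;	call	__sparc_get_pc_thunk.l7
+;;	 add	%l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7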
+
+
+;; Integer move instructions
+
+(define_expand "movqi"
+  [(set (match_operand:QI 0 "nonimmediate_operand" "")
+        (match_operand:QI 1 "general_operand" ""))]
+  ""
+{
+  if (sparc_expand_move (QImode, operands))
+    DONE;
+})
+
+(define_insn "*movqi_insn"
+  [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m")
+        (match_operand:QI 1 "input_operand" "rI,m,rJ"))]
+  "(register_operand (operands[0], QImode)
+    || register_or_zero_operand (operands[1], QImode))"
+  "@
+   mov\t%1, %0
+   ldub\t%1, %0
+   stb\t%r1, %0"
+  [(set_attr "type" "*,load,store")
+   (set_attr "subtype" "*,regular,*")
+   (set_attr "us3load_type" "*,3cycle,*")])
+
+(define_expand "movhi"
+  [(set (match_operand:HI 0 "nonimmediate_operand" "")
+        (match_operand:HI 1 "general_operand" ""))]
+  ""
+{
+  if (sparc_expand_move (HImode, operands))
+    DONE;
+})
+
+(define_insn "*movhi_insn"
+  [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,m")
+        (match_operand:HI 1 "input_operand" "rI,K,m,rJ"))]
+  "(register_operand (operands[0], HImode)
+    || register_or_zero_operand (operands[1], HImode))"
+  "@
+   mov\t%1, %0
+   sethi\t%%hi(%a1), %0
+   lduh\t%1, %0
+   sth\t%r1, %0"
+  [(set_attr "type" "*,*,load,store")
+   (set_attr "subtype" "*,*,regular,*")
+   (set_attr "us3load_type" "*,*,3cycle,*")])
+
+;; We always work with constants here.
+(define_insn "*movhi_lo_sum"
+  [(set (match_operand:HI 0 "register_operand" "=r")
+        (ior:HI (match_operand:HI 1 "register_operand" "%r")
+                (match_operand:HI 2 "small_int_operand" "I")))]
+  ""
+  "or\t%1, %2, %0")
+
+(define_expand "movsi"
+  [(set (match_operand:SI 0 "nonimmediate_operand" "")
+        (match_operand:SI 1 "general_operand" ""))]
+  ""
+{
+  if (sparc_expand_move (SImode, operands))
+    DONE;
+})
+
+(define_insn "*movsi_insn"
+  [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r, m, r,*f,?*f,?*f, m,d,d")
+        (match_operand:SI 1 "input_operand" "rI,K,m,rJ,*f, r, f, m,?*f,J,P"))]
+  "register_operand (operands[0], SImode)
+   || register_or_zero_or_all_ones_operand (operands[1], SImode)"
+  "@
+   mov\t%1, %0
+   sethi\t%%hi(%a1), %0
+   ld\t%1, %0
+   st\t%r1, %0
+   movstouw\t%1, %0
+   movwtos\t%1, %0
+   fmovs\t%1, %0
+   ld\t%1, %0
+   st\t%1, %0
+   fzeros\t%0
+   fones\t%0"
+  [(set_attr "type" "*,*,load,store,vismv,vismv,fpmove,fpload,fpstore,visl,visl")
+   (set_attr "subtype" "*,*,regular,*,movstouw,single,*,*,*,single,single")
+   (set_attr "cpu_feature" "*,*,*,*,vis3,vis3,*,*,*,vis,vis")])
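+
+;; To recap the constant alternatives: "I" covers signed 13-bit
+;; immediates handled by a single mov, "K" covers values with the low
+;; ten bits clear handled by a single sethi, and anything else is
+;; built with the usual two-insn idiom, e.g. (illustrative constant):
+;;
+;;	sethi	%hi(0x12345678), %o0
+;;	or	%o0, %lo(0x12345678), %o0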
+
+(define_insn "*movsi_lo_sum"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+                   (match_operand:SI 2 "immediate_operand" "in")))]
+  "!flag_pic"
+  "or\t%1, %%lo(%a2), %0")
+
+(define_insn "*movsi_high"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (high:SI (match_operand:SI 1 "immediate_operand" "in")))]
+  "!flag_pic"
+  "sethi\t%%hi(%a1), %0")
+
+;; The next two patterns must wrap the SYMBOL_REF in an UNSPEC
+;; so that CSE won't optimize the address computation away.
+(define_insn "movsi_lo_sum_pic"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+                   (unspec:SI [(match_operand:SI 2 "immediate_operand" "in")]
+                              UNSPEC_MOVE_PIC)))]
+  "flag_pic"
+{
+#ifdef HAVE_AS_SPARC_GOTDATA_OP
+  return "xor\t%1, %%gdop_lox10(%a2), %0";
+#else
+  return "or\t%1, %%lo(%a2), %0";
+#endif
+})
+
+(define_insn "movsi_high_pic"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (high:SI (unspec:SI [(match_operand 1 "" "")] UNSPEC_MOVE_PIC)))]
+  "flag_pic && check_pic (1)"
+{
+#ifdef HAVE_AS_SPARC_GOTDATA_OP
+  return "sethi\t%%gdop_hix22(%a1), %0";
+#else
+  return "sethi\t%%hi(%a1), %0";
+#endif
+})
+
+(define_insn "movsi_pic_gotdata_op"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (unspec:SI [(match_operand:SI 1 "register_operand" "r")
+                    (match_operand:SI 2 "register_operand" "r")
+                    (match_operand 3 "symbolic_operand" "")]
+                   UNSPEC_MOVE_GOTDATA))]
+  "flag_pic && check_pic (1)"
+{
+#ifdef HAVE_AS_SPARC_GOTDATA_OP
+  return "ld\t[%1 + %2], %0, %%gdop(%a3)";
+#else
+  return "ld\t[%1 + %2], %0";
+#endif
+}
+  [(set_attr "type" "load")
+   (set_attr "subtype" "regular")])
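+
+;; With a gotdata-capable assembler, a GOT load of "sym" therefore
+;; comes out roughly as the sequence below; the %gdop relocations let
+;; the linker relax it into a direct add when the symbol turns out to
+;; be local (registers are illustrative):
+;;
+;;	sethi	%gdop_hix22(sym), %g1
+;;	xor	%g1, %gdop_lox10(sym), %g1
+;;	ld	[%l7 + %g1], %g2, %gdop(sym)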
+
+(define_expand "movsi_pic_label_ref"
+  [(set (match_dup 3) (high:SI
+          (unspec:SI [(match_operand:SI 1 "symbolic_operand" "")
+                      (match_dup 2)] UNSPEC_MOVE_PIC_LABEL)))
+   (set (match_dup 4) (lo_sum:SI (match_dup 3)
+          (unspec:SI [(match_dup 1) (match_dup 2)] UNSPEC_MOVE_PIC_LABEL)))
+   (set (match_operand:SI 0 "register_operand" "=r")
+        (minus:SI (match_dup 5) (match_dup 4)))]
+  "flag_pic"
+{
+  crtl->uses_pic_offset_table = 1;
+  operands[2] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
+  if (!can_create_pseudo_p ())
+    {
+      operands[3] = operands[0];
+      operands[4] = operands[0];
+    }
+  else
+    {
+      operands[3] = gen_reg_rtx (SImode);
+      operands[4] = gen_reg_rtx (SImode);
+    }
+  operands[5] = pic_offset_table_rtx;
+})
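+
+;; The three insns above compute a label's address PIC-style: build
+;; the difference _GLOBAL_OFFSET_TABLE_-(label-.) with sethi/or, then
+;; subtract it from the PIC register. Schematically (label and
+;; registers invented):
+;;
+;;	sethi	%hi(_GLOBAL_OFFSET_TABLE_-(.LL1-.)), %o1
+;;	or	%o1, %lo(_GLOBAL_OFFSET_TABLE_-(.LL1-.)), %o1
+;;	sub	%l7, %o1, %o0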
+
+(define_insn "*movsi_high_pic_label_ref"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (high:SI
+          (unspec:SI [(match_operand:SI 1 "symbolic_operand" "")
+                      (match_operand:SI 2 "" "")] UNSPEC_MOVE_PIC_LABEL)))]
+  "flag_pic"
+  "sethi\t%%hi(%a2-(%a1-.)), %0")
+
+(define_insn "*movsi_lo_sum_pic_label_ref"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+                   (unspec:SI [(match_operand:SI 2 "symbolic_operand" "")
+                               (match_operand:SI 3 "" "")] UNSPEC_MOVE_PIC_LABEL)))]
+  "flag_pic"
+  "or\t%1, %%lo(%a3-(%a2-.)), %0")
+
+;; Set up the PIC register for VxWorks.
+
+(define_expand "vxworks_load_got"
+  [(set (match_dup 0)
+        (high:SI (match_dup 1)))
+   (set (match_dup 0)
+        (mem:SI (lo_sum:SI (match_dup 0) (match_dup 1))))
+   (set (match_dup 0)
+        (mem:SI (lo_sum:SI (match_dup 0) (match_dup 2))))]
+  "TARGET_VXWORKS_RTP"
+{
+  operands[0] = pic_offset_table_rtx;
+  operands[1] = gen_rtx_SYMBOL_REF (SImode, VXWORKS_GOTT_BASE);
+  operands[2] = gen_rtx_SYMBOL_REF (SImode, VXWORKS_GOTT_INDEX);
+})
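+
+;; On the assumption that VXWORKS_GOTT_BASE and VXWORKS_GOTT_INDEX
+;; name the usual __GOTT_BASE__ and __GOTT_INDEX__ symbols, the
+;; expansion is a sethi plus two dependent loads into the PIC
+;; register, roughly:
+;;
+;;	sethi	%hi(__GOTT_BASE__), %l7
+;;	ld	[%l7 + %lo(__GOTT_BASE__)], %l7
+;;	ld	[%l7 + %lo(__GOTT_INDEX__)], %l7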
+
+(define_expand "movdi"
+  [(set (match_operand:DI 0 "nonimmediate_operand" "")
+        (match_operand:DI 1 "general_operand" ""))]
+  ""
+{
+  if (sparc_expand_move (DImode, operands))
+    DONE;
+})
+
+;; Be careful, fmovd does not exist when !v9.
+;; We match MEM moves directly when we have correct even
+;; numbered registers, but fall into splits otherwise.
+;; The constraint ordering here is really important to
+;; avoid insane problems in reload, especially for patterns
+;; of the form:
+;;
+;; (set (mem:DI (plus:SI (reg:SI 30 %fp)
+;;                       (const_int -5016)))
+;;      (reg:DI 2 %g2))
+;;
+
+(define_insn "*movdi_insn_sp32"
+  [(set (match_operand:DI 0 "nonimmediate_operand"
+          "=T,o,U,T,r,o,r,r,?*f, T,?*f, o,?*e,?*e, r,?*f,?*e, T,*b,*b")
+        (match_operand:DI 1 "input_operand"
+          " J,J,T,U,o,r,i,r, T,?*f, o,?*f, *e, *e,?*f, r, T,?*e, J, P"))]
+  "TARGET_ARCH32
+   && (register_operand (operands[0], DImode)
+       || register_or_zero_operand (operands[1], DImode))"
+  "@
+   stx\t%r1, %0
+   #
+   ldd\t%1, %0
+   std\t%1, %0
+   ldd\t%1, %0
+   std\t%1, %0
+   #
+   #
+   ldd\t%1, %0
+   std\t%1, %0
+   #
+   #
+   fmovd\t%1, %0
+   #
+   #
+   #
+   ldd\t%1, %0
+   std\t%1, %0
+   fzero\t%0
+   fone\t%0"
+  [(set_attr "type" "store,*,load,store,load,store,*,*,fpload,fpstore,*,*,fpmove,*,*,*,fpload,fpstore,visl,visl")
+   (set_attr "subtype" "*,*,regular,*,regular,*,*,*,*,*,*,*,*,*,*,*,*,*,double,double")
+   (set_attr "length" "*,2,*,*,*,*,2,2,*,*,2,2,*,2,2,2,*,*,*,*")
+   (set_attr "fptype" "*,*,*,*,*,*,*,*,*,*,*,*,double,*,*,*,*,*,double,double")
+   (set_attr "cpu_feature" "v9,*,*,*,*,*,*,*,fpu,fpu,fpu,fpu,v9,fpunotv9,vis3,vis3,fpu,fpu,vis,vis")
+   (set_attr "lra" "*,*,disabled,disabled,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*")])
+
+(define_insn "*movdi_insn_sp64"
+  [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r, m, r,*e,?*e,?*e, W,b,b")
+        (match_operand:DI 1 "input_operand" "rI,N,m,rJ,*e, r, *e, W,?*e,J,P"))]
+  "TARGET_ARCH64
+   && (register_operand (operands[0], DImode)
+       || register_or_zero_or_all_ones_operand (operands[1], DImode))"
+  "@
+   mov\t%1, %0
+   sethi\t%%hi(%a1), %0
+   ldx\t%1, %0
+   stx\t%r1, %0
+   movdtox\t%1, %0
+   movxtod\t%1, %0
+   fmovd\t%1, %0
+   ldd\t%1, %0
+   std\t%1, %0
+   fzero\t%0
+   fone\t%0"
+  [(set_attr "type" "*,*,load,store,vismv,vismv,fpmove,fpload,fpstore,visl,visl")
+   (set_attr "subtype" "*,*,regular,*,movdtox,movxtod,*,*,*,double,double")
+   (set_attr "fptype" "*,*,*,*,*,*,double,*,*,double,double")
+   (set_attr "cpu_feature" "*,*,*,*,vis3,vis3,*,*,*,vis,vis")])
+
+(define_expand "movdi_pic_label_ref"
+  [(set (match_dup 3) (high:DI
+          (unspec:DI [(match_operand:DI 1 "symbolic_operand" "")
+                      (match_dup 2)] UNSPEC_MOVE_PIC_LABEL)))
+   (set (match_dup 4) (lo_sum:DI (match_dup 3)
+          (unspec:DI [(match_dup 1) (match_dup 2)] UNSPEC_MOVE_PIC_LABEL)))
+   (set (match_operand:DI 0 "register_operand" "=r")
+        (minus:DI (match_dup 5) (match_dup 4)))]
+  "TARGET_ARCH64 && flag_pic"
+{
+  crtl->uses_pic_offset_table = 1;
+  operands[2] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
+  if (!can_create_pseudo_p ())
+    {
+      operands[3] = operands[0];
+      operands[4] = operands[0];
+    }
+  else
+    {
+      operands[3] = gen_reg_rtx (DImode);
+      operands[4] = gen_reg_rtx (DImode);
+    }
+  operands[5] = pic_offset_table_rtx;
+})
+
+(define_insn "*movdi_high_pic_label_ref"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (high:DI
+          (unspec:DI [(match_operand:DI 1 "symbolic_operand" "")
+                      (match_operand:DI 2 "" "")] UNSPEC_MOVE_PIC_LABEL)))]
+  "TARGET_ARCH64 && flag_pic"
+  "sethi\t%%hi(%a2-(%a1-.)), %0")
+
+(define_insn "*movdi_lo_sum_pic_label_ref"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+                   (unspec:DI [(match_operand:DI 2 "symbolic_operand" "")
+                               (match_operand:DI 3 "" "")] UNSPEC_MOVE_PIC_LABEL)))]
+  "TARGET_ARCH64 && flag_pic"
+  "or\t%1, %%lo(%a3-(%a2-.)), %0")
+
+;; SPARC-v9 code model support insns. See sparc_emit_set_symbolic_const64
+;; in sparc.c to see what is going on here... PIC stuff comes first.
+
+(define_insn "movdi_lo_sum_pic"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+                   (unspec:DI [(match_operand:DI 2 "immediate_operand" "in")]
+                              UNSPEC_MOVE_PIC)))]
+  "TARGET_ARCH64 && flag_pic"
+{
+#ifdef HAVE_AS_SPARC_GOTDATA_OP
+  return "xor\t%1, %%gdop_lox10(%a2), %0";
+#else
+  return "or\t%1, %%lo(%a2), %0";
+#endif
+})
+
+(define_insn "movdi_high_pic"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (high:DI (unspec:DI [(match_operand 1 "" "")] UNSPEC_MOVE_PIC)))]
+  "TARGET_ARCH64 && flag_pic && check_pic (1)"
+{
+#ifdef HAVE_AS_SPARC_GOTDATA_OP
+  return "sethi\t%%gdop_hix22(%a1), %0";
+#else
+  return "sethi\t%%hi(%a1), %0";
+#endif
+})
+
+(define_insn "movdi_pic_gotdata_op"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (unspec:DI [(match_operand:DI 1 "register_operand" "r")
+                    (match_operand:DI 2 "register_operand" "r")
+                    (match_operand 3 "symbolic_operand" "")]
+                   UNSPEC_MOVE_GOTDATA))]
+  "TARGET_ARCH64 && flag_pic && check_pic (1)"
+{
+#ifdef HAVE_AS_SPARC_GOTDATA_OP
+  return "ldx\t[%1 + %2], %0, %%gdop(%a3)";
+#else
+  return "ldx\t[%1 + %2], %0";
+#endif
+}
+  [(set_attr "type" "load")
+   (set_attr "subtype" "regular")])
+
+(define_insn "*sethi_di_medlow_embmedany_pic"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (high:DI (match_operand:DI 1 "medium_pic_operand" "")))]
+  "(TARGET_CM_MEDLOW || TARGET_CM_EMBMEDANY) && flag_pic && check_pic (1)"
+  "sethi\t%%hi(%a1), %0")
+
+(define_insn "*sethi_di_medlow"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (high:DI (match_operand:DI 1 "symbolic_operand" "")))]
+  "TARGET_CM_MEDLOW && !flag_pic"
+  "sethi\t%%hi(%a1), %0")
+
+(define_insn "*losum_di_medlow"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+                   (match_operand:DI 2 "symbolic_operand" "")))]
+  "TARGET_CM_MEDLOW && !flag_pic"
+  "or\t%1, %%lo(%a2), %0")
+
+(define_insn "seth44"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (high:DI (unspec:DI [(match_operand:DI 1 "symbolic_operand" "")]
+                            UNSPEC_SETH44)))]
+  "TARGET_CM_MEDMID && !flag_pic"
+  "sethi\t%%h44(%a1), %0")
+
+(define_insn "setm44"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+                   (unspec:DI [(match_operand:DI 2 "symbolic_operand" "")]
+                              UNSPEC_SETM44)))]
+  "TARGET_CM_MEDMID && !flag_pic"
+  "or\t%1, %%m44(%a2), %0")
+
+(define_insn "setl44"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+                   (match_operand:DI 2 "symbolic_operand" "")))]
+  "TARGET_CM_MEDMID && !flag_pic"
+  "or\t%1, %%l44(%a2), %0")
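+
+;; Taken together, seth44/setm44/setl44 implement the 44-bit medium/
+;; middle code model address construction; with the shift that
+;; sparc_emit_set_symbolic_const64 interposes, a full address is built
+;; roughly as (register invented for the example):
+;;
+;;	sethi	%h44(sym), %o0
+;;	or	%o0, %m44(sym), %o0
+;;	sllx	%o0, 12, %o0
+;;	or	%o0, %l44(sym), %o0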
+
+(define_insn "sethh"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (high:DI (unspec:DI [(match_operand:DI 1 "symbolic_operand" "")]
+                            UNSPEC_SETHH)))]
+  "TARGET_CM_MEDANY && !flag_pic"
+  "sethi\t%%hh(%a1), %0")
+
+(define_insn "setlm"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (high:DI (unspec:DI [(match_operand:DI 1 "symbolic_operand" "")]
+                            UNSPEC_SETLM)))]
+  "TARGET_CM_MEDANY && !flag_pic"
+  "sethi\t%%lm(%a1), %0")
+
+(define_insn "sethm"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+                   (unspec:DI [(match_operand:DI 2 "symbolic_operand" "")]
+                              UNSPEC_EMB_SETHM)))]
+  "TARGET_CM_MEDANY && !flag_pic"
+  "or\t%1, %%hm(%a2), %0")
+
+(define_insn "setlo"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+                   (match_operand:DI 2 "symbolic_operand" "")))]
+  "TARGET_CM_MEDANY && !flag_pic"
+  "or\t%1, %%lo(%a2), %0")
+
+(define_insn "embmedany_sethi"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (high:DI (unspec:DI [(match_operand:DI 1 "data_segment_operand" "")]
+                            UNSPEC_EMB_HISUM)))]
+  "TARGET_CM_EMBMEDANY && !flag_pic"
+  "sethi\t%%hi(%a1), %0")
+
+(define_insn "embmedany_losum"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+                   (match_operand:DI 2 "data_segment_operand" "")))]
+  "TARGET_CM_EMBMEDANY && !flag_pic"
+  "add\t%1, %%lo(%a2), %0")
+
+(define_insn "embmedany_brsum"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (unspec:DI [(match_operand:DI 1 "register_operand" "r")]
+                   UNSPEC_EMB_HISUM))]
+  "TARGET_CM_EMBMEDANY && !flag_pic"
+  "add\t%1, %_, %0")
+
+(define_insn "embmedany_textuhi"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (high:DI (unspec:DI [(match_operand:DI 1 "text_segment_operand" "")]
+                            UNSPEC_EMB_TEXTUHI)))]
+  "TARGET_CM_EMBMEDANY && !flag_pic"
+  "sethi\t%%uhi(%a1), %0")
+
+(define_insn "embmedany_texthi"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (high:DI (unspec:DI [(match_operand:DI 1 "text_segment_operand" "")]
+                            UNSPEC_EMB_TEXTHI)))]
+  "TARGET_CM_EMBMEDANY && !flag_pic"
+  "sethi\t%%hi(%a1), %0")
+
+(define_insn "embmedany_textulo"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+                   (unspec:DI [(match_operand:DI 2 "text_segment_operand" "")]
+                              UNSPEC_EMB_TEXTULO)))]
+  "TARGET_CM_EMBMEDANY && !flag_pic"
+  "or\t%1, %%ulo(%a2), %0")
+
+(define_insn "embmedany_textlo"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+                   (match_operand:DI 2 "text_segment_operand" "")))]
+  "TARGET_CM_EMBMEDANY && !flag_pic"
+  "or\t%1, %%lo(%a2), %0")
+
+;; Now some patterns to help reload out a bit.
+(define_expand "reload_indi"
+  [(parallel [(match_operand:DI 0 "register_operand" "=r")
+              (match_operand:DI 1 "immediate_operand" "")
+              (match_operand:TI 2 "register_operand" "=&r")])]
+  "(TARGET_CM_MEDANY || TARGET_CM_EMBMEDANY) && !flag_pic"
+{
+  sparc_emit_set_symbolic_const64 (operands[0], operands[1], operands[2]);
+  DONE;
+})
+
+(define_expand "reload_outdi"
+  [(parallel [(match_operand:DI 0 "register_operand" "=r")
+              (match_operand:DI 1 "immediate_operand" "")
+              (match_operand:TI 2 "register_operand" "=&r")])]
+  "(TARGET_CM_MEDANY || TARGET_CM_EMBMEDANY) && !flag_pic"
+{
+  sparc_emit_set_symbolic_const64 (operands[0], operands[1], operands[2]);
+  DONE;
+})
+
+;; Split up putting CONSTs and REGs into DI regs when !arch64
+(define_split
+  [(set (match_operand:DI 0 "register_operand" "")
+        (match_operand:DI 1 "const_int_operand" ""))]
+  "reload_completed
+   && TARGET_ARCH32
+   && ((GET_CODE (operands[0]) == REG
+        && SPARC_INT_REG_P (REGNO (operands[0])))
+       || (GET_CODE (operands[0]) == SUBREG
+           && GET_CODE (SUBREG_REG (operands[0])) == REG
+           && SPARC_INT_REG_P (REGNO (SUBREG_REG (operands[0])))))"
+  [(clobber (const_int 0))]
+{
+  HOST_WIDE_INT low = trunc_int_for_mode (INTVAL (operands[1]), SImode);
+  HOST_WIDE_INT high = trunc_int_for_mode (INTVAL (operands[1]) >> 32, SImode);
+  rtx high_part = gen_highpart (SImode, operands[0]);
+  rtx low_part = gen_lowpart (SImode, operands[0]);
+
+  emit_move_insn_1 (high_part, GEN_INT (high));
+
+  /* Slick... but this loses if the constant can be done in one insn.  */
+  if (low == high && !SPARC_SETHI32_P (high) && !SPARC_SIMM13_P (high))
+    emit_move_insn_1 (low_part, high_part);
+  else
+    emit_move_insn_1 (low_part, GEN_INT (low));
+
+  DONE;
+})
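+
+;; As a worked example, 0xdeadbeefdeadbeef on a 32-bit target has
+;; equal halves that each need a sethi/or pair, so the split builds
+;; the high word once and copies it into the low word, three insns
+;; instead of four; a value like 0x100000001, whose halves fit in a
+;; simm13, instead takes the else branch and uses one mov per word.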
+
+(define_split
+  [(set (match_operand:DI 0 "register_operand" "")
+        (match_operand:DI 1 "register_operand" ""))]
+  "reload_completed
+   && (!TARGET_V9
+       || (TARGET_ARCH32
+           && sparc_split_reg_reg_legitimate (operands[0], operands[1])))"
+  [(clobber (const_int 0))]
+{
+  sparc_split_reg_reg (operands[0], operands[1], SImode);
+  DONE;
+})
+
+;; Now handle the cases of memory moves from/to non-even
+;; DI mode register pairs.
+(define_split
+  [(set (match_operand:DI 0 "register_operand" "")
+        (match_operand:DI 1 "memory_operand" ""))]
+  "reload_completed
+   && TARGET_ARCH32
+   && sparc_split_reg_mem_legitimate (operands[0], operands[1])"
+  [(clobber (const_int 0))]
+{
+  sparc_split_reg_mem (operands[0], operands[1], SImode);
+  DONE;
+})
+
+(define_split
+  [(set (match_operand:DI 0 "memory_operand" "")
+        (match_operand:DI 1 "register_operand" ""))]
+  "reload_completed
+   && TARGET_ARCH32
+   && sparc_split_reg_mem_legitimate (operands[1], operands[0])"
+  [(clobber (const_int 0))]
+{
+  sparc_split_mem_reg (operands[0], operands[1], SImode);
+  DONE;
+})
+
+(define_split
+  [(set (match_operand:DI 0 "memory_operand" "")
+        (match_operand:DI 1 "const_zero_operand" ""))]
+  "reload_completed
+   && (!TARGET_V9
+       || (TARGET_ARCH32
+           && !mem_min_alignment (operands[0], 8)))
+   && offsettable_memref_p (operands[0])"
+  [(clobber (const_int 0))]
+{
+  emit_move_insn_1 (adjust_address (operands[0], SImode, 0), const0_rtx);
+  emit_move_insn_1 (adjust_address (operands[0], SImode, 4), const0_rtx);
+  DONE;
+})
+
+(define_expand "movti"
+  [(set (match_operand:TI 0 "nonimmediate_operand" "")
+        (match_operand:TI 1 "general_operand" ""))]
+  "TARGET_ARCH64"
+{
+  if (sparc_expand_move (TImode, operands))
+    DONE;
+})
+
+;; We need to prevent reload from splitting TImode moves, because it
+;; might decide to overwrite a pointer with the value it points to.
+;; In that case we have to do the loads in the appropriate order so
+;; that the pointer is not destroyed too early.
+
+(define_insn "*movti_insn_sp64"
+  [(set (match_operand:TI 0 "nonimmediate_operand" "=r , o,?*e,?o,b")
+        (match_operand:TI 1 "input_operand" "roJ,rJ, eo, e,J"))]
+  "TARGET_ARCH64
+   && !TARGET_HARD_QUAD
+   && (register_operand (operands[0], TImode)
+       || register_or_zero_operand (operands[1], TImode))"
+  "#"
+  [(set_attr "length" "2,2,2,2,2")
+   (set_attr "cpu_feature" "*,*,fpu,fpu,vis")])
+
+(define_insn "*movti_insn_sp64_hq"
+  [(set (match_operand:TI 0 "nonimmediate_operand" "=r , o,?*e,?*e,?m,b")
+        (match_operand:TI 1 "input_operand" "roJ,rJ, e, m, e,J"))]
+  "TARGET_ARCH64
+   && TARGET_HARD_QUAD
+   && (register_operand (operands[0], TImode)
+       || register_or_zero_operand (operands[1], TImode))"
+  "@
+   #
+   #
+   fmovq\t%1, %0
+   ldq\t%1, %0
+   stq\t%1, %0
+   #"
+  [(set_attr "type" "*,*,fpmove,fpload,fpstore,*")
+   (set_attr "length" "2,2,*,*,*,2")])
+
+;; Now all the splits to handle multi-insn TI mode moves.
+(define_split
+  [(set (match_operand:TI 0 "register_operand" "")
+        (match_operand:TI 1 "register_operand" ""))]
+  "reload_completed
+   && ((TARGET_FPU
+        && !TARGET_HARD_QUAD)
+       || (!fp_register_operand (operands[0], TImode)
+           && !fp_register_operand (operands[1], TImode)))"
+  [(clobber (const_int 0))]
+{
+  rtx set_dest = operands[0];
+  rtx set_src = operands[1];
+  rtx dest1, dest2;
+  rtx src1, src2;
+
+  dest1 = gen_highpart (DImode, set_dest);
+  dest2 = gen_lowpart (DImode, set_dest);
+  src1 = gen_highpart (DImode, set_src);
+  src2 = gen_lowpart (DImode, set_src);
+
+  /* Now emit using the real source and destination we found, swapping
+     the order if we detect overlap.  */
+  if (reg_overlap_mentioned_p (dest1, src2))
+    {
+      emit_insn (gen_movdi (dest2, src2));
+      emit_insn (gen_movdi (dest1, src1));
+    }
+  else
+    {
+      emit_insn (gen_movdi (dest1, src1));
+      emit_insn (gen_movdi (dest2, src2));
+    }
+  DONE;
+})
+
+(define_split
+  [(set (match_operand:TI 0 "nonimmediate_operand" "")
+        (match_operand:TI 1 "const_zero_operand" ""))]
+  "reload_completed"
+  [(clobber (const_int 0))]
+{
+  rtx set_dest = operands[0];
+  rtx dest1, dest2;
+
+  switch (GET_CODE (set_dest))
+    {
+    case REG:
+      dest1 = gen_highpart (DImode, set_dest);
+      dest2 = gen_lowpart (DImode, set_dest);
+      break;
+    case MEM:
+      dest1 = adjust_address (set_dest, DImode, 0);
+      dest2 = adjust_address (set_dest, DImode, 8);
+      break;
+    default:
+      gcc_unreachable ();
+    }
+
+  emit_insn (gen_movdi (dest1, const0_rtx));
+  emit_insn (gen_movdi (dest2, const0_rtx));
+  DONE;
+})
+
+(define_split
+  [(set (match_operand:TI 0 "register_operand" "")
+        (match_operand:TI 1 "memory_operand" ""))]
+  "reload_completed
+   && offsettable_memref_p (operands[1])
+   && (!TARGET_HARD_QUAD
+       || !fp_register_operand (operands[0], TImode))"
+  [(clobber (const_int 0))]
+{
+  rtx word0 = adjust_address (operands[1], DImode, 0);
+  rtx word1 = adjust_address (operands[1], DImode, 8);
+  rtx set_dest, dest1, dest2;
+
+  set_dest = operands[0];
+
+  dest1 = gen_highpart (DImode, set_dest);
+  dest2 = gen_lowpart (DImode, set_dest);
+
+  /* Now output, ordering such that we don't clobber any registers
+     mentioned in the address.  */
+  if (reg_overlap_mentioned_p (dest1, word1))
+    {
+      emit_insn (gen_movdi (dest2, word1));
+      emit_insn (gen_movdi (dest1, word0));
+    }
+  else
+    {
+      emit_insn (gen_movdi (dest1, word0));
+      emit_insn (gen_movdi (dest2, word1));
+    }
+  DONE;
+})
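+
+;; Concretely: for something like (set (reg:TI %o0) (mem:TI (reg:DI %o0)))
+;; the high half %o0 is also the address register, so the split loads
+;; the word at offset 8 into %o1 first and only then overwrites %o0
+;; with the word at offset 0 (register choice is illustrative).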
+
+(define_split
+  [(set (match_operand:TI 0 "memory_operand" "")
+        (match_operand:TI 1 "register_operand" ""))]
+  "reload_completed
+   && offsettable_memref_p (operands[0])
+   && (!TARGET_HARD_QUAD
+       || !fp_register_operand (operands[1], TImode))"
+  [(clobber (const_int 0))]
+{
+  rtx set_src = operands[1];
+
+  emit_insn (gen_movdi (adjust_address (operands[0], DImode, 0),
+                        gen_highpart (DImode, set_src)));
+  emit_insn (gen_movdi (adjust_address (operands[0], DImode, 8),
+                        gen_lowpart (DImode, set_src)));
+  DONE;
+})
+
+
+;; Floating point move instructions
+
+(define_expand "movsf"
+  [(set (match_operand:SF 0 "nonimmediate_operand" "")
+        (match_operand:SF 1 "general_operand" ""))]
+  ""
+{
+  if (sparc_expand_move (SFmode, operands))
+    DONE;
+})
+
+(define_insn "*movsf_insn"
+  [(set (match_operand:SF 0 "nonimmediate_operand" "=d,d,f, *r,*r,*r,*r, f,f,*r,m, m")
+        (match_operand:SF 1 "input_operand" "G,C,f,*rR, Q, S, f,*r,m, m,f,*rG"))]
+  "(register_operand (operands[0], SFmode)
+    || register_or_zero_or_all_ones_operand (operands[1], SFmode))"
+{
+  if (GET_CODE (operands[1]) == CONST_DOUBLE
+      && (which_alternative == 3
+          || which_alternative == 4
+          || which_alternative == 5))
+    {
+      long i;
+
+      REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (operands[1]), i);
+      operands[1] = GEN_INT (i);
+    }
+
+  switch (which_alternative)
+    {
+    case 0:
+      return "fzeros\t%0";
+    case 1:
+      return "fones\t%0";
+    case 2:
+      return "fmovs\t%1, %0";
+    case 3:
+      return "mov\t%1, %0";
+    case 4:
+      return "sethi\t%%hi(%a1), %0";
+    case 5:
+      return "#";
+    case 6:
+      return "movstouw\t%1, %0";
+    case 7:
+      return "movwtos\t%1, %0";
+    case 8:
+    case 9:
+      return "ld\t%1, %0";
+    case 10:
+    case 11:
+      return "st\t%r1, %0";
+    default:
+      gcc_unreachable ();
+    }
+}
+  [(set_attr "type" "visl,visl,fpmove,*,*,*,vismv,vismv,fpload,load,fpstore,store")
+   (set_attr "subtype" "single,single,*,*,*,*,movstouw,single,*,regular,*,*")
+   (set_attr "cpu_feature" "vis,vis,fpu,*,*,*,vis3,vis3,fpu,*,fpu,*")])
+
+;; The following 3 patterns build SFmode constants in integer registers.
+
+(define_insn "*movsf_lo_sum"
+  [(set (match_operand:SF 0 "register_operand" "=r")
+        (lo_sum:SF (match_operand:SF 1 "register_operand" "r")
+                   (match_operand:SF 2 "fp_const_high_losum_operand" "S")))]
+  ""
+{
+  long i;
+
+  REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (operands[2]), i);
+  operands[2] = GEN_INT (i);
+  return "or\t%1, %%lo(%a2), %0";
+})
+
+(define_insn "*movsf_high"
+  [(set (match_operand:SF 0 "register_operand" "=r")
+        (high:SF (match_operand:SF 1 "fp_const_high_losum_operand" "S")))]
+  ""
+{
+  long i;
+
+  REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (operands[1]), i);
+  operands[1] = GEN_INT (i);
+  return "sethi\t%%hi(%1), %0";
+})
+
+(define_split
+  [(set (match_operand:SF 0 "register_operand" "")
+        (match_operand:SF 1 "fp_const_high_losum_operand" ""))]
+  "REG_P (operands[0]) && SPARC_INT_REG_P (REGNO (operands[0]))"
+  [(set (match_dup 0) (high:SF (match_dup 1)))
+   (set (match_dup 0) (lo_sum:SF (match_dup 0) (match_dup 1)))])
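+
+;; In other words, an SF constant headed for an integer register is
+;; treated as its 32-bit image: 1.0f (0x3f800000) needs only
+;; "sethi %hi(0x3f800000), %o0", while an image with nonzero low ten
+;; bits gets the sethi/or pair via the split above (register invented
+;; for the example).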
+
+(define_expand "movdf"
+  [(set (match_operand:DF 0 "nonimmediate_operand" "")
+        (match_operand:DF 1 "general_operand" ""))]
+  ""
+{
+  if (sparc_expand_move (DFmode, operands))
+    DONE;
+})
+
+(define_insn "*movdf_insn_sp32"
+  [(set (match_operand:DF 0 "nonimmediate_operand"
+          "=T,o,b,b,e,e,*r, f, e,T,U,T, f,o, *r,*r, o")
+        (match_operand:DF 1 "input_operand"
+          " G,G,G,C,e,e, f,*r,T#F,e,T,U,o#F,f,*rF, o,*r"))]
+  "TARGET_ARCH32
+   && (register_operand (operands[0], DFmode)
+       || register_or_zero_or_all_ones_operand (operands[1], DFmode))"
+  "@
+   stx\t%r1, %0
+   #
+   fzero\t%0
+   fone\t%0
+   fmovd\t%1, %0
+   #
+   #
+   #
+   ldd\t%1, %0
+   std\t%1, %0
+   ldd\t%1, %0
+   std\t%1, %0
+   #
+   #
+   #
+   ldd\t%1, %0
+   std\t%1, %0"
+  [(set_attr "type" "store,*,visl,visl,fpmove,*,*,*,fpload,fpstore,load,store,*,*,*,load,store")
+   (set_attr "subtype" "*,*,double,double,*,*,*,*,*,*,regular,*,*,*,*,regular,*")
+   (set_attr "length" "*,2,*,*,*,2,2,2,*,*,*,*,2,2,2,*,*")
+   (set_attr "fptype" "*,*,double,double,double,*,*,*,*,*,*,*,*,*,*,*,*")
+   (set_attr "cpu_feature" "v9,*,vis,vis,v9,fpunotv9,vis3,vis3,fpu,fpu,*,*,fpu,fpu,*,*,*")
+   (set_attr "lra" "*,*,*,*,*,*,*,*,*,*,disabled,disabled,*,*,*,*,*")])
+
+(define_insn "*movdf_insn_sp64"
+  [(set (match_operand:DF 0 "nonimmediate_operand" "=b,b,e,*r, e, e,W, *r,*r, m,*r")
+        (match_operand:DF 1 "input_operand" "G,C,e, e,*r,W#F,e,*rG, m,*rG, F"))]
+  "TARGET_ARCH64
+   && (register_operand (operands[0], DFmode)
+       || register_or_zero_or_all_ones_operand (operands[1], DFmode))"
+  "@
+   fzero\t%0
+   fone\t%0
+   fmovd\t%1, %0
+   movdtox\t%1, %0
+   movxtod\t%1, %0
+   ldd\t%1, %0
+   std\t%1, %0
+   mov\t%r1, %0
+   ldx\t%1, %0
+   stx\t%r1, %0
+   #"
+  [(set_attr "type" "visl,visl,fpmove,vismv,vismv,load,store,*,load,store,*")
+   (set_attr "subtype" "double,double,*,movdtox,movxtod,regular,*,*,regular,*,*")
+   (set_attr "length" "*,*,*,*,*,*,*,*,*,*,2")
+   (set_attr "fptype" "double,double,double,double,double,*,*,*,*,*,*")
+   (set_attr "cpu_feature" "vis,vis,fpu,vis3,vis3,fpu,fpu,*,*,*,*")])
+
+;; This pattern builds DFmode constants in integer registers.
+(define_split
+  [(set (match_operand:DF 0 "register_operand" "")
+        (match_operand:DF 1 "const_double_operand" ""))]
+  "reload_completed
+   && REG_P (operands[0])
+   && SPARC_INT_REG_P (REGNO (operands[0]))
+   && !const_zero_operand (operands[1], GET_MODE (operands[0]))"
+  [(clobber (const_int 0))]
+{
+  operands[0] = gen_raw_REG (DImode, REGNO (operands[0]));
+
+  if (TARGET_ARCH64)
+    {
+      rtx tem = simplify_subreg (DImode, operands[1], DFmode, 0);
+      emit_insn (gen_movdi (operands[0], tem));
+    }
+  else
+    {
+      rtx hi = simplify_subreg (SImode, operands[1], DFmode, 0);
+      rtx lo = simplify_subreg (SImode, operands[1], DFmode, 4);
+      rtx high_part = gen_highpart (SImode, operands[0]);
+      rtx low_part = gen_lowpart (SImode, operands[0]);
+
+      gcc_assert (GET_CODE (hi) == CONST_INT);
+      gcc_assert (GET_CODE (lo) == CONST_INT);
+
+      emit_move_insn_1 (high_part, hi);
+
+      /* Slick... but this loses if the constant can be done in one insn.  */
+      if (lo == hi
+          && !SPARC_SETHI32_P (INTVAL (hi))
+          && !SPARC_SIMM13_P (INTVAL (hi)))
+        emit_move_insn_1 (low_part, high_part);
+      else
+        emit_move_insn_1 (low_part, lo);
+    }
+  DONE;
+})
+
+;; Ok, now the splits to handle all the multi insn and
+;; mis-aligned memory address cases.
+;; In these splits please take note that we must be
+;; careful when V9 but not ARCH64 because the integer
+;; register DFmode cases must be handled.
+(define_split
+  [(set (match_operand:DF 0 "register_operand" "")
+        (match_operand:DF 1 "const_zero_operand" ""))]
+  "reload_completed
+   && TARGET_ARCH32
+   && ((GET_CODE (operands[0]) == REG
+        && SPARC_INT_REG_P (REGNO (operands[0])))
+       || (GET_CODE (operands[0]) == SUBREG
+           && GET_CODE (SUBREG_REG (operands[0])) == REG
+           && SPARC_INT_REG_P (REGNO (SUBREG_REG (operands[0])))))"
+  [(clobber (const_int 0))]
+{
+  emit_move_insn_1 (gen_highpart (SFmode, operands[0]), CONST0_RTX (SFmode));
+  emit_move_insn_1 (gen_lowpart (SFmode, operands[0]), CONST0_RTX (SFmode));
+  DONE;
+})
+
+(define_split
+  [(set (match_operand:DF 0 "register_operand" "")
+        (match_operand:DF 1 "register_operand" ""))]
+  "reload_completed
+   && (!TARGET_V9
+       || (TARGET_ARCH32
+           && sparc_split_reg_reg_legitimate (operands[0], operands[1])))"
+  [(clobber (const_int 0))]
+{
+  sparc_split_reg_reg (operands[0], operands[1], SFmode);
+  DONE;
+})
+
+(define_split
+  [(set (match_operand:DF 0 "register_operand" "")
+        (match_operand:DF 1 "memory_operand" ""))]
+  "reload_completed
+   && TARGET_ARCH32
+   && sparc_split_reg_mem_legitimate (operands[0], operands[1])"
+  [(clobber (const_int 0))]
+{
+  sparc_split_reg_mem (operands[0], operands[1], SFmode);
+  DONE;
+})
+
+(define_split
+  [(set (match_operand:DF 0 "memory_operand" "")
+        (match_operand:DF 1 "register_operand" ""))]
+  "reload_completed
+   && TARGET_ARCH32
+   && sparc_split_reg_mem_legitimate (operands[1], operands[0])"
+  [(clobber (const_int 0))]
+{
+  sparc_split_mem_reg (operands[0], operands[1], SFmode);
+  DONE;
+})
+
+(define_split
+  [(set (match_operand:DF 0 "memory_operand" "")
+        (match_operand:DF 1 "const_zero_operand" ""))]
+  "reload_completed
+   && (!TARGET_V9
+       || (TARGET_ARCH32
+           && !mem_min_alignment (operands[0], 8)))
+   && offsettable_memref_p (operands[0])"
+  [(clobber (const_int 0))]
+{
+  emit_move_insn_1 (adjust_address (operands[0], SFmode, 0), CONST0_RTX (SFmode));
+  emit_move_insn_1 (adjust_address (operands[0], SFmode, 4), CONST0_RTX (SFmode));
+  DONE;
+})
+
+(define_expand "movtf"
+  [(set (match_operand:TF 0 "nonimmediate_operand" "")
+        (match_operand:TF 1 "general_operand" ""))]
+  ""
+{
+  if (sparc_expand_move (TFmode, operands))
+    DONE;
+})
+
+(define_insn "*movtf_insn_sp32"
+  [(set (match_operand:TF 0 "nonimmediate_operand" "=b, e,o, o, r")
+        (match_operand:TF 1 "input_operand" " G,oe,e,rG,roG"))]
+  "TARGET_ARCH32
+   && (register_operand (operands[0], TFmode)
+       || register_or_zero_operand (operands[1], TFmode))"
+  "#"
+  [(set_attr "length" "4,4,4,4,4")
+   (set_attr "cpu_feature" "fpu,fpu,fpu,*,*")])
+
+(define_insn "*movtf_insn_sp64"
+  [(set (match_operand:TF 0 "nonimmediate_operand" "=b, e,o, o, r")
+        (match_operand:TF 1 "input_operand" "G,oe,e,rG,roG"))]
+  "TARGET_ARCH64
+   && !TARGET_HARD_QUAD
+   && (register_operand (operands[0], TFmode)
+       || register_or_zero_operand (operands[1], TFmode))"
+  "#"
+  [(set_attr "length" "2,2,2,2,2")
+   (set_attr "cpu_feature" "fpu,fpu,fpu,*,*")])
+
+(define_insn "*movtf_insn_sp64_hq"
+  [(set (match_operand:TF 0 "nonimmediate_operand" "=b,e,e,m, o, r")
+        (match_operand:TF 1 "input_operand" "G,e,m,e,rG,roG"))]
+  "TARGET_ARCH64
+   && TARGET_HARD_QUAD
+   && (register_operand (operands[0], TFmode)
+       || register_or_zero_operand (operands[1], TFmode))"
+  "@
+   #
+   fmovq\t%1, %0
+   ldq\t%1, %0
+   stq\t%1, %0
+   #
+   #"
+  [(set_attr "type" "*,fpmove,fpload,fpstore,*,*")
+   (set_attr "length" "2,*,*,*,2,2")])
+
+;; Now all the splits to handle multi-insn TF mode moves.
+(define_split
+  [(set (match_operand:TF 0 "register_operand" "")
+        (match_operand:TF 1 "register_operand" ""))]
+  "reload_completed
+   && (TARGET_ARCH32
+       || (TARGET_FPU
+           && !TARGET_HARD_QUAD)
+       || (!fp_register_operand (operands[0], TFmode)
+           && !fp_register_operand (operands[1], TFmode)))"
+  [(clobber (const_int 0))]
+{
+  rtx set_dest = operands[0];
+  rtx set_src = operands[1];
+  rtx dest1, dest2;
+  rtx src1, src2;
+
+  dest1 = gen_df_reg (set_dest, 0);
+  dest2 = gen_df_reg (set_dest, 1);
+  src1 = gen_df_reg (set_src, 0);
+  src2 = gen_df_reg (set_src, 1);
+
+  /* Now emit using the real source and destination we found, swapping
+     the order if we detect overlap.  */
+  if (reg_overlap_mentioned_p (dest1, src2))
+    {
+      emit_insn (gen_movdf (dest2, src2));
+      emit_insn (gen_movdf (dest1, src1));
+    }
+  else
+    {
+      emit_insn (gen_movdf (dest1, src1));
+      emit_insn (gen_movdf (dest2, src2));
+    }
+  DONE;
+})
+
+(define_split
+  [(set (match_operand:TF 0 "nonimmediate_operand" "")
+        (match_operand:TF 1 "const_zero_operand" ""))]
+  "reload_completed"
+  [(clobber (const_int 0))]
+{
+  rtx set_dest = operands[0];
+  rtx dest1, dest2;
+
+  switch (GET_CODE (set_dest))
+    {
+    case REG:
+      dest1 = gen_df_reg (set_dest, 0);
+      dest2 = gen_df_reg (set_dest, 1);
+      break;
+    case MEM:
+      dest1 = adjust_address (set_dest, DFmode, 0);
+      dest2 = adjust_address (set_dest, DFmode, 8);
+      break;
+    default:
+      gcc_unreachable ();
+    }
+
+  emit_insn (gen_movdf (dest1, CONST0_RTX (DFmode)));
+  emit_insn (gen_movdf (dest2, CONST0_RTX (DFmode)));
+  DONE;
+})
+
+(define_split
+  [(set (match_operand:TF 0 "register_operand" "")
+        (match_operand:TF 1 "memory_operand" ""))]
+  "(reload_completed
+    && offsettable_memref_p (operands[1])
+    && (TARGET_ARCH32
+        || !TARGET_HARD_QUAD
+        || !fp_register_operand (operands[0], TFmode)))"
+  [(clobber (const_int 0))]
+{
+  rtx word0 = adjust_address (operands[1], DFmode, 0);
+  rtx word1 = adjust_address (operands[1], DFmode, 8);
+  rtx set_dest, dest1, dest2;
+
+  set_dest = operands[0];
+
+  dest1 = gen_df_reg (set_dest, 0);
+  dest2 = gen_df_reg (set_dest, 1);
+
+  /* Now output, ordering such that we don't clobber any registers
+     mentioned in the address.  */
+  if (reg_overlap_mentioned_p (dest1, word1))
+    {
+      emit_insn (gen_movdf (dest2, word1));
+      emit_insn (gen_movdf (dest1, word0));
+    }
+  else
+    {
+      emit_insn (gen_movdf (dest1, word0));
+      emit_insn (gen_movdf (dest2, word1));
+    }
+  DONE;
+})
+
+(define_split
+  [(set (match_operand:TF 0 "memory_operand" "")
+        (match_operand:TF 1 "register_operand" ""))]
+  "(reload_completed
+    && offsettable_memref_p (operands[0])
+    && (TARGET_ARCH32
+        || !TARGET_HARD_QUAD
+        || !fp_register_operand (operands[1], TFmode)))"
+  [(clobber (const_int 0))]
+{
+  rtx set_src = operands[1];
+
+  emit_insn (gen_movdf (adjust_address (operands[0], DFmode, 0),
+                        gen_df_reg (set_src, 0)));
+  emit_insn (gen_movdf (adjust_address (operands[0], DFmode, 8),
+                        gen_df_reg (set_src, 1)));
+  DONE;
+})
+
+
+;; SPARC-V9 conditional move instructions
+
+;; We can handle larger constants here for some flavors, but for now we keep
+;; it simple and only allow those constants supported by all flavors.
+;; Note that emit_conditional_move canonicalizes operands 2,3 so that operand
+;; 3 contains the constant if one is present, but we handle either for
+;; generality (sparc.c puts a constant in operand 2).
+;;
+;; Our instruction patterns, on the other hand, canonicalize such that
+;; operand 3 must be the set destination.
+
+(define_expand "mov<I:mode>cc"
+  [(set (match_operand:I 0 "register_operand" "")
+        (if_then_else:I (match_operand 1 "comparison_operator" "")
+                        (match_operand:I 2 "arith10_operand" "")
+                        (match_operand:I 3 "arith10_operand" "")))]
+  "TARGET_V9 && !(<I:MODE>mode == DImode && TARGET_ARCH32)"
+{
+  if (!sparc_expand_conditional_move (<I:MODE>mode, operands))
+    FAIL;
+  DONE;
+})
+
+(define_expand "mov<F:mode>cc"
+  [(set (match_operand:F 0 "register_operand" "")
+        (if_then_else:F (match_operand 1 "comparison_operator" "")
+                        (match_operand:F 2 "register_operand" "")
+                        (match_operand:F 3 "register_operand" "")))]
+  "TARGET_V9 && TARGET_FPU"
+{
+  if (!sparc_expand_conditional_move (<F:MODE>mode, operands))
+    FAIL;
+  DONE;
+})
+
+(define_insn "*mov<I:mode>_cc_v9"
+  [(set (match_operand:I 0 "register_operand" "=r")
+        (if_then_else:I (match_operator 1 "icc_or_fcc_comparison_operator"
+                          [(match_operand 2 "icc_or_fcc_register_operand" "X")
+                           (const_int 0)])
+                        (match_operand:I 3 "arith11_operand" "rL")
+                        (match_operand:I 4 "register_operand" "0")))]
+  "TARGET_V9 && !(<I:MODE>mode == DImode && TARGET_ARCH32)"
+  "mov%C1\t%x2, %3, %0"
+  [(set_attr "type" "cmove")])
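+
+;; The template expands to a V9 conditional move such as
+;; "movg %icc, 5, %o0" or "movle %fcc0, %o1, %o0": %o0 keeps its old
+;; value (operand 4, tied to the output) unless the condition holds
+;; (operands are invented for the example).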
+
+(define_insn "*mov<I:mode>_cc_reg_sp64"
+  [(set (match_operand:I 0 "register_operand" "=r")
+        (if_then_else:I (match_operator 1 "v9_register_comparison_operator"
+                          [(match_operand:DI 2 "register_operand" "r")
+                           (const_int 0)])
+                        (match_operand:I 3 "arith10_operand" "rM")
+                        (match_operand:I 4 "register_operand" "0")))]
+  "TARGET_ARCH64"
+  "movr%D1\t%2, %r3, %0"
+  [(set_attr "type" "cmove")])
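+
+;; Likewise the register-condition form gives MOVR instructions, e.g.
+;; "movrnz %o1, %o2, %o0", which copies %o2 into %o0 only when the
+;; 64-bit register %o1 is nonzero (again an invented example).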
+
+(define_insn "*movsf_cc_v9"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+        (if_then_else:SF (match_operator 1 "icc_or_fcc_comparison_operator"
+                           [(match_operand 2 "icc_or_fcc_register_operand" "X")
+                            (const_int 0)])
+                         (match_operand:SF 3 "register_operand" "f")
+                         (match_operand:SF 4 "register_operand" "0")))]
+  "TARGET_V9 && TARGET_FPU"
+  "fmovs%C1\t%x2, %3, %0"
+  [(set_attr "type" "fpcmove")])
+
+(define_insn "*movsf_cc_reg_sp64"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+        (if_then_else:SF (match_operator 1 "v9_register_comparison_operator"
+                           [(match_operand:DI 2 "register_operand" "r")
+                            (const_int 0)])
+                         (match_operand:SF 3 "register_operand" "f")
+                         (match_operand:SF 4 "register_operand" "0")))]
+  "TARGET_ARCH64 && TARGET_FPU"
+  "fmovrs%D1\t%2, %3, %0"
+  [(set_attr "type" "fpcrmove")])
+
+;; Named because invoked by movtf_cc_v9
+(define_insn "movdf_cc_v9"
+  [(set (match_operand:DF 0 "register_operand" "=e")
+        (if_then_else:DF (match_operator 1 "icc_or_fcc_comparison_operator"
+                           [(match_operand 2 "icc_or_fcc_register_operand" "X")
+                            (const_int 0)])
+                         (match_operand:DF 3 "register_operand" "e")
+                         (match_operand:DF 4 "register_operand" "0")))]
+  "TARGET_V9 && TARGET_FPU"
+  "fmovd%C1\t%x2, %3, %0"
+  [(set_attr "type" "fpcmove")
+   (set_attr "fptype" "double")])
+
+;; Named because invoked by movtf_cc_reg_sp64
+(define_insn "movdf_cc_reg_sp64"
+  [(set (match_operand:DF 0 "register_operand" "=e")
+        (if_then_else:DF (match_operator 1 "v9_register_comparison_operator"
+                           [(match_operand:DI 2 "register_operand" "r")
+                            (const_int 0)])
+                         (match_operand:DF 3 "register_operand" "e")
+                         (match_operand:DF 4 "register_operand" "0")))]
+  "TARGET_ARCH64 && TARGET_FPU"
+  "fmovrd%D1\t%2, %3, %0"
+  [(set_attr "type" "fpcrmove")
+   (set_attr "fptype" "double")])
+
+(define_insn "*movtf_cc_hq_v9"
+  [(set (match_operand:TF 0 "register_operand" "=e")
+        (if_then_else:TF (match_operator 1 "icc_or_fcc_comparison_operator"
+                           [(match_operand 2 "icc_or_fcc_register_operand" "X")
+                            (const_int 0)])
+                         (match_operand:TF 3 "register_operand" "e")
+                         (match_operand:TF 4 "register_operand" "0")))]
+  "TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
+  "fmovq%C1\t%x2, %3, %0"
+  [(set_attr "type" "fpcmove")])
+
+(define_insn "*movtf_cc_reg_hq_sp64"
+  [(set (match_operand:TF 0 "register_operand" "=e")
+        (if_then_else:TF (match_operator 1 "v9_register_comparison_operator"
+                           [(match_operand:DI 2 "register_operand" "r")
+                            (const_int 0)])
+                         (match_operand:TF 3 "register_operand" "e")
+                         (match_operand:TF 4 "register_operand" "0")))]
+  "TARGET_ARCH64 && TARGET_FPU && TARGET_HARD_QUAD"
+  "fmovrq%D1\t%2, %3, %0"
+  [(set_attr "type" "fpcrmove")])
+
+(define_insn_and_split "*movtf_cc_v9"
+  [(set (match_operand:TF 0 "register_operand" "=e")
+        (if_then_else:TF (match_operator 1 "icc_or_fcc_comparison_operator"
+                           [(match_operand 2 "icc_or_fcc_register_operand" "X")
+                            (const_int 0)])
+                         (match_operand:TF 3 "register_operand" "e")
+                         (match_operand:TF 4 "register_operand" "0")))]
+  "TARGET_V9 && TARGET_FPU && !TARGET_HARD_QUAD"
+  "#"
+  "&& reload_completed"
+  [(clobber (const_int 0))]
+{
+  rtx set_dest = operands[0];
+  rtx set_srca = operands[3];
+  rtx dest1, dest2;
+  rtx srca1, srca2;
+
+  dest1 = gen_df_reg (set_dest, 0);
+  dest2 = gen_df_reg (set_dest, 1);
+  srca1 = gen_df_reg (set_srca, 0);
+  srca2 = gen_df_reg (set_srca, 1);
+
+  if (reg_overlap_mentioned_p (dest1, srca2))
+    {
+      emit_insn (gen_movdf_cc_v9 (dest2, operands[1], operands[2],
+                                  srca2, dest2));
+      emit_insn (gen_movdf_cc_v9 (dest1, operands[1], operands[2],
+                                  srca1, dest1));
+    }
+  else
+    {
+      emit_insn (gen_movdf_cc_v9 (dest1, operands[1], operands[2],
+                                  srca1, dest1));
+      emit_insn (gen_movdf_cc_v9 (dest2, operands[1], operands[2],
+                                  srca2, dest2));
+    }
+  DONE;
+}
+  [(set_attr "length" "2")])
+
+(define_insn_and_split "*movtf_cc_reg_sp64"
+  [(set (match_operand:TF 0 "register_operand" "=e")
+        (if_then_else:TF (match_operator 1 "v9_register_comparison_operator"
+                           [(match_operand:DI 2 "register_operand" "r")
+                            (const_int 0)])
+                         (match_operand:TF 3 "register_operand" "e")
+                         (match_operand:TF 4 "register_operand" "0")))]
+  "TARGET_ARCH64 && TARGET_FPU && !TARGET_HARD_QUAD"
+  "#"
+  "&& reload_completed"
+  [(clobber (const_int 0))]
+{
+  rtx set_dest = operands[0];
+  rtx set_srca = operands[3];
+  rtx dest1, dest2;
+  rtx srca1, srca2;
+
+  dest1 = gen_df_reg (set_dest, 0);
+  dest2 = gen_df_reg (set_dest, 1);
+  srca1 = gen_df_reg (set_srca, 0);
+  srca2 = gen_df_reg (set_srca, 1);
+
+  if (reg_overlap_mentioned_p (dest1, srca2))
+    {
+      emit_insn (gen_movdf_cc_reg_sp64 (dest2, operands[1], operands[2],
+                                        srca2, dest2));
+      emit_insn (gen_movdf_cc_reg_sp64 (dest1, operands[1], operands[2],
+                                        srca1, dest1));
+    }
+  else
+    {
+      emit_insn (gen_movdf_cc_reg_sp64 (dest1, operands[1], operands[2],
+                                        srca1, dest1));
+      emit_insn (gen_movdf_cc_reg_sp64 (dest2, operands[1], operands[2],
+                                        srca2, dest2));
+    }
+  DONE;
+}
+  [(set_attr "length" "2")])
+
+
+;; Zero-extension instructions
+
+;; These patterns originally accepted general_operands, however, slightly
+;; better code is generated by only accepting register_operands, and then
+;; letting combine generate the ldu[hb] insns.
+
+(define_expand "zero_extendhisi2"
+  [(set (match_operand:SI 0 "register_operand" "")
+        (zero_extend:SI (match_operand:HI 1 "register_operand" "")))]
+  ""
+{
+  rtx temp = gen_reg_rtx (SImode);
+  rtx shift_16 = GEN_INT (16);
+  int op1_subbyte = 0;
+
+  if (GET_CODE (operand1) == SUBREG)
+    {
+      op1_subbyte = SUBREG_BYTE (operand1);
+      op1_subbyte /= GET_MODE_SIZE (SImode);
+      op1_subbyte *= GET_MODE_SIZE (SImode);
+      operand1 = XEXP (operand1, 0);
+    }
+
+  emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1, op1_subbyte),
+                          shift_16));
+  emit_insn (gen_lshrsi3 (operand0, temp, shift_16));
+  DONE;
+})
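+
+;; For a register source the expander therefore materializes the zero
+;; extension as a shift pair, i.e. roughly (registers illustrative):
+;;
+;;	sll	%o0, 16, %g1
+;;	srl	%g1, 16, %o0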
+
+(define_insn "*zero_extendhisi2_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+  ""
+  "lduh\t%1, %0"
+  [(set_attr "type" "load")
+   (set_attr "subtype" "regular")
+   (set_attr "us3load_type" "3cycle")])
+
+(define_expand "zero_extendqihi2"
+  [(set (match_operand:HI 0 "register_operand" "")
+        (zero_extend:HI (match_operand:QI 1 "register_operand" "")))]
+  ""
+  "")
+
+(define_insn "*zero_extendqihi2_insn"
+  [(set (match_operand:HI 0 "register_operand" "=r,r")
+        (zero_extend:HI (match_operand:QI 1 "input_operand" "r,m")))]
+  "GET_CODE (operands[1]) != CONST_INT"
+  "@
+   and\t%1, 0xff, %0
+   ldub\t%1, %0"
+  [(set_attr "type" "*,load")
+   (set_attr "subtype" "*,regular")
+   (set_attr "us3load_type" "*,3cycle")])
+
+(define_expand "zero_extendqisi2"
+  [(set (match_operand:SI 0 "register_operand" "")
+        (zero_extend:SI (match_operand:QI 1 "register_operand" "")))]
+  ""
+  "")
+
+(define_insn "*zero_extendqisi2_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+        (zero_extend:SI (match_operand:QI 1 "input_operand" "r,m")))]
+  "GET_CODE (operands[1]) != CONST_INT"
+  "@
+   and\t%1, 0xff, %0
+   ldub\t%1, %0"
+  [(set_attr "type" "*,load")
+   (set_attr "subtype" "*,regular")
+   (set_attr "us3load_type" "*,3cycle")])
+
+(define_expand "zero_extendqidi2"
+  [(set (match_operand:DI 0 "register_operand" "")
+        (zero_extend:DI (match_operand:QI 1 "register_operand" "")))]
+  "TARGET_ARCH64"
+  "")
+
+(define_insn "*zero_extendqidi2_insn"
+  [(set (match_operand:DI 0 "register_operand" "=r,r")
+        (zero_extend:DI (match_operand:QI 1 "input_operand" "r,m")))]
+  "TARGET_ARCH64 && GET_CODE (operands[1]) != CONST_INT"
+  "@
+   and\t%1, 0xff, %0
+   ldub\t%1, %0"
+  [(set_attr "type" "*,load")
+   (set_attr "subtype" "*,regular")
+   (set_attr "us3load_type" "*,3cycle")])
+
+(define_expand "zero_extendhidi2"
+  [(set (match_operand:DI 0 "register_operand" "")
+        (zero_extend:DI (match_operand:HI 1 "register_operand" "")))]
+  "TARGET_ARCH64"
+{
+  rtx temp = gen_reg_rtx (DImode);
+  rtx shift_48 = GEN_INT (48);
+  int op1_subbyte = 0;
+
+  if (GET_CODE (operand1) == SUBREG)
+    {
+      op1_subbyte = SUBREG_BYTE (operand1);
+      op1_subbyte /= GET_MODE_SIZE (DImode);
+      op1_subbyte *= GET_MODE_SIZE (DImode);
+      operand1 = XEXP (operand1, 0);
+    }
+
+  emit_insn (gen_ashldi3 (temp, gen_rtx_SUBREG (DImode, operand1, op1_subbyte),
+                          shift_48));
+  emit_insn (gen_lshrdi3 (operand0, temp, shift_48));
+  DONE;
+})
  17151. +
  17152. +(define_insn "*zero_extendhidi2_insn"
  17153. + [(set (match_operand:DI 0 "register_operand" "=r")
  17154. + (zero_extend:DI (match_operand:HI 1 "memory_operand" "m")))]
  17155. + "TARGET_ARCH64"
  17156. + "lduh\t%1, %0"
  17157. + [(set_attr "type" "load")
  17158. + (set_attr "subtype" "regular")
  17159. + (set_attr "us3load_type" "3cycle")])
  17160. +
+;; ??? Write truncdisi pattern using sra?
+
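+;; (Editor's sketch of what that ??? might look like; illustrative only
+;; and not part of the original patch:
+;;   [(set (match_operand:SI 0 "register_operand" "=r")
+;;         (truncate:SI (match_operand:DI 1 "register_operand" "r")))]
+;;   "TARGET_ARCH64"
+;;   "sra\t%1, 0, %0"
+;; i.e. keep the low 32 bits in canonical sign-extended form.)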
+(define_expand "zero_extendsidi2"
+  [(set (match_operand:DI 0 "register_operand" "")
+        (zero_extend:DI (match_operand:SI 1 "register_operand" "")))]
+  ""
+  "")
+
+(define_insn "*zero_extendsidi2_insn_sp64"
+  [(set (match_operand:DI 0 "register_operand" "=r,r,r")
+        (zero_extend:DI (match_operand:SI 1 "input_operand" "r,m,*f")))]
+  "TARGET_ARCH64
+   && GET_CODE (operands[1]) != CONST_INT"
+  "@
+   srl\t%1, 0, %0
+   lduw\t%1, %0
+   movstouw\t%1, %0"
+  [(set_attr "type" "shift,load,vismv")
+   (set_attr "subtype" "*,regular,movstouw")
+   (set_attr "cpu_feature" "*,*,vis3")])
+
+(define_insn_and_split "*zero_extendsidi2_insn_sp32"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (zero_extend:DI (match_operand:SI 1 "register_operand" "r")))]
+  "TARGET_ARCH32"
+  "#"
+  "&& reload_completed"
+  [(set (match_dup 2) (match_dup 1))
+   (set (match_dup 3) (const_int 0))]
+  "operands[2] = gen_lowpart (SImode, operands[0]);
+   operands[3] = gen_highpart (SImode, operands[0]);"
+  [(set_attr "length" "2")])
+
+;; Simplify comparisons of extended values.
+
+(define_insn "*cmp_zero_extendqisi2"
+  [(set (reg:CC CC_REG)
+        (compare:CC (zero_extend:SI (match_operand:QI 0 "register_operand" "r"))
+                    (const_int 0)))]
+  ""
+  "andcc\t%0, 0xff, %%g0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_zero_qi"
+  [(set (reg:CC CC_REG)
+        (compare:CC (match_operand:QI 0 "register_operand" "r")
+                    (const_int 0)))]
+  ""
+  "andcc\t%0, 0xff, %%g0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_zero_extendqisi2_set"
+  [(set (reg:CC CC_REG)
+        (compare:CC (zero_extend:SI (match_operand:QI 1 "register_operand" "r"))
+                    (const_int 0)))
+   (set (match_operand:SI 0 "register_operand" "=r")
+        (zero_extend:SI (match_dup 1)))]
+  ""
+  "andcc\t%1, 0xff, %0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_zero_extendqisi2_andcc_set"
+  [(set (reg:CC CC_REG)
+        (compare:CC (and:SI (match_operand:SI 1 "register_operand" "r")
+                            (const_int 255))
+                    (const_int 0)))
+   (set (match_operand:SI 0 "register_operand" "=r")
+        (zero_extend:SI (subreg:QI (match_dup 1) 0)))]
+  ""
+  "andcc\t%1, 0xff, %0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_zero_extendqidi2"
+  [(set (reg:CCX CC_REG)
+        (compare:CCX (zero_extend:DI (match_operand:QI 0 "register_operand" "r"))
+                     (const_int 0)))]
+  "TARGET_ARCH64"
+  "andcc\t%0, 0xff, %%g0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_zero_qi_sp64"
+  [(set (reg:CCX CC_REG)
+        (compare:CCX (match_operand:QI 0 "register_operand" "r")
+                     (const_int 0)))]
+  "TARGET_ARCH64"
+  "andcc\t%0, 0xff, %%g0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_zero_extendqidi2_set"
+  [(set (reg:CCX CC_REG)
+        (compare:CCX (zero_extend:DI (match_operand:QI 1 "register_operand" "r"))
+                     (const_int 0)))
+   (set (match_operand:DI 0 "register_operand" "=r")
+        (zero_extend:DI (match_dup 1)))]
+  "TARGET_ARCH64"
+  "andcc\t%1, 0xff, %0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_zero_extendqidi2_andcc_set"
+  [(set (reg:CCX CC_REG)
+        (compare:CCX (and:DI (match_operand:DI 1 "register_operand" "r")
+                             (const_int 255))
+                     (const_int 0)))
+   (set (match_operand:DI 0 "register_operand" "=r")
+        (zero_extend:DI (subreg:QI (match_dup 1) 0)))]
+  "TARGET_ARCH64"
+  "andcc\t%1, 0xff, %0"
+  [(set_attr "type" "compare")])
+
+;; Similarly, handle {SI,DI}->QI mode truncation followed by a compare.
+
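+;; (Editor's note, not part of the original patch: SPARC is big-endian,
+;; so (subreg:QI ... 3) of an SImode register and (subreg:QI ... 7) of
+;; a DImode register both select the least significant byte, which is
+;; why the patterns below can test it with "andcc ..., 0xff".)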
+(define_insn "*cmp_siqi_trunc"
+  [(set (reg:CC CC_REG)
+        (compare:CC (subreg:QI (match_operand:SI 0 "register_operand" "r") 3)
+                    (const_int 0)))]
+  ""
+  "andcc\t%0, 0xff, %%g0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_siqi_trunc_set"
+  [(set (reg:CC CC_REG)
+        (compare:CC (subreg:QI (match_operand:SI 1 "register_operand" "r") 3)
+                    (const_int 0)))
+   (set (match_operand:QI 0 "register_operand" "=r")
+        (subreg:QI (match_dup 1) 3))]
+  ""
+  "andcc\t%1, 0xff, %0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_diqi_trunc"
+  [(set (reg:CC CC_REG)
+        (compare:CC (subreg:QI (match_operand:DI 0 "register_operand" "r") 7)
+                    (const_int 0)))]
+  "TARGET_ARCH64"
+  "andcc\t%0, 0xff, %%g0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_diqi_trunc_set"
+  [(set (reg:CC CC_REG)
+        (compare:CC (subreg:QI (match_operand:DI 1 "register_operand" "r") 7)
+                    (const_int 0)))
+   (set (match_operand:QI 0 "register_operand" "=r")
+        (subreg:QI (match_dup 1) 7))]
+  "TARGET_ARCH64"
+  "andcc\t%1, 0xff, %0"
+  [(set_attr "type" "compare")])
+
+
+;; Sign-extension instructions
+
+;; These patterns originally accepted general_operands; however, slightly
+;; better code is generated by accepting only register_operands and then
+;; letting combine generate the lds[hb] insns.
+
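+;; (Editor's note, not part of the original patch: with a register
+;; source, extendhisi2 below becomes an arithmetic shift pair, roughly
+;; "sll %src, 16, %tmp; sra %tmp, 16, %dst"; a memory source is matched
+;; as a single "ldsh" instead.)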
+(define_expand "extendhisi2"
+  [(set (match_operand:SI 0 "register_operand" "")
+        (sign_extend:SI (match_operand:HI 1 "register_operand" "")))]
+  ""
+{
+  rtx temp = gen_reg_rtx (SImode);
+  rtx shift_16 = GEN_INT (16);
+  int op1_subbyte = 0;
+
+  if (GET_CODE (operand1) == SUBREG)
+    {
+      op1_subbyte = SUBREG_BYTE (operand1);
+      op1_subbyte /= GET_MODE_SIZE (SImode);
+      op1_subbyte *= GET_MODE_SIZE (SImode);
+      operand1 = XEXP (operand1, 0);
+    }
+
+  emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1, op1_subbyte),
+                          shift_16));
+  emit_insn (gen_ashrsi3 (operand0, temp, shift_16));
+  DONE;
+})
+
+(define_insn "*sign_extendhisi2_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+  ""
+  "ldsh\t%1, %0"
+  [(set_attr "type" "sload")
+   (set_attr "us3load_type" "3cycle")])
+
+(define_expand "extendqihi2"
+  [(set (match_operand:HI 0 "register_operand" "")
+        (sign_extend:HI (match_operand:QI 1 "register_operand" "")))]
+  ""
+{
+  rtx temp = gen_reg_rtx (SImode);
+  rtx shift_24 = GEN_INT (24);
+  int op1_subbyte = 0;
+  int op0_subbyte = 0;
+
+  if (GET_CODE (operand1) == SUBREG)
+    {
+      op1_subbyte = SUBREG_BYTE (operand1);
+      op1_subbyte /= GET_MODE_SIZE (SImode);
+      op1_subbyte *= GET_MODE_SIZE (SImode);
+      operand1 = XEXP (operand1, 0);
+    }
+  if (GET_CODE (operand0) == SUBREG)
+    {
+      op0_subbyte = SUBREG_BYTE (operand0);
+      op0_subbyte /= GET_MODE_SIZE (SImode);
+      op0_subbyte *= GET_MODE_SIZE (SImode);
+      operand0 = XEXP (operand0, 0);
+    }
+  emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1, op1_subbyte),
+                          shift_24));
+  if (GET_MODE (operand0) != SImode)
+    operand0 = gen_rtx_SUBREG (SImode, operand0, op0_subbyte);
+  emit_insn (gen_ashrsi3 (operand0, temp, shift_24));
+  DONE;
+})
+
+(define_insn "*sign_extendqihi2_insn"
+  [(set (match_operand:HI 0 "register_operand" "=r")
+        (sign_extend:HI (match_operand:QI 1 "memory_operand" "m")))]
+  ""
+  "ldsb\t%1, %0"
+  [(set_attr "type" "sload")
+   (set_attr "us3load_type" "3cycle")])
+
+(define_expand "extendqisi2"
+  [(set (match_operand:SI 0 "register_operand" "")
+        (sign_extend:SI (match_operand:QI 1 "register_operand" "")))]
+  ""
+{
+  rtx temp = gen_reg_rtx (SImode);
+  rtx shift_24 = GEN_INT (24);
+  int op1_subbyte = 0;
+
+  if (GET_CODE (operand1) == SUBREG)
+    {
+      op1_subbyte = SUBREG_BYTE (operand1);
+      op1_subbyte /= GET_MODE_SIZE (SImode);
+      op1_subbyte *= GET_MODE_SIZE (SImode);
+      operand1 = XEXP (operand1, 0);
+    }
+
+  emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1, op1_subbyte),
+                          shift_24));
+  emit_insn (gen_ashrsi3 (operand0, temp, shift_24));
+  DONE;
+})
+
+(define_insn "*sign_extendqisi2_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (sign_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+  ""
+  "ldsb\t%1, %0"
+  [(set_attr "type" "sload")
+   (set_attr "us3load_type" "3cycle")])
+
+(define_expand "extendqidi2"
+  [(set (match_operand:DI 0 "register_operand" "")
+        (sign_extend:DI (match_operand:QI 1 "register_operand" "")))]
+  "TARGET_ARCH64"
+{
+  rtx temp = gen_reg_rtx (DImode);
+  rtx shift_56 = GEN_INT (56);
+  int op1_subbyte = 0;
+
+  if (GET_CODE (operand1) == SUBREG)
+    {
+      op1_subbyte = SUBREG_BYTE (operand1);
+      op1_subbyte /= GET_MODE_SIZE (DImode);
+      op1_subbyte *= GET_MODE_SIZE (DImode);
+      operand1 = XEXP (operand1, 0);
+    }
+
+  emit_insn (gen_ashldi3 (temp, gen_rtx_SUBREG (DImode, operand1, op1_subbyte),
+                          shift_56));
+  emit_insn (gen_ashrdi3 (operand0, temp, shift_56));
+  DONE;
+})
+
+(define_insn "*sign_extendqidi2_insn"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (sign_extend:DI (match_operand:QI 1 "memory_operand" "m")))]
+  "TARGET_ARCH64"
+  "ldsb\t%1, %0"
+  [(set_attr "type" "sload")
+   (set_attr "us3load_type" "3cycle")])
+
+(define_expand "extendhidi2"
+  [(set (match_operand:DI 0 "register_operand" "")
+        (sign_extend:DI (match_operand:HI 1 "register_operand" "")))]
+  "TARGET_ARCH64"
+{
+  rtx temp = gen_reg_rtx (DImode);
+  rtx shift_48 = GEN_INT (48);
+  int op1_subbyte = 0;
+
+  if (GET_CODE (operand1) == SUBREG)
+    {
+      op1_subbyte = SUBREG_BYTE (operand1);
+      op1_subbyte /= GET_MODE_SIZE (DImode);
+      op1_subbyte *= GET_MODE_SIZE (DImode);
+      operand1 = XEXP (operand1, 0);
+    }
+
+  emit_insn (gen_ashldi3 (temp, gen_rtx_SUBREG (DImode, operand1, op1_subbyte),
+                          shift_48));
+  emit_insn (gen_ashrdi3 (operand0, temp, shift_48));
+  DONE;
+})
+
+(define_insn "*sign_extendhidi2_insn"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (sign_extend:DI (match_operand:HI 1 "memory_operand" "m")))]
+  "TARGET_ARCH64"
+  "ldsh\t%1, %0"
+  [(set_attr "type" "sload")
+   (set_attr "us3load_type" "3cycle")])
+
+(define_expand "extendsidi2"
+  [(set (match_operand:DI 0 "register_operand" "")
+        (sign_extend:DI (match_operand:SI 1 "register_operand" "")))]
+  "TARGET_ARCH64"
+  "")
+
+(define_insn "*sign_extendsidi2_insn"
+  [(set (match_operand:DI 0 "register_operand" "=r,r,r")
+        (sign_extend:DI (match_operand:SI 1 "input_operand" "r,m,*f")))]
+  "TARGET_ARCH64"
+  "@
+   sra\t%1, 0, %0
+   ldsw\t%1, %0
+   movstosw\t%1, %0"
+  [(set_attr "type" "shift,sload,vismv")
+   (set_attr "us3load_type" "*,3cycle,*")
+   (set_attr "cpu_feature" "*,*,vis3")])
+
+
+;; Special pattern for optimizing bit-field compares.  This is needed
+;; because combine uses this as a canonical form.
+
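+;; (Editor's worked example, not part of the original patch: for a
+;; 4-bit field at bit position 26, the first pattern below computes
+;; pos = 32 - 26 - 4 = 2 and mask = 0xf << 2 = 0x3c, emitting
+;; "andcc %0, 60, %%g0"; the "> 19" test guarantees the mask fits in
+;; the 13-bit signed immediate field.)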
+(define_insn "*cmp_zero_extract"
+  [(set (reg:CC CC_REG)
+        (compare:CC
+          (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
+                           (match_operand:SI 1 "small_int_operand" "I")
+                           (match_operand:SI 2 "small_int_operand" "I"))
+          (const_int 0)))]
+  "INTVAL (operands[2]) > 19"
+{
+  int len = INTVAL (operands[1]);
+  int pos = 32 - INTVAL (operands[2]) - len;
+  HOST_WIDE_INT mask = ((1 << len) - 1) << pos;
+  operands[1] = GEN_INT (mask);
+  return "andcc\t%0, %1, %%g0";
+}
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_zero_extract_sp64"
+  [(set (reg:CCX CC_REG)
+        (compare:CCX
+          (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
+                           (match_operand:SI 1 "small_int_operand" "I")
+                           (match_operand:SI 2 "small_int_operand" "I"))
+          (const_int 0)))]
+  "TARGET_ARCH64 && INTVAL (operands[2]) > 51"
+{
+  int len = INTVAL (operands[1]);
+  int pos = 64 - INTVAL (operands[2]) - len;
+  HOST_WIDE_INT mask = (((unsigned HOST_WIDE_INT) 1 << len) - 1) << pos;
+  operands[1] = GEN_INT (mask);
+  return "andcc\t%0, %1, %%g0";
+}
+  [(set_attr "type" "compare")])
+
+
+;; Conversions between float, double and long double.
+
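+;; (Editor's note, not part of the original patch: the TFmode expanders
+;; below funnel through emit_tfmode_cvt, which is expected to emit the
+;; hardware quad instruction under TARGET_HARD_QUAD and fall back to a
+;; library call otherwise.)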
+(define_insn "extendsfdf2"
+  [(set (match_operand:DF 0 "register_operand" "=e")
+        (float_extend:DF (match_operand:SF 1 "register_operand" "f")))]
+  "TARGET_FPU"
+  "fstod\t%1, %0"
+  [(set_attr "type" "fp")
+   (set_attr "fptype" "double")])
+
+(define_expand "extendsftf2"
+  [(set (match_operand:TF 0 "nonimmediate_operand" "")
+        (float_extend:TF (match_operand:SF 1 "register_operand" "")))]
+  "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+  "emit_tfmode_cvt (FLOAT_EXTEND, operands); DONE;")
+
+(define_insn "*extendsftf2_hq"
+  [(set (match_operand:TF 0 "register_operand" "=e")
+        (float_extend:TF (match_operand:SF 1 "register_operand" "f")))]
+  "TARGET_FPU && TARGET_HARD_QUAD"
+  "fstoq\t%1, %0"
+  [(set_attr "type" "fp")])
+
+(define_expand "extenddftf2"
+  [(set (match_operand:TF 0 "nonimmediate_operand" "")
+        (float_extend:TF (match_operand:DF 1 "register_operand" "")))]
+  "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+  "emit_tfmode_cvt (FLOAT_EXTEND, operands); DONE;")
+
+(define_insn "*extenddftf2_hq"
+  [(set (match_operand:TF 0 "register_operand" "=e")
+        (float_extend:TF (match_operand:DF 1 "register_operand" "e")))]
+  "TARGET_FPU && TARGET_HARD_QUAD"
+  "fdtoq\t%1, %0"
+  [(set_attr "type" "fp")])
+
+(define_insn "truncdfsf2"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+        (float_truncate:SF (match_operand:DF 1 "register_operand" "e")))]
+  "TARGET_FPU"
+  "fdtos\t%1, %0"
+  [(set_attr "type" "fp")
+   (set_attr "fptype" "double")
+   (set_attr "fptype_ut699" "single")])
+
+(define_expand "trunctfsf2"
+  [(set (match_operand:SF 0 "register_operand" "")
+        (float_truncate:SF (match_operand:TF 1 "general_operand" "")))]
+  "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+  "emit_tfmode_cvt (FLOAT_TRUNCATE, operands); DONE;")
+
+(define_insn "*trunctfsf2_hq"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+        (float_truncate:SF (match_operand:TF 1 "register_operand" "e")))]
+  "TARGET_FPU && TARGET_HARD_QUAD"
+  "fqtos\t%1, %0"
+  [(set_attr "type" "fp")])
+
+(define_expand "trunctfdf2"
+  [(set (match_operand:DF 0 "register_operand" "")
+        (float_truncate:DF (match_operand:TF 1 "general_operand" "")))]
+  "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+  "emit_tfmode_cvt (FLOAT_TRUNCATE, operands); DONE;")
+
+(define_insn "*trunctfdf2_hq"
+  [(set (match_operand:DF 0 "register_operand" "=e")
+        (float_truncate:DF (match_operand:TF 1 "register_operand" "e")))]
+  "TARGET_FPU && TARGET_HARD_QUAD"
+  "fqtod\t%1, %0"
+  [(set_attr "type" "fp")])
+
+
+;; Conversion between fixed point and floating point.
+
+(define_insn "floatsisf2"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+        (float:SF (match_operand:SI 1 "register_operand" "f")))]
+  "TARGET_FPU"
+  "fitos\t%1, %0"
+  [(set_attr "type" "fp")
+   (set_attr "fptype" "single")])
+
+(define_insn "floatsidf2"
+  [(set (match_operand:DF 0 "register_operand" "=e")
+        (float:DF (match_operand:SI 1 "register_operand" "f")))]
+  "TARGET_FPU"
+  "fitod\t%1, %0"
+  [(set_attr "type" "fp")
+   (set_attr "fptype" "double")])
+
+(define_expand "floatsitf2"
+  [(set (match_operand:TF 0 "nonimmediate_operand" "")
+        (float:TF (match_operand:SI 1 "register_operand" "")))]
+  "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+  "emit_tfmode_cvt (FLOAT, operands); DONE;")
+
+(define_insn "*floatsitf2_hq"
+  [(set (match_operand:TF 0 "register_operand" "=e")
+        (float:TF (match_operand:SI 1 "register_operand" "f")))]
+  "TARGET_FPU && TARGET_HARD_QUAD"
+  "fitoq\t%1, %0"
+  [(set_attr "type" "fp")])
+
+(define_expand "floatunssitf2"
+  [(set (match_operand:TF 0 "nonimmediate_operand" "")
+        (unsigned_float:TF (match_operand:SI 1 "register_operand" "")))]
+  "TARGET_FPU && TARGET_ARCH64 && !TARGET_HARD_QUAD"
+  "emit_tfmode_cvt (UNSIGNED_FLOAT, operands); DONE;")
+
+;; Now the same for 64 bit sources.
+
+(define_insn "floatdisf2"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+        (float:SF (match_operand:DI 1 "register_operand" "e")))]
+  "TARGET_V9 && TARGET_FPU"
+  "fxtos\t%1, %0"
+  [(set_attr "type" "fp")
+   (set_attr "fptype" "double")])
+
+(define_expand "floatunsdisf2"
+  [(use (match_operand:SF 0 "register_operand" ""))
+   (use (match_operand:DI 1 "general_operand" ""))]
+  "TARGET_ARCH64 && TARGET_FPU"
+  "sparc_emit_floatunsdi (operands, SFmode); DONE;")
+
+(define_insn "floatdidf2"
+  [(set (match_operand:DF 0 "register_operand" "=e")
+        (float:DF (match_operand:DI 1 "register_operand" "e")))]
+  "TARGET_V9 && TARGET_FPU"
+  "fxtod\t%1, %0"
+  [(set_attr "type" "fp")
+   (set_attr "fptype" "double")])
+
+(define_expand "floatunsdidf2"
+  [(use (match_operand:DF 0 "register_operand" ""))
+   (use (match_operand:DI 1 "general_operand" ""))]
+  "TARGET_ARCH64 && TARGET_FPU"
+  "sparc_emit_floatunsdi (operands, DFmode); DONE;")
+
+(define_expand "floatditf2"
+  [(set (match_operand:TF 0 "nonimmediate_operand" "")
+        (float:TF (match_operand:DI 1 "register_operand" "")))]
+  "TARGET_FPU && TARGET_V9 && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+  "emit_tfmode_cvt (FLOAT, operands); DONE;")
+
+(define_insn "*floatditf2_hq"
+  [(set (match_operand:TF 0 "register_operand" "=e")
+        (float:TF (match_operand:DI 1 "register_operand" "e")))]
+  "TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
+  "fxtoq\t%1, %0"
+  [(set_attr "type" "fp")])
+
+(define_expand "floatunsditf2"
+  [(set (match_operand:TF 0 "nonimmediate_operand" "")
+        (unsigned_float:TF (match_operand:DI 1 "register_operand" "")))]
+  "TARGET_FPU && TARGET_ARCH64 && !TARGET_HARD_QUAD"
+  "emit_tfmode_cvt (UNSIGNED_FLOAT, operands); DONE;")
+
+;; Convert a float to an actual integer.
+;; Truncation is performed as part of the conversion.
+
+(define_insn "fix_truncsfsi2"
+  [(set (match_operand:SI 0 "register_operand" "=f")
+        (fix:SI (fix:SF (match_operand:SF 1 "register_operand" "f"))))]
+  "TARGET_FPU"
+  "fstoi\t%1, %0"
+  [(set_attr "type" "fp")
+   (set_attr "fptype" "single")])
+
+(define_insn "fix_truncdfsi2"
+  [(set (match_operand:SI 0 "register_operand" "=f")
+        (fix:SI (fix:DF (match_operand:DF 1 "register_operand" "e"))))]
+  "TARGET_FPU"
+  "fdtoi\t%1, %0"
+  [(set_attr "type" "fp")
+   (set_attr "fptype" "double")
+   (set_attr "fptype_ut699" "single")])
+
+(define_expand "fix_trunctfsi2"
+  [(set (match_operand:SI 0 "register_operand" "")
+        (fix:SI (match_operand:TF 1 "general_operand" "")))]
+  "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+  "emit_tfmode_cvt (FIX, operands); DONE;")
+
+(define_insn "*fix_trunctfsi2_hq"
+  [(set (match_operand:SI 0 "register_operand" "=f")
+        (fix:SI (match_operand:TF 1 "register_operand" "e")))]
+  "TARGET_FPU && TARGET_HARD_QUAD"
+  "fqtoi\t%1, %0"
+  [(set_attr "type" "fp")])
+
+(define_expand "fixuns_trunctfsi2"
+  [(set (match_operand:SI 0 "register_operand" "")
+        (unsigned_fix:SI (match_operand:TF 1 "general_operand" "")))]
+  "TARGET_FPU && TARGET_ARCH64 && !TARGET_HARD_QUAD"
+  "emit_tfmode_cvt (UNSIGNED_FIX, operands); DONE;")
+
+;; Now the same, for V9 targets
+
+(define_insn "fix_truncsfdi2"
+  [(set (match_operand:DI 0 "register_operand" "=e")
+        (fix:DI (fix:SF (match_operand:SF 1 "register_operand" "f"))))]
+  "TARGET_V9 && TARGET_FPU"
+  "fstox\t%1, %0"
+  [(set_attr "type" "fp")
+   (set_attr "fptype" "double")])
+
+(define_expand "fixuns_truncsfdi2"
+  [(use (match_operand:DI 0 "register_operand" ""))
+   (use (match_operand:SF 1 "general_operand" ""))]
+  "TARGET_ARCH64 && TARGET_FPU"
+  "sparc_emit_fixunsdi (operands, SFmode); DONE;")
+
+(define_insn "fix_truncdfdi2"
+  [(set (match_operand:DI 0 "register_operand" "=e")
+        (fix:DI (fix:DF (match_operand:DF 1 "register_operand" "e"))))]
+  "TARGET_V9 && TARGET_FPU"
+  "fdtox\t%1, %0"
+  [(set_attr "type" "fp")
+   (set_attr "fptype" "double")])
+
+(define_expand "fixuns_truncdfdi2"
+  [(use (match_operand:DI 0 "register_operand" ""))
+   (use (match_operand:DF 1 "general_operand" ""))]
+  "TARGET_ARCH64 && TARGET_FPU"
+  "sparc_emit_fixunsdi (operands, DFmode); DONE;")
+
+(define_expand "fix_trunctfdi2"
+  [(set (match_operand:DI 0 "register_operand" "")
+        (fix:DI (match_operand:TF 1 "general_operand" "")))]
+  "TARGET_V9 && TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+  "emit_tfmode_cvt (FIX, operands); DONE;")
+
+(define_insn "*fix_trunctfdi2_hq"
+  [(set (match_operand:DI 0 "register_operand" "=e")
+        (fix:DI (match_operand:TF 1 "register_operand" "e")))]
+  "TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
+  "fqtox\t%1, %0"
+  [(set_attr "type" "fp")])
+
+(define_expand "fixuns_trunctfdi2"
+  [(set (match_operand:DI 0 "register_operand" "")
+        (unsigned_fix:DI (match_operand:TF 1 "general_operand" "")))]
+  "TARGET_FPU && TARGET_ARCH64 && !TARGET_HARD_QUAD"
+  "emit_tfmode_cvt (UNSIGNED_FIX, operands); DONE;")
+
+
+;; Integer addition/subtraction instructions.
+
+(define_expand "adddi3"
+  [(set (match_operand:DI 0 "register_operand" "")
+        (plus:DI (match_operand:DI 1 "register_operand" "")
+                 (match_operand:DI 2 "arith_double_add_operand" "")))]
+  ""
+{
+  if (TARGET_ARCH32)
+    {
+      emit_insn (gen_adddi3_sp32 (operands[0], operands[1], operands[2]));
+      DONE;
+    }
+})
+
+;; Turning an add/sub instruction into the other changes the Carry flag
+;; so the 4096 trick cannot be used for operations in CCXCmode.
+
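+;; (Editor's illustration, not part of the original patch: the "4096
+;; trick" is the O-constraint alternative seen below, e.g. rewriting
+;; "add %o1, 4096, %o0" as "sub %o1, -4096, %o0", since -4096 fits the
+;; 13-bit signed immediate field while 4096 does not.  The result and
+;; the V flag are unchanged, but the C flag is not, hence the
+;; restriction above.)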
+(define_expand "uaddvdi4"
+  [(parallel [(set (reg:CCXC CC_REG)
+                   (compare:CCXC (plus:DI (match_operand:DI 1 "register_operand")
+                                          (match_operand:DI 2 "arith_double_operand"))
+                                 (match_dup 1)))
+              (set (match_operand:DI 0 "register_operand")
+                   (plus:DI (match_dup 1) (match_dup 2)))])
+   (set (pc) (if_then_else (ltu (reg:CCXC CC_REG) (const_int 0))
+                           (label_ref (match_operand 3))
+                           (pc)))]
+  ""
+{
+  if (TARGET_ARCH32)
+    {
+      emit_insn (gen_uaddvdi4_sp32 (operands[0], operands[1], operands[2]));
+      rtx x = gen_rtx_LTU (VOIDmode, gen_rtx_REG (CCCmode, SPARC_ICC_REG),
+                           const0_rtx);
+      emit_jump_insn (gen_cbranchcc4 (x, XEXP (x, 0), XEXP (x, 1), operands[3]));
+      DONE;
+    }
+})
+
+;; Turning an add/sub instruction into the other does not change the Overflow
+;; flag so the 4096 trick can be used for operations in CCXVmode.
+
+(define_expand "addvdi4"
+  [(parallel [(set (reg:CCXV CC_REG)
+                   (compare:CCXV (plus:DI (match_operand:DI 1 "register_operand")
+                                          (match_operand:DI 2 "arith_double_add_operand"))
+                                 (unspec:DI [(match_dup 1) (match_dup 2)]
+                                            UNSPEC_ADDV)))
+              (set (match_operand:DI 0 "register_operand")
+                   (plus:DI (match_dup 1) (match_dup 2)))])
+   (set (pc) (if_then_else (ne (reg:CCXV CC_REG) (const_int 0))
+                           (label_ref (match_operand 3))
+                           (pc)))]
+  ""
+{
+  if (TARGET_ARCH32)
+    {
+      emit_insn (gen_addvdi4_sp32 (operands[0], operands[1], operands[2]));
+      rtx x = gen_rtx_NE (VOIDmode, gen_rtx_REG (CCVmode, SPARC_ICC_REG),
+                          const0_rtx);
+      emit_jump_insn (gen_cbranchcc4 (x, XEXP (x, 0), XEXP (x, 1), operands[3]));
+      DONE;
+    }
+})
+
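+;; (Editor's note, not part of the original patch: on 32-bit targets
+;; the DImode additions below split after reload into an "addcc" of the
+;; low words followed by an "addx"-style add of the high words that
+;; consumes the carry.)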
+(define_insn_and_split "adddi3_sp32"
+  [(set (match_operand:DI 0 "register_operand" "=&r")
+        (plus:DI (match_operand:DI 1 "register_operand" "%r")
+                 (match_operand:DI 2 "arith_double_operand" "rHI")))
+   (clobber (reg:CC CC_REG))]
+  "TARGET_ARCH32"
+  "#"
+  "&& reload_completed"
+  [(parallel [(set (reg:CCC CC_REG)
+                   (compare:CCC (plus:SI (match_dup 4) (match_dup 5))
+                                (match_dup 4)))
+              (set (match_dup 3)
+                   (plus:SI (match_dup 4) (match_dup 5)))])
+   (set (match_dup 6)
+        (plus:SI (plus:SI (match_dup 7) (match_dup 8))
+                 (ltu:SI (reg:CCC CC_REG) (const_int 0))))]
+{
+  operands[3] = gen_lowpart (SImode, operands[0]);
+  operands[4] = gen_lowpart (SImode, operands[1]);
+  operands[5] = gen_lowpart (SImode, operands[2]);
+  operands[6] = gen_highpart (SImode, operands[0]);
+  operands[7] = gen_highpart_mode (SImode, DImode, operands[1]);
+  operands[8] = gen_highpart_mode (SImode, DImode, operands[2]);
+}
+  [(set_attr "length" "2")])
+
+(define_insn_and_split "uaddvdi4_sp32"
+  [(set (reg:CCC CC_REG)
+        (compare:CCC (plus:DI (match_operand:DI 1 "register_operand" "%r")
+                              (match_operand:DI 2 "arith_double_operand" "rHI"))
+                     (match_dup 1)))
+   (set (match_operand:DI 0 "register_operand" "=&r")
+        (plus:DI (match_dup 1) (match_dup 2)))]
+  "TARGET_ARCH32"
+  "#"
+  "&& reload_completed"
+  [(parallel [(set (reg:CCC CC_REG)
+                   (compare:CCC (plus:SI (match_dup 4) (match_dup 5))
+                                (match_dup 4)))
+              (set (match_dup 3)
+                   (plus:SI (match_dup 4) (match_dup 5)))])
+   (parallel [(set (reg:CCC CC_REG)
+                   (compare:CCC (zero_extend:DI
+                                  (plus:SI (plus:SI (match_dup 7) (match_dup 8))
+                                           (ltu:SI (reg:CCC CC_REG)
+                                                   (const_int 0))))
+                                (plus:DI (plus:DI (zero_extend:DI (match_dup 7))
+                                                  (zero_extend:DI (match_dup 8)))
+                                         (ltu:DI (reg:CCC CC_REG)
+                                                 (const_int 0)))))
+              (set (match_dup 6)
+                   (plus:SI (plus:SI (match_dup 7) (match_dup 8))
+                            (ltu:SI (reg:CCC CC_REG)
+                                    (const_int 0))))])]
+{
+  operands[3] = gen_lowpart (SImode, operands[0]);
+  operands[4] = gen_lowpart (SImode, operands[1]);
+  operands[5] = gen_lowpart (SImode, operands[2]);
+  operands[6] = gen_highpart (SImode, operands[0]);
+  operands[7] = gen_highpart_mode (SImode, DImode, operands[1]);
+  operands[8] = gen_highpart_mode (SImode, DImode, operands[2]);
+}
+  [(set_attr "length" "2")])
+
+(define_insn_and_split "addvdi4_sp32"
+  [(set (reg:CCV CC_REG)
+        (compare:CCV (plus:DI (match_operand:DI 1 "register_operand" "%r")
+                              (match_operand:DI 2 "arith_double_operand" "rHI"))
+                     (unspec:DI [(match_dup 1) (match_dup 2)] UNSPEC_ADDV)))
+   (set (match_operand:DI 0 "register_operand" "=&r")
+        (plus:DI (match_dup 1) (match_dup 2)))]
+  "TARGET_ARCH32"
+  "#"
+  "&& reload_completed"
+  [(parallel [(set (reg:CCC CC_REG)
+                   (compare:CCC (plus:SI (match_dup 4) (match_dup 5))
+                                (match_dup 4)))
+              (set (match_dup 3)
+                   (plus:SI (match_dup 4) (match_dup 5)))])
+   (parallel [(set (reg:CCV CC_REG)
+                   (compare:CCV (plus:SI (plus:SI (match_dup 7) (match_dup 8))
+                                         (ltu:SI (reg:CCC CC_REG)
+                                                 (const_int 0)))
+                                (unspec:SI [(plus:SI (match_dup 7) (match_dup 8))
+                                            (ltu:SI (reg:CCC CC_REG)
+                                                    (const_int 0))]
+                                           UNSPEC_ADDV)))
+              (set (match_dup 6)
+                   (plus:SI (plus:SI (match_dup 7) (match_dup 8))
+                            (ltu:SI (reg:CCC CC_REG) (const_int 0))))])]
+{
+  operands[3] = gen_lowpart (SImode, operands[0]);
+  operands[4] = gen_lowpart (SImode, operands[1]);
+  operands[5] = gen_lowpart (SImode, operands[2]);
+  operands[6] = gen_highpart (SImode, operands[0]);
+  operands[7] = gen_highpart_mode (SImode, DImode, operands[1]);
+  operands[8] = gen_highpart_mode (SImode, DImode, operands[2]);
+}
+  [(set_attr "length" "2")])
+
+(define_insn_and_split "*addx_extend_sp32"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (zero_extend:DI (plus:SI (plus:SI
+                                   (match_operand:SI 1 "register_operand" "%r")
+                                   (match_operand:SI 2 "arith_operand" "rI"))
+                                 (ltu:SI (reg:CCC CC_REG) (const_int 0)))))]
+  "TARGET_ARCH32"
+  "#"
+  "&& reload_completed"
+  [(set (match_dup 3) (plus:SI (plus:SI (match_dup 1) (match_dup 2))
+                               (ltu:SI (reg:CCC CC_REG) (const_int 0))))
+   (set (match_dup 4) (const_int 0))]
+  "operands[3] = gen_lowpart (SImode, operands[0]);
+   operands[4] = gen_highpart (SImode, operands[0]);"
+  [(set_attr "length" "2")])
+
+(define_insn_and_split "*adddi3_extend_sp32"
+  [(set (match_operand:DI 0 "register_operand" "=&r")
+        (plus:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+                 (match_operand:DI 2 "register_operand" "r")))
+   (clobber (reg:CC CC_REG))]
+  "TARGET_ARCH32"
+  "#"
+  "&& reload_completed"
+  [(parallel [(set (reg:CCC CC_REG)
+                   (compare:CCC (plus:SI (match_dup 3) (match_dup 1))
+                                (match_dup 3)))
+              (set (match_dup 5) (plus:SI (match_dup 3) (match_dup 1)))])
+   (set (match_dup 6)
+        (plus:SI (plus:SI (match_dup 4) (const_int 0))
+                 (ltu:SI (reg:CCC CC_REG) (const_int 0))))]
+  "operands[3] = gen_lowpart (SImode, operands[2]);
+   operands[4] = gen_highpart (SImode, operands[2]);
+   operands[5] = gen_lowpart (SImode, operands[0]);
+   operands[6] = gen_highpart (SImode, operands[0]);"
+  [(set_attr "length" "2")])
+
+(define_insn "*adddi3_sp64"
+  [(set (match_operand:DI 0 "register_operand" "=r,r")
+        (plus:DI (match_operand:DI 1 "register_operand" "%r,r")
+                 (match_operand:DI 2 "arith_add_operand" "rI,O")))]
+  "TARGET_ARCH64"
+  "@
+   add\t%1, %2, %0
+   sub\t%1, -%2, %0")
+
+(define_insn "addsi3"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+        (plus:SI (match_operand:SI 1 "register_operand" "%r,r")
+                 (match_operand:SI 2 "arith_add_operand" "rI,O")))]
+  ""
+  "@
+   add\t%1, %2, %0
+   sub\t%1, -%2, %0")
+
+;; Turning an add/sub instruction into the other changes the Carry flag
+;; so the 4096 trick cannot be used for operations in CCCmode.
+
+(define_expand "uaddvsi4"
+  [(parallel [(set (reg:CCC CC_REG)
+                   (compare:CCC (plus:SI (match_operand:SI 1 "register_operand")
+                                         (match_operand:SI 2 "arith_operand"))
+                                (match_dup 1)))
+              (set (match_operand:SI 0 "register_operand")
+                   (plus:SI (match_dup 1) (match_dup 2)))])
+   (set (pc) (if_then_else (ltu (reg:CCC CC_REG) (const_int 0))
+                           (label_ref (match_operand 3))
+                           (pc)))]
+  "")
+
+;; Turning an add/sub instruction into the other does not change the Overflow
+;; flag so the 4096 trick can be used for operations in CCVmode.
+
+(define_expand "addvsi4"
+  [(parallel [(set (reg:CCV CC_REG)
+                   (compare:CCV (plus:SI (match_operand:SI 1 "register_operand")
+                                         (match_operand:SI 2 "arith_add_operand"))
+                                (unspec:SI [(match_dup 1) (match_dup 2)]
+                                           UNSPEC_ADDV)))
+              (set (match_operand:SI 0 "register_operand")
+                   (plus:SI (match_dup 1) (match_dup 2)))])
+   (set (pc) (if_then_else (ne (reg:CCV CC_REG) (const_int 0))
+                           (label_ref (match_operand 3))
+                           (pc)))]
+  "")
+
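+;; (Editor's note, not part of the original patch: in the patterns
+;; below, CCNZ/CCXNZ appear to be used where only the N and Z bits are
+;; meaningful, CCC/CCXC where only the carry bit is, and CCV/CCXV where
+;; only the overflow bit is; plain CC/CCX cover full comparisons.)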
+(define_insn "*cmp_ccnz_plus"
+  [(set (reg:CCNZ CC_REG)
+        (compare:CCNZ (plus:SI (match_operand:SI 0 "register_operand" "%r")
+                               (match_operand:SI 1 "arith_operand" "rI"))
+                      (const_int 0)))]
+  ""
+  "addcc\t%0, %1, %%g0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccxnz_plus"
+  [(set (reg:CCXNZ CC_REG)
+        (compare:CCXNZ (plus:DI (match_operand:DI 0 "register_operand" "%r")
+                                (match_operand:DI 1 "arith_operand" "rI"))
+                       (const_int 0)))]
+  "TARGET_ARCH64"
+  "addcc\t%0, %1, %%g0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccnz_plus_set"
+  [(set (reg:CCNZ CC_REG)
+        (compare:CCNZ (plus:SI (match_operand:SI 1 "register_operand" "%r")
+                               (match_operand:SI 2 "arith_operand" "rI"))
+                      (const_int 0)))
+   (set (match_operand:SI 0 "register_operand" "=r")
+        (plus:SI (match_dup 1) (match_dup 2)))]
+  ""
+  "addcc\t%1, %2, %0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccxnz_plus_set"
+  [(set (reg:CCXNZ CC_REG)
+        (compare:CCXNZ (plus:DI (match_operand:DI 1 "register_operand" "%r")
+                                (match_operand:DI 2 "arith_operand" "rI"))
+                       (const_int 0)))
+   (set (match_operand:DI 0 "register_operand" "=r")
+        (plus:DI (match_dup 1) (match_dup 2)))]
+  "TARGET_ARCH64"
+  "addcc\t%1, %2, %0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccc_plus"
+  [(set (reg:CCC CC_REG)
+        (compare:CCC (plus:SI (match_operand:SI 0 "register_operand" "%r")
+                              (match_operand:SI 1 "arith_operand" "rI"))
+                     (match_dup 0)))]
+  ""
+  "addcc\t%0, %1, %%g0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccxc_plus"
+  [(set (reg:CCXC CC_REG)
+        (compare:CCXC (plus:DI (match_operand:DI 0 "register_operand" "%r")
+                               (match_operand:DI 1 "arith_operand" "rI"))
+                      (match_dup 0)))]
+  "TARGET_ARCH64"
+  "addcc\t%0, %1, %%g0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccc_plus_set"
+  [(set (reg:CCC CC_REG)
+        (compare:CCC (plus:SI (match_operand:SI 1 "register_operand" "%r")
+                              (match_operand:SI 2 "arith_operand" "rI"))
+                     (match_dup 1)))
+   (set (match_operand:SI 0 "register_operand" "=r")
+        (plus:SI (match_dup 1) (match_dup 2)))]
+  ""
+  "addcc\t%1, %2, %0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccxc_plus_set"
+  [(set (reg:CCXC CC_REG)
+        (compare:CCXC (plus:DI (match_operand:DI 1 "register_operand" "%r")
+                               (match_operand:DI 2 "arith_operand" "rI"))
+                      (match_dup 1)))
+   (set (match_operand:DI 0 "register_operand" "=r")
+        (plus:DI (match_dup 1) (match_dup 2)))]
+  "TARGET_ARCH64"
+  "addcc\t%1, %2, %0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccc_plus_sltu_set"
+  [(set (reg:CCC CC_REG)
+        (compare:CCC (zero_extend:DI
+                       (plus:SI
+                         (plus:SI (match_operand:SI 1 "register_operand" "%r")
+                                  (match_operand:SI 2 "arith_operand" "rI"))
+                         (ltu:SI (reg:CCC CC_REG) (const_int 0))))
+                     (plus:DI (plus:DI (zero_extend:DI (match_dup 1))
+                                       (zero_extend:DI (match_dup 2)))
+                              (ltu:DI (reg:CCC CC_REG) (const_int 0)))))
+   (set (match_operand:SI 0 "register_operand" "=r")
+        (plus:SI (plus:SI (match_dup 1) (match_dup 2))
+                 (ltu:SI (reg:CCC CC_REG) (const_int 0))))]
+  ""
+  "addxcc\t%1, %2, %0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccv_plus"
+  [(set (reg:CCV CC_REG)
+        (compare:CCV (plus:SI (match_operand:SI 0 "register_operand" "%r,r")
+                              (match_operand:SI 1 "arith_add_operand" "rI,O"))
+                     (unspec:SI [(match_dup 0) (match_dup 1)] UNSPEC_ADDV)))]
+  ""
+  "@
+   addcc\t%0, %1, %%g0
+   subcc\t%0, -%1, %%g0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccxv_plus"
+  [(set (reg:CCXV CC_REG)
+        (compare:CCXV (plus:DI (match_operand:DI 0 "register_operand" "%r,r")
+                               (match_operand:DI 1 "arith_add_operand" "rI,O"))
+                      (unspec:DI [(match_dup 0) (match_dup 1)] UNSPEC_ADDV)))]
+  "TARGET_ARCH64"
+  "@
+   addcc\t%0, %1, %%g0
+   subcc\t%0, -%1, %%g0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccv_plus_set"
+  [(set (reg:CCV CC_REG)
+        (compare:CCV (plus:SI (match_operand:SI 1 "register_operand" "%r,r")
+                              (match_operand:SI 2 "arith_add_operand" "rI,O"))
+                     (unspec:SI [(match_dup 1) (match_dup 2)] UNSPEC_ADDV)))
+   (set (match_operand:SI 0 "register_operand" "=r,r")
+        (plus:SI (match_dup 1) (match_dup 2)))]
+  ""
+  "@
+   addcc\t%1, %2, %0
+   subcc\t%1, -%2, %0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccxv_plus_set"
+  [(set (reg:CCXV CC_REG)
+        (compare:CCXV (plus:DI (match_operand:DI 1 "register_operand" "%r,r")
+                               (match_operand:DI 2 "arith_add_operand" "rI,O"))
+                      (unspec:DI [(match_dup 1) (match_dup 2)] UNSPEC_ADDV)))
+   (set (match_operand:DI 0 "register_operand" "=r,r")
+        (plus:DI (match_dup 1) (match_dup 2)))]
+  "TARGET_ARCH64"
+  "@
+   addcc\t%1, %2, %0
+   subcc\t%1, -%2, %0"
+  [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccv_plus_sltu_set"
+  [(set (reg:CCV CC_REG)
+        (compare:CCV (plus:SI (plus:SI (match_operand:SI 1 "register_operand" "%r")
+                                       (match_operand:SI 2 "arith_operand" "rI"))
+                              (ltu:SI (reg:CCC CC_REG) (const_int 0)))
+                     (unspec:SI [(plus:SI (match_dup 1) (match_dup 2))
+                                 (ltu:SI (reg:CCC CC_REG) (const_int 0))]
+                                UNSPEC_ADDV)))
+   (set (match_operand:SI 0 "register_operand" "=r")
+        (plus:SI (plus:SI (match_dup 1) (match_dup 2))
+                 (ltu:SI (reg:CCC CC_REG) (const_int 0))))]
+  ""
+  "addxcc\t%1, %2, %0"
+  [(set_attr "type" "compare")])
+
+
+(define_expand "subdi3"
+  [(set (match_operand:DI 0 "register_operand" "")
+        (minus:DI (match_operand:DI 1 "register_operand" "")
+                  (match_operand:DI 2 "arith_double_add_operand" "")))]
+  ""
+{
+  if (TARGET_ARCH32)
+    {
+      emit_insn (gen_subdi3_sp32 (operands[0], operands[1], operands[2]));
+      DONE;
+    }
+})
+
+;; Turning an add/sub instruction into the other changes the Carry flag
+;; so the 4096 trick cannot be used for operations in CCXmode.
+
+(define_expand "usubvdi4"
+  [(parallel [(set (reg:CCX CC_REG)
+                   (compare:CCX (match_operand:DI 1 "register_or_zero_operand")
+                                (match_operand:DI 2 "arith_double_operand")))
+              (set (match_operand:DI 0 "register_operand")
+                   (minus:DI (match_dup 1) (match_dup 2)))])
+   (set (pc) (if_then_else (ltu (reg:CCX CC_REG) (const_int 0))
+                           (label_ref (match_operand 3))
+                           (pc)))]
+  ""
+{
+  if (operands[1] == const0_rtx)
+    {
+      emit_insn (gen_unegvdi3 (operands[0], operands[2], operands[3]));
+      DONE;
+    }
+
+  if (TARGET_ARCH32)
+    {
+      emit_insn (gen_usubvdi4_sp32 (operands[0], operands[1], operands[2]));
+      rtx x = gen_rtx_LTU (VOIDmode, gen_rtx_REG (CCCmode, SPARC_ICC_REG),
+                           const0_rtx);
+      emit_jump_insn (gen_cbranchcc4 (x, XEXP (x, 0), XEXP (x, 1), operands[3]));
+      DONE;
+    }
+})
+
+;; Turning an add/sub instruction into the other does not change the Overflow
+;; flag so the 4096 trick can be used for operations in CCXVmode.
+
+(define_expand "subvdi4"
+  [(parallel [(set (reg:CCXV CC_REG)
+                   (compare:CCXV (minus:DI (match_operand:DI 1 "register_operand")
+                                           (match_operand:DI 2 "arith_double_add_operand"))
+                                 (unspec:DI [(match_dup 1) (match_dup 2)]
+                                            UNSPEC_SUBV)))
+              (set (match_operand:DI 0 "register_operand")
+                   (minus:DI (match_dup 1) (match_dup 2)))])
+   (set (pc) (if_then_else (ne (reg:CCXV CC_REG) (const_int 0))
+                           (label_ref (match_operand 3))
+                           (pc)))]
+  ""
+{
+  if (TARGET_ARCH32)
+    {
+      emit_insn (gen_subvdi4_sp32 (operands[0], operands[1], operands[2]));
+      rtx x = gen_rtx_NE (VOIDmode, gen_rtx_REG (CCVmode, SPARC_ICC_REG),
+                          const0_rtx);
+      emit_jump_insn (gen_cbranchcc4 (x, XEXP (x, 0), XEXP (x, 1), operands[3]));
+      DONE;
+    }
+})
+
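+;; (Editor's note, not part of the original patch: the DImode
+;; subtractions below split after reload into a "subcc" of the low
+;; words followed by a "subx"-style subtract of the high words that
+;; consumes the borrow.)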
  18265. +(define_insn_and_split "subdi3_sp32"
  18266. + [(set (match_operand:DI 0 "register_operand" "=&r")
  18267. + (minus:DI (match_operand:DI 1 "register_operand" "r")
  18268. + (match_operand:DI 2 "arith_double_operand" "rHI")))
  18269. + (clobber (reg:CC CC_REG))]
  18270. + "TARGET_ARCH32"
  18271. + "#"
  18272. + "&& reload_completed"
  18273. + [(parallel [(set (reg:CC CC_REG)
  18274. + (compare:CC (match_dup 4) (match_dup 5)))
  18275. + (set (match_dup 3)
  18276. + (minus:SI (match_dup 4) (match_dup 5)))])
  18277. + (set (match_dup 6)
  18278. + (minus:SI (minus:SI (match_dup 7) (match_dup 8))
  18279. + (ltu:SI (reg:CC CC_REG) (const_int 0))))]
  18280. +{
  18281. + operands[3] = gen_lowpart (SImode, operands[0]);
  18282. + operands[4] = gen_lowpart (SImode, operands[1]);
  18283. + operands[5] = gen_lowpart (SImode, operands[2]);
  18284. + operands[6] = gen_highpart (SImode, operands[0]);
  18285. + operands[7] = gen_highpart (SImode, operands[1]);
  18286. + operands[8] = gen_highpart_mode (SImode, DImode, operands[2]);
  18287. +}
  18288. + [(set_attr "length" "2")])
  18289. +
  18290. +(define_insn_and_split "usubvdi4_sp32"
  18291. + [(set (reg:CCC CC_REG)
  18292. + (compare:CCC (match_operand:DI 1 "register_operand" "r")
  18293. + (match_operand:DI 2 "arith_double_operand" "rHI")))
  18294. + (set (match_operand:DI 0 "register_operand" "=&r")
  18295. + (minus:DI (match_dup 1) (match_dup 2)))]
  18296. + "TARGET_ARCH32"
  18297. + "#"
  18298. + "&& reload_completed"
  18299. + [(parallel [(set (reg:CC CC_REG)
  18300. + (compare:CC (match_dup 4) (match_dup 5)))
  18301. + (set (match_dup 3)
  18302. + (minus:SI (match_dup 4) (match_dup 5)))])
  18303. + (parallel [(set (reg:CCC CC_REG)
  18304. + (compare:CCC (zero_extend:DI
  18305. + (minus:SI (minus:SI (match_dup 7)
  18306. + (ltu:SI (reg:CC CC_REG)
  18307. + (const_int 0)))
  18308. + (match_dup 8)))
  18309. + (minus:DI
  18310. + (minus:DI (zero_extend:DI (match_dup 7))
  18311. + (ltu:DI (reg:CC CC_REG)
  18312. + (const_int 0)))
  18313. + (zero_extend:DI (match_dup 8)))))
  18314. + (set (match_dup 6)
  18315. + (minus:SI (minus:SI (match_dup 7)
  18316. + (ltu:SI (reg:CC CC_REG)
  18317. + (const_int 0)))
  18318. + (match_dup 8)))])]
  18319. +{
  18320. + operands[3] = gen_lowpart (SImode, operands[0]);
  18321. + operands[4] = gen_lowpart (SImode, operands[1]);
  18322. + operands[5] = gen_lowpart (SImode, operands[2]);
  18323. + operands[6] = gen_highpart (SImode, operands[0]);
  18324. + operands[7] = gen_highpart_mode (SImode, DImode, operands[1]);
  18325. + operands[8] = gen_highpart_mode (SImode, DImode, operands[2]);
  18326. +}
  18327. + [(set_attr "length" "2")])
  18328. +
  18329. +(define_insn_and_split "subvdi4_sp32"
  18330. + [(set (reg:CCV CC_REG)
  18331. + (compare:CCV (minus:DI (match_operand:DI 1 "register_operand" "%r")
  18332. + (match_operand:DI 2 "arith_double_operand" "rHI"))
  18333. + (unspec:DI [(match_dup 1) (match_dup 2)] UNSPEC_SUBV)))
  18334. + (set (match_operand:DI 0 "register_operand" "=&r")
  18335. + (minus:DI (match_dup 1) (match_dup 2)))]
  18336. + "TARGET_ARCH32"
  18337. + "#"
  18338. + "&& reload_completed"
  18339. + [(parallel [(set (reg:CC CC_REG)
  18340. + (compare:CC (match_dup 4) (match_dup 5)))
  18341. + (set (match_dup 3)
  18342. + (minus:SI (match_dup 4) (match_dup 5)))])
  18343. + (parallel [(set (reg:CCV CC_REG)
  18344. + (compare:CCV (minus:SI (minus:SI (match_dup 7) (match_dup 8))
  18345. + (ltu:SI (reg:CC CC_REG)
  18346. + (const_int 0)))
  18347. + (unspec:SI [(minus:SI (match_dup 7) (match_dup 8))
  18348. + (ltu:SI (reg:CC CC_REG)
  18349. + (const_int 0))]
  18350. + UNSPEC_SUBV)))
  18351. + (set (match_dup 6)
  18352. + (minus:SI (minus:SI (match_dup 7) (match_dup 8))
  18353. + (ltu:SI (reg:CC CC_REG) (const_int 0))))])]
  18354. +{
  18355. + operands[3] = gen_lowpart (SImode, operands[0]);
  18356. + operands[4] = gen_lowpart (SImode, operands[1]);
  18357. + operands[5] = gen_lowpart (SImode, operands[2]);
  18358. + operands[6] = gen_highpart (SImode, operands[0]);
  18359. + operands[7] = gen_highpart_mode (SImode, DImode, operands[1]);
  18360. + operands[8] = gen_highpart_mode (SImode, DImode, operands[2]);
  18361. +}
  18362. + [(set_attr "length" "2")])
  18363. +
  18364. +(define_insn_and_split "*subx_extend_sp32"
  18365. + [(set (match_operand:DI 0 "register_operand" "=r")
  18366. + (zero_extend:DI (minus:SI (minus:SI
  18367. + (match_operand:SI 1 "register_or_zero_operand" "rJ")
  18368. + (match_operand:SI 2 "arith_operand" "rI"))
  18369. + (ltu:SI (reg:CCC CC_REG) (const_int 0)))))]
  18370. + "TARGET_ARCH32"
  18371. + "#"
  18372. + "&& reload_completed"
  18373. + [(set (match_dup 3) (minus:SI (minus:SI (match_dup 1) (match_dup 2))
  18374. + (ltu:SI (reg:CCC CC_REG) (const_int 0))))
  18375. + (set (match_dup 4) (const_int 0))]
  18376. + "operands[3] = gen_lowpart (SImode, operands[0]);
  18377. + operands[4] = gen_highpart (SImode, operands[0]);"
  18378. + [(set_attr "length" "2")])
  18379. +
  18380. +(define_insn_and_split "*subdi3_extend_sp32"
  18381. + [(set (match_operand:DI 0 "register_operand" "=&r")
  18382. + (minus:DI (match_operand:DI 1 "register_operand" "r")
  18383. + (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))
  18384. + (clobber (reg:CC CC_REG))]
  18385. + "TARGET_ARCH32"
  18386. + "#"
  18387. + "&& reload_completed"
  18388. + [(parallel [(set (reg:CC CC_REG)
  18389. + (compare:CC (match_dup 3) (match_dup 2)))
  18390. + (set (match_dup 5) (minus:SI (match_dup 3) (match_dup 2)))])
  18391. + (set (match_dup 6)
  18392. + (minus:SI (minus:SI (match_dup 4) (const_int 0))
  18393. + (ltu:SI (reg:CC CC_REG) (const_int 0))))]
  18394. + "operands[3] = gen_lowpart (SImode, operands[1]);
  18395. + operands[4] = gen_highpart (SImode, operands[1]);
  18396. + operands[5] = gen_lowpart (SImode, operands[0]);
  18397. + operands[6] = gen_highpart (SImode, operands[0]);"
  18398. + [(set_attr "length" "2")])
  18399. +
  18400. +(define_insn "*subdi3_sp64"
  18401. + [(set (match_operand:DI 0 "register_operand" "=r,r")
  18402. + (minus:DI (match_operand:DI 1 "register_operand" "r,r")
  18403. + (match_operand:DI 2 "arith_add_operand" "rI,O")))]
  18404. + "TARGET_ARCH64"
  18405. + "@
  18406. + sub\t%1, %2, %0
  18407. + add\t%1, -%2, %0")
  18408. +
  18409. +(define_insn "subsi3"
  18410. + [(set (match_operand:SI 0 "register_operand" "=r,r")
  18411. + (minus:SI (match_operand:SI 1 "register_operand" "r,r")
  18412. + (match_operand:SI 2 "arith_add_operand" "rI,O")))]
  18413. + ""
  18414. + "@
  18415. + sub\t%1, %2, %0
  18416. + add\t%1, -%2, %0")
  18417. +
  18418. +;; Turning an add/sub instruction into the other changes the Carry flag
  18419. +;; so the 4096 trick cannot be used for operations in CCmode.
  18420. +
  18421. +(define_expand "usubvsi4"
  18422. + [(parallel [(set (reg:CC CC_REG)
  18423. + (compare:CC (match_operand:SI 1 "register_or_zero_operand")
  18424. + (match_operand:SI 2 "arith_operand")))
  18425. + (set (match_operand:SI 0 "register_operand")
  18426. + (minus:SI (match_dup 1) (match_dup 2)))])
  18427. + (set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
  18428. + (label_ref (match_operand 3))
  18429. + (pc)))]
  18430. + ""
  18431. +{
  18432. + if (operands[1] == const0_rtx)
  18433. + {
  18434. + emit_insn (gen_unegvsi3 (operands[0], operands[2], operands[3]));
  18435. + DONE;
  18436. + }
  18437. +})
  18438. +
  18439. +;; Turning an add/sub instruction into the other does not change the Overflow
  18440. +;; flag so the 4096 trick can be used for operations in CCVmode.
  18441. +
  18442. +(define_expand "subvsi4"
  18443. + [(parallel [(set (reg:CCV CC_REG)
  18444. + (compare:CCV (minus:SI (match_operand:SI 1 "register_operand")
  18445. + (match_operand:SI 2 "arith_add_operand"))
  18446. + (unspec:SI [(match_dup 1) (match_dup 2)]
  18447. + UNSPEC_SUBV)))
  18448. + (set (match_operand:SI 0 "register_operand")
  18449. + (minus:SI (match_dup 1) (match_dup 2)))])
  18450. + (set (pc) (if_then_else (ne (reg:CCV CC_REG) (const_int 0))
  18451. + (label_ref (match_operand 3))
  18452. + (pc)))]
  18453. + "")
  18454. +
  18455. +(define_insn "*cmp_ccnz_minus"
  18456. + [(set (reg:CCNZ CC_REG)
  18457. + (compare:CCNZ (minus:SI (match_operand:SI 0 "register_or_zero_operand" "rJ")
  18458. + (match_operand:SI 1 "arith_operand" "rI"))
  18459. + (const_int 0)))]
  18460. + ""
  18461. + "subcc\t%r0, %1, %%g0"
  18462. + [(set_attr "type" "compare")])
  18463. +
  18464. +(define_insn "*cmp_ccxnz_minus"
  18465. + [(set (reg:CCXNZ CC_REG)
  18466. + (compare:CCXNZ (minus:DI (match_operand:DI 0 "register_or_zero_operand" "rJ")
  18467. + (match_operand:DI 1 "arith_operand" "rI"))
  18468. + (const_int 0)))]
  18469. + "TARGET_ARCH64"
  18470. + "subcc\t%r0, %1, %%g0"
  18471. + [(set_attr "type" "compare")])
  18472. +
  18473. +(define_insn "*cmp_ccnz_minus_set"
  18474. + [(set (reg:CCNZ CC_REG)
  18475. + (compare:CCNZ (minus:SI (match_operand:SI 1 "register_or_zero_operand" "rJ")
  18476. + (match_operand:SI 2 "arith_operand" "rI"))
  18477. + (const_int 0)))
  18478. + (set (match_operand:SI 0 "register_operand" "=r")
  18479. + (minus:SI (match_dup 1) (match_dup 2)))]
  18480. + ""
  18481. + "subcc\t%r1, %2, %0"
  18482. + [(set_attr "type" "compare")])
  18483. +
  18484. +(define_insn "*cmp_ccxnz_minus_set"
  18485. + [(set (reg:CCXNZ CC_REG)
  18486. + (compare:CCXNZ (minus:DI (match_operand:DI 1 "register_or_zero_operand" "rJ")
  18487. + (match_operand:DI 2 "arith_operand" "rI"))
  18488. + (const_int 0)))
  18489. + (set (match_operand:DI 0 "register_operand" "=r")
  18490. + (minus:DI (match_dup 1) (match_dup 2)))]
  18491. + "TARGET_ARCH64"
  18492. + "subcc\t%r1, %2, %0"
  18493. + [(set_attr "type" "compare")])
  18494. +
  18495. +(define_insn "*cmpsi_set"
  18496. + [(set (reg:CC CC_REG)
  18497. + (compare:CC (match_operand:SI 1 "register_or_zero_operand" "rJ")
  18498. + (match_operand:SI 2 "arith_operand" "rI")))
  18499. + (set (match_operand:SI 0 "register_operand" "=r")
  18500. + (minus:SI (match_dup 1) (match_dup 2)))]
  18501. + ""
  18502. + "subcc\t%r1, %2, %0"
  18503. + [(set_attr "type" "compare")])
  18504. +
  18505. +(define_insn "*cmpdi_set"
  18506. + [(set (reg:CCX CC_REG)
  18507. + (compare:CCX (match_operand:DI 1 "register_or_zero_operand" "rJ")
  18508. + (match_operand:DI 2 "arith_operand" "rI")))
  18509. + (set (match_operand:DI 0 "register_operand" "=r")
  18510. + (minus:DI (match_dup 1) (match_dup 2)))]
  18511. + "TARGET_ARCH64"
  18512. + "subcc\t%r1, %2, %0"
  18513. + [(set_attr "type" "compare")])
  18514. +
  18515. +(define_insn "*cmp_ccc_minus_sltu_set"
  18516. + [(set (reg:CCC CC_REG)
  18517. + (compare:CCC (zero_extend:DI
  18518. + (minus:SI
  18519. + (minus:SI
  18520. + (match_operand:SI 1 "register_or_zero_operand" "rJ")
  18521. + (ltu:SI (reg:CC CC_REG) (const_int 0)))
  18522. + (match_operand:SI 2 "arith_operand" "rI")))
  18523. + (minus:DI
  18524. + (minus:DI
  18525. + (zero_extend:DI (match_dup 1))
  18526. + (ltu:DI (reg:CC CC_REG) (const_int 0)))
  18527. + (zero_extend:DI (match_dup 2)))))
  18528. + (set (match_operand:SI 0 "register_operand" "=r")
  18529. + (minus:SI (minus:SI (match_dup 1)
  18530. + (ltu:SI (reg:CC CC_REG) (const_int 0)))
  18531. + (match_dup 2)))]
  18532. + ""
  18533. + "subxcc\t%r1, %2, %0"
  18534. + [(set_attr "type" "compare")])
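+
+;; The compare above encodes the borrow with a standard idiom: the 32-bit
+;; result zero-extended to 64 bits is compared against the same subtraction
+;; carried out exactly in 64 bits, and the two differ precisely when the
+;; 32-bit operation borrowed.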
  18535. +
  18536. +(define_insn "*cmp_ccv_minus"
  18537. + [(set (reg:CCV CC_REG)
  18538. + (compare:CCV (minus:SI (match_operand:SI 0 "register_or_zero_operand" "rJ,rJ")
  18539. + (match_operand:SI 1 "arith_add_operand" "rI,O"))
  18540. + (unspec:SI [(match_dup 0) (match_dup 1)] UNSPEC_SUBV)))]
  18541. + ""
  18542. + "@
  18543. + subcc\t%r0, %1, %%g0
  18544. + addcc\t%r0, -%1, %%g0"
  18545. + [(set_attr "type" "compare")])
  18546. +
  18547. +(define_insn "*cmp_ccxv_minus"
  18548. + [(set (reg:CCXV CC_REG)
  18549. + (compare:CCXV (minus:DI (match_operand:DI 0 "register_or_zero_operand" "rJ,rJ")
  18550. + (match_operand:DI 1 "arith_add_operand" "rI,O"))
  18551. + (unspec:DI [(match_dup 0) (match_dup 1)] UNSPEC_SUBV)))]
  18552. + "TARGET_ARCH64"
  18553. + "@
  18554. + subcc\t%r0, %1, %%g0
  18555. + addcc\t%r0, -%1, %%g0"
  18556. + [(set_attr "type" "compare")])
  18557. +
  18558. +(define_insn "*cmp_ccv_minus_set"
  18559. + [(set (reg:CCV CC_REG)
  18560. + (compare:CCV (minus:SI (match_operand:SI 1 "register_or_zero_operand" "rJ,rJ")
  18561. + (match_operand:SI 2 "arith_add_operand" "rI,O"))
  18562. + (unspec:SI [(match_dup 1) (match_dup 2)] UNSPEC_SUBV)))
  18563. + (set (match_operand:SI 0 "register_operand" "=r,r")
  18564. + (minus:SI (match_dup 1) (match_dup 2)))]
  18565. + ""
  18566. + "@
  18567. + subcc\t%r1, %2, %0
  18568. + addcc\t%r1, -%2, %0"
  18569. + [(set_attr "type" "compare")])
  18570. +
  18571. +(define_insn "*cmp_ccxv_minus_set"
  18572. + [(set (reg:CCXV CC_REG)
  18573. + (compare:CCXV (minus:DI (match_operand:DI 1 "register_or_zero_operand" "rJ,rJ")
  18574. + (match_operand:DI 2 "arith_add_operand" "rI,O"))
  18575. + (unspec:DI [(match_dup 1) (match_dup 2)] UNSPEC_SUBV)))
  18576. + (set (match_operand:DI 0 "register_operand" "=r,r")
  18577. + (minus:DI (match_dup 1) (match_dup 2)))]
  18578. + "TARGET_ARCH64"
  18579. + "@
  18580. + subcc\t%r1, %2, %0
  18581. + addcc\t%r1, -%2, %0"
  18582. + [(set_attr "type" "compare")])
  18583. +
  18584. +(define_insn "*cmp_ccv_minus_sltu_set"
  18585. + [(set (reg:CCV CC_REG)
  18586. + (compare:CCV
  18587. + (minus:SI (minus:SI (match_operand:SI 1 "register_or_zero_operand" "rJ")
  18588. + (match_operand:SI 2 "arith_operand" "rI"))
  18589. + (ltu:SI (reg:CC CC_REG) (const_int 0)))
  18590. + (unspec:SI [(minus:SI (match_dup 1) (match_dup 2))
  18591. + (ltu:SI (reg:CC CC_REG) (const_int 0))]
  18592. + UNSPEC_SUBV)))
  18593. + (set (match_operand:SI 0 "register_operand" "=r")
  18594. + (minus:SI (minus:SI (match_dup 1) (match_dup 2))
  18595. + (ltu:SI (reg:CC CC_REG) (const_int 0))))]
  18596. + ""
  18597. + "subxcc\t%1, %2, %0"
  18598. + [(set_attr "type" "compare")])
  18599. +
  18600. +
  18601. +;; Integer multiply/divide instructions.
  18602. +
  18603. +;; The 32-bit multiply/divide instructions are deprecated on v9, but at
  18604. +;; least on UltraSPARC I, II and IIi they are still a win cycle-wise.
  18605. +
  18606. +(define_expand "mulsi3"
  18607. + [(set (match_operand:SI 0 "register_operand" "")
  18608. + (mult:SI (match_operand:SI 1 "arith_operand" "")
  18609. + (match_operand:SI 2 "arith_operand" "")))]
  18610. + "TARGET_HARD_MUL || TARGET_ARCH64"
  18611. + "")
  18612. +
  18613. +(define_insn "*mulsi3_sp32"
  18614. + [(set (match_operand:SI 0 "register_operand" "=r")
  18615. + (mult:SI (match_operand:SI 1 "arith_operand" "%r")
  18616. + (match_operand:SI 2 "arith_operand" "rI")))]
  18617. + "TARGET_HARD_MUL"
  18618. + "smul\t%1, %2, %0"
  18619. + [(set_attr "type" "imul")])
  18620. +
  18621. +(define_insn "*mulsi3_sp64"
  18622. + [(set (match_operand:SI 0 "register_operand" "=r")
  18623. + (mult:SI (match_operand:SI 1 "arith_operand" "%r")
  18624. + (match_operand:SI 2 "arith_operand" "rI")))]
  18625. + "TARGET_ARCH64"
  18626. + "mulx\t%1, %2, %0"
  18627. + [(set_attr "type" "imul")])
  18628. +
  18629. +(define_expand "muldi3"
  18630. + [(set (match_operand:DI 0 "register_operand" "")
  18631. + (mult:DI (match_operand:DI 1 "arith_operand" "")
  18632. + (match_operand:DI 2 "arith_operand" "")))]
  18633. + "TARGET_ARCH64 || TARGET_V8PLUS"
  18634. +{
  18635. + if (TARGET_V8PLUS)
  18636. + {
  18637. + emit_insn (gen_muldi3_v8plus (operands[0], operands[1], operands[2]));
  18638. + DONE;
  18639. + }
  18640. +})
  18641. +
  18642. +(define_insn "*muldi3_sp64"
  18643. + [(set (match_operand:DI 0 "register_operand" "=r")
  18644. + (mult:DI (match_operand:DI 1 "arith_operand" "%r")
  18645. + (match_operand:DI 2 "arith_operand" "rI")))]
  18646. + "TARGET_ARCH64"
  18647. + "mulx\t%1, %2, %0"
  18648. + [(set_attr "type" "imul")])
  18649. +
  18650. +;; V8plus wide multiply.
  18651. +(define_insn "muldi3_v8plus"
  18652. + [(set (match_operand:DI 0 "register_operand" "=r,h")
  18653. + (mult:DI (match_operand:DI 1 "arith_operand" "%r,0")
  18654. + (match_operand:DI 2 "arith_operand" "rI,rI")))
  18655. + (clobber (match_scratch:SI 3 "=&h,X"))
  18656. + (clobber (match_scratch:SI 4 "=&h,X"))]
  18657. + "TARGET_V8PLUS"
  18658. +{
  18659. + return output_v8plus_mult (insn, operands, \"mulx\");
  18660. +}
  18661. + [(set_attr "type" "multi")
  18662. + (set_attr "length" "9,8")])
  18663. +
  18664. +(define_insn "*cmp_mul_set"
  18665. + [(set (reg:CC CC_REG)
  18666. + (compare:CC (mult:SI (match_operand:SI 1 "arith_operand" "%r")
  18667. + (match_operand:SI 2 "arith_operand" "rI"))
  18668. + (const_int 0)))
  18669. + (set (match_operand:SI 0 "register_operand" "=r")
  18670. + (mult:SI (match_dup 1) (match_dup 2)))]
  18671. + "TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS"
  18672. + "smulcc\t%1, %2, %0"
  18673. + [(set_attr "type" "imul")])
  18674. +
  18675. +(define_expand "mulsidi3"
  18676. + [(set (match_operand:DI 0 "register_operand" "")
  18677. + (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" ""))
  18678. + (sign_extend:DI (match_operand:SI 2 "arith_operand" ""))))]
  18679. + "TARGET_HARD_MUL"
  18680. +{
  18681. + if (CONSTANT_P (operands[2]))
  18682. + {
  18683. + if (TARGET_V8PLUS)
  18684. + emit_insn (gen_const_mulsidi3_v8plus (operands[0], operands[1],
  18685. + operands[2]));
  18686. + else if (TARGET_ARCH32)
  18687. + emit_insn (gen_const_mulsidi3_sp32 (operands[0], operands[1],
  18688. + operands[2]));
  18689. + else
  18690. + emit_insn (gen_const_mulsidi3_sp64 (operands[0], operands[1],
  18691. + operands[2]));
  18692. + DONE;
  18693. + }
  18694. + if (TARGET_V8PLUS)
  18695. + {
  18696. + emit_insn (gen_mulsidi3_v8plus (operands[0], operands[1], operands[2]));
  18697. + DONE;
  18698. + }
  18699. +})
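+
+;; A sketch of C source that should expand through mulsidi3 (illustrative):
+;;
+;;   long long wide_mul (int a, int b)
+;;   {
+;;     return (long long) a * b;   /* widening 32x32 -> 64 signed multiply */
+;;   }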
  18700. +
  18701. +;; V9 puts the 64-bit product in a 64-bit register. Only out or global
  18702. +;; registers can hold 64-bit values in the V8plus environment.
  18703. +(define_insn "mulsidi3_v8plus"
  18704. + [(set (match_operand:DI 0 "register_operand" "=h,r")
  18705. + (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
  18706. + (sign_extend:DI (match_operand:SI 2 "register_operand" "r,r"))))
  18707. + (clobber (match_scratch:SI 3 "=X,&h"))]
  18708. + "TARGET_V8PLUS"
  18709. + "@
  18710. + smul\t%1, %2, %L0\n\tsrlx\t%L0, 32, %H0
  18711. + smul\t%1, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0"
  18712. + [(set_attr "type" "multi")
  18713. + (set_attr "length" "2,3")])
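+
+;; Hence the two alternatives above: with the result in an out/global ("h")
+;; register the product is computed in place and the high word extracted
+;; with srlx, while a result elsewhere needs the "h" scratch plus an extra
+;; move.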
  18714. +
  18715. +(define_insn "const_mulsidi3_v8plus"
  18716. + [(set (match_operand:DI 0 "register_operand" "=h,r")
  18717. + (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
  18718. + (match_operand:DI 2 "small_int_operand" "I,I")))
  18719. + (clobber (match_scratch:SI 3 "=X,&h"))]
  18720. + "TARGET_V8PLUS"
  18721. + "@
  18722. + smul\t%1, %2, %L0\n\tsrlx\t%L0, 32, %H0
  18723. + smul\t%1, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0"
  18724. + [(set_attr "type" "multi")
  18725. + (set_attr "length" "2,3")])
  18726. +
  18727. +(define_insn "*mulsidi3_sp32"
  18728. + [(set (match_operand:DI 0 "register_operand" "=r")
  18729. + (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
  18730. + (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
  18731. + "TARGET_HARD_MUL32"
  18732. +{
  18733. + return TARGET_SPARCLET
  18734. + ? "smuld\t%1, %2, %L0"
  18735. + : "smul\t%1, %2, %L0\n\trd\t%%y, %H0";
  18736. +}
  18737. + [(set (attr "type")
  18738. + (if_then_else (eq_attr "isa" "sparclet")
  18739. + (const_string "imul") (const_string "multi")))
  18740. + (set (attr "length")
  18741. + (if_then_else (eq_attr "isa" "sparclet")
  18742. + (const_int 1) (const_int 2)))])
  18743. +
  18744. +(define_insn "*mulsidi3_sp64"
  18745. + [(set (match_operand:DI 0 "register_operand" "=r")
  18746. + (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
  18747. + (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
  18748. + "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
  18749. + "smul\t%1, %2, %0"
  18750. + [(set_attr "type" "imul")])
  18751. +
  18752. +;; Extra pattern, because sign_extend of a constant isn't valid.
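+;; (In RTL a constant is represented directly in the wider mode, so a form
+;; like (sign_extend:DI (const_int 5)) never appears; hence the DImode
+;; small_int_operand in the pattern below.)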
  18753. +
  18754. +(define_insn "const_mulsidi3_sp32"
  18755. + [(set (match_operand:DI 0 "register_operand" "=r")
  18756. + (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
  18757. + (match_operand:DI 2 "small_int_operand" "I")))]
  18758. + "TARGET_HARD_MUL32"
  18759. +{
  18760. + return TARGET_SPARCLET
  18761. + ? "smuld\t%1, %2, %L0"
  18762. + : "smul\t%1, %2, %L0\n\trd\t%%y, %H0";
  18763. +}
  18764. + [(set (attr "type")
  18765. + (if_then_else (eq_attr "isa" "sparclet")
  18766. + (const_string "imul") (const_string "multi")))
  18767. + (set (attr "length")
  18768. + (if_then_else (eq_attr "isa" "sparclet")
  18769. + (const_int 1) (const_int 2)))])
  18770. +
  18771. +(define_insn "const_mulsidi3_sp64"
  18772. + [(set (match_operand:DI 0 "register_operand" "=r")
  18773. + (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
  18774. + (match_operand:DI 2 "small_int_operand" "I")))]
  18775. + "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
  18776. + "smul\t%1, %2, %0"
  18777. + [(set_attr "type" "imul")])
  18778. +
  18779. +(define_expand "smulsi3_highpart"
  18780. + [(set (match_operand:SI 0 "register_operand" "")
  18781. + (truncate:SI
  18782. + (lshiftrt:DI
  18783. + (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" ""))
  18784. + (sign_extend:DI (match_operand:SI 2 "arith_operand" "")))
  18785. + (const_int 32))))]
  18786. + "TARGET_HARD_MUL && TARGET_ARCH32"
  18787. +{
  18788. + if (CONSTANT_P (operands[2]))
  18789. + {
  18790. + if (TARGET_V8PLUS)
  18791. + {
  18792. + emit_insn (gen_const_smulsi3_highpart_v8plus (operands[0],
  18793. + operands[1],
  18794. + operands[2],
  18795. + GEN_INT (32)));
  18796. + DONE;
  18797. + }
  18798. + emit_insn (gen_const_smulsi3_highpart (operands[0], operands[1], operands[2]));
  18799. + DONE;
  18800. + }
  18801. + if (TARGET_V8PLUS)
  18802. + {
  18803. + emit_insn (gen_smulsi3_highpart_v8plus (operands[0], operands[1],
  18804. + operands[2], GEN_INT (32)));
  18805. + DONE;
  18806. + }
  18807. +})
  18808. +
  18809. +(define_insn "smulsi3_highpart_v8plus"
  18810. + [(set (match_operand:SI 0 "register_operand" "=h,r")
  18811. + (truncate:SI
  18812. + (lshiftrt:DI
  18813. + (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
  18814. + (sign_extend:DI (match_operand:SI 2 "register_operand" "r,r")))
  18815. + (match_operand:SI 3 "small_int_operand" "I,I"))))
  18816. + (clobber (match_scratch:SI 4 "=X,&h"))]
  18817. + "TARGET_V8PLUS"
  18818. + "@
  18819. + smul\t%1, %2, %0\;srlx\t%0, %3, %0
  18820. + smul\t%1, %2, %4\;srlx\t%4, %3, %0"
  18821. + [(set_attr "type" "multi")
  18822. + (set_attr "length" "2")])
  18823. +
  18824. +;; The combiner changes TRUNCATE in the previous pattern to SUBREG.
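+;; (The byte offset 4 selects the low 32-bit word of the DImode value on
+;; big-endian SPARC, matching the truncate form above.)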
  18825. +(define_insn ""
  18826. + [(set (match_operand:SI 0 "register_operand" "=h,r")
  18827. + (subreg:SI
  18828. + (lshiftrt:DI
  18829. + (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
  18830. + (sign_extend:DI (match_operand:SI 2 "register_operand" "r,r")))
  18831. + (match_operand:SI 3 "small_int_operand" "I,I")) 4))
  18832. + (clobber (match_scratch:SI 4 "=X,&h"))]
  18833. + "TARGET_V8PLUS"
  18834. + "@
  18835. + smul\t%1, %2, %0\n\tsrlx\t%0, %3, %0
  18836. + smul\t%1, %2, %4\n\tsrlx\t%4, %3, %0"
  18837. + [(set_attr "type" "multi")
  18838. + (set_attr "length" "2")])
  18839. +
  18840. +(define_insn "const_smulsi3_highpart_v8plus"
  18841. + [(set (match_operand:SI 0 "register_operand" "=h,r")
  18842. + (truncate:SI
  18843. + (lshiftrt:DI
  18844. + (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
  18845. + (match_operand:DI 2 "small_int_operand" "I,I"))
  18846. + (match_operand:SI 3 "small_int_operand" "I,I"))))
  18847. + (clobber (match_scratch:SI 4 "=X,&h"))]
  18848. + "TARGET_V8PLUS"
  18849. + "@
  18850. + smul\t%1, %2, %0\n\tsrlx\t%0, %3, %0
  18851. + smul\t%1, %2, %4\n\tsrlx\t%4, %3, %0"
  18852. + [(set_attr "type" "multi")
  18853. + (set_attr "length" "2")])
  18854. +
  18855. +(define_insn "*smulsi3_highpart_sp32"
  18856. + [(set (match_operand:SI 0 "register_operand" "=r")
  18857. + (truncate:SI
  18858. + (lshiftrt:DI
  18859. + (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
  18860. + (sign_extend:DI (match_operand:SI 2 "register_operand" "r")))
  18861. + (const_int 32))))]
  18862. + "TARGET_HARD_MUL32"
  18863. + "smul\t%1, %2, %%g0\n\trd\t%%y, %0"
  18864. + [(set_attr "type" "multi")
  18865. + (set_attr "length" "2")])
  18866. +
  18867. +(define_insn "const_smulsi3_highpart"
  18868. + [(set (match_operand:SI 0 "register_operand" "=r")
  18869. + (truncate:SI
  18870. + (lshiftrt:DI
  18871. + (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
  18872. + (match_operand:DI 2 "small_int_operand" "i"))
  18873. + (const_int 32))))]
  18874. + "TARGET_HARD_MUL32"
  18875. + "smul\t%1, %2, %%g0\n\trd\t%%y, %0"
  18876. + [(set_attr "type" "multi")
  18877. + (set_attr "length" "2")])
  18878. +
  18879. +(define_expand "umulsidi3"
  18880. + [(set (match_operand:DI 0 "register_operand" "")
  18881. + (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
  18882. + (zero_extend:DI (match_operand:SI 2 "uns_arith_operand" ""))))]
  18883. + "TARGET_HARD_MUL"
  18884. +{
  18885. + if (CONSTANT_P (operands[2]))
  18886. + {
  18887. + if (TARGET_V8PLUS)
  18888. + emit_insn (gen_const_umulsidi3_v8plus (operands[0], operands[1],
  18889. + operands[2]));
  18890. + else if (TARGET_ARCH32)
  18891. + emit_insn (gen_const_umulsidi3_sp32 (operands[0], operands[1],
  18892. + operands[2]));
  18893. + else
  18894. + emit_insn (gen_const_umulsidi3_sp64 (operands[0], operands[1],
  18895. + operands[2]));
  18896. + DONE;
  18897. + }
  18898. + if (TARGET_V8PLUS)
  18899. + {
  18900. + emit_insn (gen_umulsidi3_v8plus (operands[0], operands[1], operands[2]));
  18901. + DONE;
  18902. + }
  18903. +})
  18904. +
  18905. +(define_insn "umulsidi3_v8plus"
  18906. + [(set (match_operand:DI 0 "register_operand" "=h,r")
  18907. + (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
  18908. + (zero_extend:DI (match_operand:SI 2 "register_operand" "r,r"))))
  18909. + (clobber (match_scratch:SI 3 "=X,&h"))]
  18910. + "TARGET_V8PLUS"
  18911. + "@
  18912. + umul\t%1, %2, %L0\n\tsrlx\t%L0, 32, %H0
  18913. + umul\t%1, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0"
  18914. + [(set_attr "type" "multi")
  18915. + (set_attr "length" "2,3")])
  18916. +
  18917. +(define_insn "*umulsidi3_sp32"
  18918. + [(set (match_operand:DI 0 "register_operand" "=r")
  18919. + (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
  18920. + (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
  18921. + "TARGET_HARD_MUL32"
  18922. +{
  18923. + return TARGET_SPARCLET
  18924. + ? "umuld\t%1, %2, %L0"
  18925. + : "umul\t%1, %2, %L0\n\trd\t%%y, %H0";
  18926. +}
  18927. + [(set (attr "type")
  18928. + (if_then_else (eq_attr "isa" "sparclet")
  18929. + (const_string "imul") (const_string "multi")))
  18930. + (set (attr "length")
  18931. + (if_then_else (eq_attr "isa" "sparclet")
  18932. + (const_int 1) (const_int 2)))])
  18933. +
  18934. +(define_insn "*umulsidi3_sp64"
  18935. + [(set (match_operand:DI 0 "register_operand" "=r")
  18936. + (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
  18937. + (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
  18938. + "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
  18939. + "umul\t%1, %2, %0"
  18940. + [(set_attr "type" "imul")])
  18941. +
  18942. +;; Extra pattern, because zero_extend of a constant isn't valid.
  18943. +
  18944. +(define_insn "const_umulsidi3_sp32"
  18945. + [(set (match_operand:DI 0 "register_operand" "=r")
  18946. + (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
  18947. + (match_operand:DI 2 "uns_small_int_operand" "")))]
  18948. + "TARGET_HARD_MUL32"
  18949. +{
  18950. + return TARGET_SPARCLET
  18951. + ? "umuld\t%1, %s2, %L0"
  18952. + : "umul\t%1, %s2, %L0\n\trd\t%%y, %H0";
  18953. +}
  18954. + [(set (attr "type")
  18955. + (if_then_else (eq_attr "isa" "sparclet")
  18956. + (const_string "imul") (const_string "multi")))
  18957. + (set (attr "length")
  18958. + (if_then_else (eq_attr "isa" "sparclet")
  18959. + (const_int 1) (const_int 2)))])
  18960. +
  18961. +(define_insn "const_umulsidi3_sp64"
  18962. + [(set (match_operand:DI 0 "register_operand" "=r")
  18963. + (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
  18964. + (match_operand:DI 2 "uns_small_int_operand" "")))]
  18965. + "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
  18966. + "umul\t%1, %s2, %0"
  18967. + [(set_attr "type" "imul")])
  18968. +
  18969. +(define_insn "const_umulsidi3_v8plus"
  18970. + [(set (match_operand:DI 0 "register_operand" "=h,r")
  18971. + (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
  18972. + (match_operand:DI 2 "uns_small_int_operand" "")))
  18973. + (clobber (match_scratch:SI 3 "=X,h"))]
  18974. + "TARGET_V8PLUS"
  18975. + "@
  18976. + umul\t%1, %s2, %L0\n\tsrlx\t%L0, 32, %H0
  18977. + umul\t%1, %s2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0"
  18978. + [(set_attr "type" "multi")
  18979. + (set_attr "length" "2,3")])
  18980. +
  18981. +(define_expand "umulsi3_highpart"
  18982. + [(set (match_operand:SI 0 "register_operand" "")
  18983. + (truncate:SI
  18984. + (lshiftrt:DI
  18985. + (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
  18986. + (zero_extend:DI (match_operand:SI 2 "uns_arith_operand" "")))
  18987. + (const_int 32))))]
  18988. + "TARGET_HARD_MUL && TARGET_ARCH32"
  18989. +{
  18990. + if (CONSTANT_P (operands[2]))
  18991. + {
  18992. + if (TARGET_V8PLUS)
  18993. + {
  18994. + emit_insn (gen_const_umulsi3_highpart_v8plus (operands[0],
  18995. + operands[1],
  18996. + operands[2],
  18997. + GEN_INT (32)));
  18998. + DONE;
  18999. + }
  19000. + emit_insn (gen_const_umulsi3_highpart (operands[0], operands[1], operands[2]));
  19001. + DONE;
  19002. + }
  19003. + if (TARGET_V8PLUS)
  19004. + {
  19005. + emit_insn (gen_umulsi3_highpart_v8plus (operands[0], operands[1],
  19006. + operands[2], GEN_INT (32)));
  19007. + DONE;
  19008. + }
  19009. +})
  19010. +
  19011. +(define_insn "umulsi3_highpart_v8plus"
  19012. + [(set (match_operand:SI 0 "register_operand" "=h,r")
  19013. + (truncate:SI
  19014. + (lshiftrt:DI
  19015. + (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
  19016. + (zero_extend:DI (match_operand:SI 2 "register_operand" "r,r")))
  19017. + (match_operand:SI 3 "small_int_operand" "I,I"))))
  19018. + (clobber (match_scratch:SI 4 "=X,h"))]
  19019. + "TARGET_V8PLUS"
  19020. + "@
  19021. + umul\t%1, %2, %0\n\tsrlx\t%0, %3, %0
  19022. + umul\t%1, %2, %4\n\tsrlx\t%4, %3, %0"
  19023. + [(set_attr "type" "multi")
  19024. + (set_attr "length" "2")])
  19025. +
  19026. +(define_insn "const_umulsi3_highpart_v8plus"
  19027. + [(set (match_operand:SI 0 "register_operand" "=h,r")
  19028. + (truncate:SI
  19029. + (lshiftrt:DI
  19030. + (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
  19031. + (match_operand:DI 2 "uns_small_int_operand" ""))
  19032. + (match_operand:SI 3 "small_int_operand" "I,I"))))
  19033. + (clobber (match_scratch:SI 4 "=X,h"))]
  19034. + "TARGET_V8PLUS"
  19035. + "@
  19036. + umul\t%1, %s2, %0\n\tsrlx\t%0, %3, %0
  19037. + umul\t%1, %s2, %4\n\tsrlx\t%4, %3, %0"
  19038. + [(set_attr "type" "multi")
  19039. + (set_attr "length" "2")])
  19040. +
  19041. +(define_insn "*umulsi3_highpart_sp32"
  19042. + [(set (match_operand:SI 0 "register_operand" "=r")
  19043. + (truncate:SI
  19044. + (lshiftrt:DI
  19045. + (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
  19046. + (zero_extend:DI (match_operand:SI 2 "register_operand" "r")))
  19047. + (const_int 32))))]
  19048. + "TARGET_HARD_MUL32"
  19049. + "umul\t%1, %2, %%g0\n\trd\t%%y, %0"
  19050. + [(set_attr "type" "multi")
  19051. + (set_attr "length" "2")])
  19052. +
  19053. +(define_insn "const_umulsi3_highpart"
  19054. + [(set (match_operand:SI 0 "register_operand" "=r")
  19055. + (truncate:SI
  19056. + (lshiftrt:DI
  19057. + (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
  19058. + (match_operand:DI 2 "uns_small_int_operand" ""))
  19059. + (const_int 32))))]
  19060. + "TARGET_HARD_MUL32"
  19061. + "umul\t%1, %s2, %%g0\n\trd\t%%y, %0"
  19062. + [(set_attr "type" "multi")
  19063. + (set_attr "length" "2")])
  19064. +
  19065. +
  19066. +(define_expand "umulxhi_vis"
  19067. + [(set (match_operand:DI 0 "register_operand" "")
  19068. + (truncate:DI
  19069. + (lshiftrt:TI
  19070. + (mult:TI (zero_extend:TI (match_operand:DI 1 "arith_operand" ""))
  19071. + (zero_extend:TI (match_operand:DI 2 "arith_operand" "")))
  19072. + (const_int 64))))]
  19073. + "TARGET_VIS3"
  19074. +{
  19075. + if (TARGET_ARCH32)
  19076. + {
  19077. + emit_insn (gen_umulxhi_v8plus (operands[0], operands[1], operands[2]));
  19078. + DONE;
  19079. + }
  19080. +})
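+
+;; The operation in C, assuming a compiler with __int128 support
+;; (an illustrative sketch, not part of the patch):
+;;
+;;   unsigned long long
+;;   mulhi64 (unsigned long long a, unsigned long long b)
+;;   {
+;;     return (unsigned __int128) a * b >> 64;   /* high 64 bits, as umulxhi */
+;;   }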
  19081. +
  19082. +(define_insn "*umulxhi_sp64"
  19083. + [(set (match_operand:DI 0 "register_operand" "=r")
  19084. + (truncate:DI
  19085. + (lshiftrt:TI
  19086. + (mult:TI (zero_extend:TI (match_operand:DI 1 "arith_operand" "%r"))
  19087. + (zero_extend:TI (match_operand:DI 2 "arith_operand" "rI")))
  19088. + (const_int 64))))]
  19089. + "TARGET_VIS3 && TARGET_ARCH64"
  19090. + "umulxhi\t%1, %2, %0"
  19091. + [(set_attr "type" "imul")])
  19092. +
  19093. +(define_insn "umulxhi_v8plus"
  19094. + [(set (match_operand:DI 0 "register_operand" "=r,h")
  19095. + (truncate:DI
  19096. + (lshiftrt:TI
  19097. + (mult:TI (zero_extend:TI (match_operand:DI 1 "arith_operand" "%r,0"))
  19098. + (zero_extend:TI (match_operand:DI 2 "arith_operand" "rI,rI")))
  19099. + (const_int 64))))
  19100. + (clobber (match_scratch:SI 3 "=&h,X"))
  19101. + (clobber (match_scratch:SI 4 "=&h,X"))]
  19102. + "TARGET_VIS3 && TARGET_ARCH32"
  19103. +{
  19104. + return output_v8plus_mult (insn, operands, \"umulxhi\");
  19105. +}
  19106. + [(set_attr "type" "imul")
  19107. + (set_attr "length" "9,8")])
  19108. +
  19109. +(define_expand "xmulx_vis"
  19110. + [(set (match_operand:DI 0 "register_operand" "")
  19111. + (truncate:DI
  19112. + (unspec:TI [(zero_extend:TI (match_operand:DI 1 "arith_operand" ""))
  19113. + (zero_extend:TI (match_operand:DI 2 "arith_operand" ""))]
  19114. + UNSPEC_XMUL)))]
  19115. + "TARGET_VIS3"
  19116. +{
  19117. + if (TARGET_ARCH32)
  19118. + {
  19119. + emit_insn (gen_xmulx_v8plus (operands[0], operands[1], operands[2]));
  19120. + DONE;
  19121. + }
  19122. +})
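+
+;; UNSPEC_XMUL is VIS3's XOR (carry-less) multiplication.  A reference sketch
+;; in C of the low 64 bits that xmulx produces (illustrative only):
+;;
+;;   unsigned long long
+;;   xmul_lo (unsigned long long a, unsigned long long b)
+;;   {
+;;     unsigned long long r = 0;
+;;     for (int i = 0; i < 64; i++)
+;;       if ((b >> i) & 1)
+;;         r ^= a << i;   /* accumulate partial products with XOR, no carry */
+;;     return r;
+;;   }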
  19123. +
  19124. +(define_insn "*xmulx_sp64"
  19125. + [(set (match_operand:DI 0 "register_operand" "=r")
  19126. + (truncate:DI
  19127. + (unspec:TI [(zero_extend:TI (match_operand:DI 1 "arith_operand" "%r"))
  19128. + (zero_extend:TI (match_operand:DI 2 "arith_operand" "rI"))]
  19129. + UNSPEC_XMUL)))]
  19130. + "TARGET_VIS3 && TARGET_ARCH64"
  19131. + "xmulx\t%1, %2, %0"
  19132. + [(set_attr "type" "imul")])
  19133. +
  19134. +(define_insn "xmulx_v8plus"
  19135. + [(set (match_operand:DI 0 "register_operand" "=r,h")
  19136. + (truncate:DI
  19137. + (unspec:TI [(zero_extend:TI (match_operand:DI 1 "arith_operand" "%r,0"))
  19138. + (zero_extend:TI (match_operand:DI 2 "arith_operand" "rI,rI"))]
  19139. + UNSPEC_XMUL)))
  19140. + (clobber (match_scratch:SI 3 "=&h,X"))
  19141. + (clobber (match_scratch:SI 4 "=&h,X"))]
  19142. + "TARGET_VIS3 && TARGET_ARCH32"
  19143. +{
  19144. + return output_v8plus_mult (insn, operands, \"xmulx\");
  19145. +}
  19146. + [(set_attr "type" "imul")
  19147. + (set_attr "length" "9,8")])
  19148. +
  19149. +(define_expand "xmulxhi_vis"
  19150. + [(set (match_operand:DI 0 "register_operand" "")
  19151. + (truncate:DI
  19152. + (lshiftrt:TI
  19153. + (unspec:TI [(zero_extend:TI (match_operand:DI 1 "arith_operand" ""))
  19154. + (zero_extend:TI (match_operand:DI 2 "arith_operand" ""))]
  19155. + UNSPEC_XMUL)
  19156. + (const_int 64))))]
  19157. + "TARGET_VIS3"
  19158. +{
  19159. + if (TARGET_ARCH32)
  19160. + {
  19161. + emit_insn (gen_xmulxhi_v8plus (operands[0], operands[1], operands[2]));
  19162. + DONE;
  19163. + }
  19164. +})
  19165. +
  19166. +(define_insn "*xmulxhi_sp64"
  19167. + [(set (match_operand:DI 0 "register_operand" "=r")
  19168. + (truncate:DI
  19169. + (lshiftrt:TI
  19170. + (unspec:TI [(zero_extend:TI (match_operand:DI 1 "arith_operand" "%r"))
  19171. + (zero_extend:TI (match_operand:DI 2 "arith_operand" "rI"))]
  19172. + UNSPEC_XMUL)
  19173. + (const_int 64))))]
  19174. + "TARGET_VIS3 && TARGET_ARCH64"
  19175. + "xmulxhi\t%1, %2, %0"
  19176. + [(set_attr "type" "imul")])
  19177. +
  19178. +(define_insn "xmulxhi_v8plus"
  19179. + [(set (match_operand:DI 0 "register_operand" "=r,h")
  19180. + (truncate:DI
  19181. + (lshiftrt:TI
  19182. + (unspec:TI [(zero_extend:TI (match_operand:DI 1 "arith_operand" "%r,0"))
  19183. + (zero_extend:TI (match_operand:DI 2 "arith_operand" "rI,rI"))]
  19184. + UNSPEC_XMUL)
  19185. + (const_int 64))))
  19186. + (clobber (match_scratch:SI 3 "=&h,X"))
  19187. + (clobber (match_scratch:SI 4 "=&h,X"))]
  19188. + "TARGET_VIS3 && TARGET_ARCH32"
  19189. +{
  19190. + return output_v8plus_mult (insn, operands, \"xmulxhi\");
  19191. +}
  19192. + [(set_attr "type" "imul")
  19193. + (set_attr "length" "9,8")])
  19194. +
  19195. +(define_expand "divsi3"
  19196. + [(parallel [(set (match_operand:SI 0 "register_operand" "")
  19197. + (div:SI (match_operand:SI 1 "register_operand" "")
  19198. + (match_operand:SI 2 "input_operand" "")))
  19199. + (clobber (match_scratch:SI 3 ""))])]
  19200. + "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
  19201. +{
  19202. + if (TARGET_ARCH64)
  19203. + {
  19204. + operands[3] = gen_reg_rtx (SImode);
  19205. + emit_insn (gen_ashrsi3 (operands[3], operands[1], GEN_INT (31)));
  19206. + emit_insn (gen_divsi3_sp64 (operands[0], operands[1], operands[2],
  19207. + operands[3]));
  19208. + DONE;
  19209. + }
  19210. +})
  19211. +
  19212. +;; The V8 architecture specifies that there must be at least 3 instructions
  19213. +;; between a write to the Y register and a use of it for correct results.
  19214. +;; We try to fill one of them with a simple constant or a memory load.
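+;; For example, alternative 0 below emits "sra; wr; nop; nop; nop; sdiv" on
+;; a plain V8 target, while on V9, where the hazard no longer exists, the
+;; nops are omitted.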
  19215. +
  19216. +(define_insn "divsi3_sp32"
  19217. + [(set (match_operand:SI 0 "register_operand" "=r,r,r")
  19218. + (div:SI (match_operand:SI 1 "register_operand" "r,r,r")
  19219. + (match_operand:SI 2 "input_operand" "rI,K,m")))
  19220. + (clobber (match_scratch:SI 3 "=&r,&r,&r"))]
  19221. + "(TARGET_V8 || TARGET_DEPRECATED_V8_INSNS) && TARGET_ARCH32"
  19222. +{
  19223. + output_asm_insn ("sra\t%1, 31, %3", operands);
  19224. + output_asm_insn ("wr\t%3, 0, %%y", operands);
  19225. +
  19226. + switch (which_alternative)
  19227. + {
  19228. + case 0:
  19229. + if (TARGET_V9)
  19230. + return "sdiv\t%1, %2, %0";
  19231. + else
  19232. + return "nop\n\tnop\n\tnop\n\tsdiv\t%1, %2, %0";
  19233. + case 1:
  19234. + if (TARGET_V9)
  19235. + return "sethi\t%%hi(%a2), %3\n\tsdiv\t%1, %3, %0";
  19236. + else
  19237. + return "sethi\t%%hi(%a2), %3\n\tnop\n\tnop\n\tsdiv\t%1, %3, %0";
  19238. + case 2:
  19239. + if (TARGET_V9)
  19240. + return "ld\t%2, %3\n\tsdiv\t%1, %3, %0";
  19241. + else
  19242. + return "ld\t%2, %3\n\tnop\n\tnop\n\tsdiv\t%1, %3, %0";
  19243. + default:
  19244. + gcc_unreachable ();
  19245. + }
  19246. +}
  19247. + [(set_attr "type" "multi")
  19248. + (set (attr "length")
  19249. + (if_then_else (eq_attr "isa" "v9")
  19250. + (const_int 4) (const_int 6)))])
  19251. +
  19252. +(define_insn "divsi3_sp64"
  19253. + [(set (match_operand:SI 0 "register_operand" "=r")
  19254. + (div:SI (match_operand:SI 1 "register_operand" "r")
  19255. + (match_operand:SI 2 "input_operand" "rI")))
  19256. + (use (match_operand:SI 3 "register_operand" "r"))]
  19257. + "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
  19258. + "wr\t%%g0, %3, %%y\n\tsdiv\t%1, %2, %0"
  19259. + [(set_attr "type" "multi")
  19260. + (set_attr "length" "2")])
  19261. +
  19262. +(define_insn "divdi3"
  19263. + [(set (match_operand:DI 0 "register_operand" "=r")
  19264. + (div:DI (match_operand:DI 1 "register_operand" "r")
  19265. + (match_operand:DI 2 "arith_operand" "rI")))]
  19266. + "TARGET_ARCH64"
  19267. + "sdivx\t%1, %2, %0"
  19268. + [(set_attr "type" "idiv")])
  19269. +
  19270. +(define_insn "*cmp_sdiv_cc_set"
  19271. + [(set (reg:CC CC_REG)
  19272. + (compare:CC (div:SI (match_operand:SI 1 "register_operand" "r")
  19273. + (match_operand:SI 2 "arith_operand" "rI"))
  19274. + (const_int 0)))
  19275. + (set (match_operand:SI 0 "register_operand" "=r")
  19276. + (div:SI (match_dup 1) (match_dup 2)))
  19277. + (clobber (match_scratch:SI 3 "=&r"))]
  19278. + "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
  19279. +{
  19280. + output_asm_insn ("sra\t%1, 31, %3", operands);
  19281. + output_asm_insn ("wr\t%3, 0, %%y", operands);
  19282. +
  19283. + if (TARGET_V9)
  19284. + return "sdivcc\t%1, %2, %0";
  19285. + else
  19286. + return "nop\n\tnop\n\tnop\n\tsdivcc\t%1, %2, %0";
  19287. +}
  19288. + [(set_attr "type" "multi")
  19289. + (set (attr "length")
  19290. + (if_then_else (eq_attr "isa" "v9")
  19291. + (const_int 3) (const_int 6)))])
  19292. +
  19293. +(define_expand "udivsi3"
  19294. + [(set (match_operand:SI 0 "register_operand" "")
  19295. + (udiv:SI (match_operand:SI 1 "nonimmediate_operand" "")
  19296. + (match_operand:SI 2 "input_operand" "")))]
  19297. + "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
  19298. + "")
  19299. +
  19300. +;; The V8 architecture specifies that there must be at least 3 instructions
  19301. +;; between a write to the Y register and a use of it for correct results.
  19302. +;; We try to fill one of them with a simple constant or a memory load.
  19303. +
  19304. +(define_insn "udivsi3_sp32"
  19305. + [(set (match_operand:SI 0 "register_operand" "=r,&r,&r,&r")
  19306. + (udiv:SI (match_operand:SI 1 "nonimmediate_operand" "r,r,r,m")
  19307. + (match_operand:SI 2 "input_operand" "rI,K,m,r")))]
  19308. + "(TARGET_V8 || TARGET_DEPRECATED_V8_INSNS) && TARGET_ARCH32"
  19309. +{
  19310. + output_asm_insn ("wr\t%%g0, 0, %%y", operands);
  19311. +
  19312. + switch (which_alternative)
  19313. + {
  19314. + case 0:
  19315. + if (TARGET_V9)
  19316. + return "udiv\t%1, %2, %0";
  19317. + else
  19318. + return "nop\n\tnop\n\tnop\n\tudiv\t%1, %2, %0";
  19319. + case 1:
  19320. + if (TARGET_V9)
  19321. + return "sethi\t%%hi(%a2), %0\n\tudiv\t%1, %0, %0";
  19322. + else
  19323. + return "sethi\t%%hi(%a2), %0\n\tnop\n\tnop\n\tudiv\t%1, %0, %0";
  19324. + case 2:
  19325. + if (TARGET_V9)
  19326. + return "ld\t%2, %0\n\tudiv\t%1, %0, %0";
  19327. + else
  19328. + return "ld\t%2, %0\n\tnop\n\tnop\n\tudiv\t%1, %0, %0";
  19329. + case 3:
  19330. + if (TARGET_V9)
  19331. + return "ld\t%1, %0\n\tudiv\t%0, %2, %0";
  19332. + else
  19333. + return "ld\t%1, %0\n\tnop\n\tnop\n\tudiv\t%0, %2, %0";
  19334. + default:
  19335. + gcc_unreachable ();
  19336. + }
  19337. +}
  19338. + [(set_attr "type" "multi")
  19339. + (set (attr "length")
  19340. + (if_then_else (eq_attr "isa" "v9")
  19341. + (const_int 3) (const_int 5)))])
  19342. +
  19343. +(define_insn "udivsi3_sp64"
  19344. + [(set (match_operand:SI 0 "register_operand" "=r")
  19345. + (udiv:SI (match_operand:SI 1 "nonimmediate_operand" "r")
  19346. + (match_operand:SI 2 "input_operand" "rI")))]
  19347. + "TARGET_DEPRECATED_V8_INSNS && TARGET_ARCH64"
  19348. + "wr\t%%g0, 0, %%y\n\tudiv\t%1, %2, %0"
  19349. + [(set_attr "type" "multi")
  19350. + (set_attr "length" "2")])
  19351. +
  19352. +(define_insn "udivdi3"
  19353. + [(set (match_operand:DI 0 "register_operand" "=r")
  19354. + (udiv:DI (match_operand:DI 1 "register_operand" "r")
  19355. + (match_operand:DI 2 "arith_operand" "rI")))]
  19356. + "TARGET_ARCH64"
  19357. + "udivx\t%1, %2, %0"
  19358. + [(set_attr "type" "idiv")])
  19359. +
  19360. +(define_insn "*cmp_udiv_cc_set"
  19361. + [(set (reg:CC CC_REG)
  19362. + (compare:CC (udiv:SI (match_operand:SI 1 "register_operand" "r")
  19363. + (match_operand:SI 2 "arith_operand" "rI"))
  19364. + (const_int 0)))
  19365. + (set (match_operand:SI 0 "register_operand" "=r")
  19366. + (udiv:SI (match_dup 1) (match_dup 2)))]
  19367. + "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
  19368. +{
  19369. + output_asm_insn ("wr\t%%g0, 0, %%y", operands);
  19370. +
  19371. + if (TARGET_V9)
  19372. + return "udivcc\t%1, %2, %0";
  19373. + else
  19374. + return "nop\n\tnop\n\tnop\n\tudivcc\t%1, %2, %0";
  19375. +}
  19376. + [(set_attr "type" "multi")
  19377. + (set (attr "length")
  19378. + (if_then_else (eq_attr "isa" "v9")
  19379. + (const_int 2) (const_int 5)))])
  19380. +
  19381. +
  19382. +;; SPARClet multiply/accumulate insns
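+;; These fold a multiply and an add into one instruction, accumulating into
+;; the destination; e.g. *smacdi computes roughly, in C:
+;;
+;;   acc += (long long) a * b;   /* operand 3 tied to operand 0 via "0" */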
  19383. +
  19384. +(define_insn "*smacsi"
  19385. + [(set (match_operand:SI 0 "register_operand" "=r")
  19386. + (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "%r")
  19387. + (match_operand:SI 2 "arith_operand" "rI"))
  19388. + (match_operand:SI 3 "register_operand" "0")))]
  19389. + "TARGET_SPARCLET"
  19390. + "smac\t%1, %2, %0"
  19391. + [(set_attr "type" "imul")])
  19392. +
  19393. +(define_insn "*smacdi"
  19394. + [(set (match_operand:DI 0 "register_operand" "=r")
  19395. + (plus:DI (mult:DI (sign_extend:DI
  19396. + (match_operand:SI 1 "register_operand" "%r"))
  19397. + (sign_extend:DI
  19398. + (match_operand:SI 2 "register_operand" "r")))
  19399. + (match_operand:DI 3 "register_operand" "0")))]
  19400. + "TARGET_SPARCLET"
  19401. + "smacd\t%1, %2, %L0"
  19402. + [(set_attr "type" "imul")])
  19403. +
  19404. +(define_insn "*umacdi"
  19405. + [(set (match_operand:DI 0 "register_operand" "=r")
  19406. + (plus:DI (mult:DI (zero_extend:DI
  19407. + (match_operand:SI 1 "register_operand" "%r"))
  19408. + (zero_extend:DI
  19409. + (match_operand:SI 2 "register_operand" "r")))
  19410. + (match_operand:DI 3 "register_operand" "0")))]
  19411. + "TARGET_SPARCLET"
  19412. + "umacd\t%1, %2, %L0"
  19413. + [(set_attr "type" "imul")])
  19414. +
  19415. +
  19416. +;; Boolean instructions.
  19417. +
  19418. +(define_insn "anddi3"
  19419. + [(set (match_operand:DI 0 "register_operand" "=r")
  19420. + (and:DI (match_operand:DI 1 "arith_operand" "%r")
  19421. + (match_operand:DI 2 "arith_operand" "rI")))]
  19422. + "TARGET_ARCH64"
  19423. + "and\t%1, %2, %0")
  19424. +
  19425. +(define_insn "andsi3"
  19426. + [(set (match_operand:SI 0 "register_operand" "=r")
  19427. + (and:SI (match_operand:SI 1 "arith_operand" "%r")
  19428. + (match_operand:SI 2 "arith_operand" "rI")))]
  19429. + ""
  19430. + "and\t%1, %2, %0")
  19431. +
  19432. +(define_split
  19433. + [(set (match_operand:SI 0 "register_operand" "")
  19434. + (and:SI (match_operand:SI 1 "register_operand" "")
  19435. + (match_operand:SI 2 "const_compl_high_operand" "")))
  19436. + (clobber (match_operand:SI 3 "register_operand" ""))]
  19437. + ""
  19438. + [(set (match_dup 3) (match_dup 4))
  19439. + (set (match_dup 0) (and:SI (not:SI (match_dup 3)) (match_dup 1)))]
  19440. +{
  19441. + operands[4] = GEN_INT (~INTVAL (operands[2]));
  19442. +})
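+
+;; That is, for AND masks whose complement is a sethi-loadable constant: the
+;; scratch is loaded with ~C in a single sethi and combined with andn, where
+;; materializing C itself would take a sethi/or pair before the and.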
  19443. +
  19444. +(define_insn "*and_not_di_sp64"
  19445. + [(set (match_operand:DI 0 "register_operand" "=r")
  19446. + (and:DI (not:DI (match_operand:DI 1 "register_operand" "%r"))
  19447. + (match_operand:DI 2 "register_operand" "r")))]
  19448. + "TARGET_ARCH64"
  19449. + "andn\t%2, %1, %0")
  19450. +
  19451. +(define_insn "*and_not_si"
  19452. + [(set (match_operand:SI 0 "register_operand" "=r")
  19453. + (and:SI (not:SI (match_operand:SI 1 "register_operand" "%r"))
  19454. + (match_operand:SI 2 "register_operand" "r")))]
  19455. + ""
  19456. + "andn\t%2, %1, %0")
  19457. +
  19458. +(define_insn "iordi3"
  19459. + [(set (match_operand:DI 0 "register_operand" "=r")
  19460. + (ior:DI (match_operand:DI 1 "arith_operand" "%r")
  19461. + (match_operand:DI 2 "arith_operand" "rI")))]
  19462. + "TARGET_ARCH64"
  19463. + "or\t%1, %2, %0")
  19464. +
  19465. +(define_insn "iorsi3"
  19466. + [(set (match_operand:SI 0 "register_operand" "=r")
  19467. + (ior:SI (match_operand:SI 1 "arith_operand" "%r")
  19468. + (match_operand:SI 2 "arith_operand" "rI")))]
  19469. + ""
  19470. + "or\t%1, %2, %0")
  19471. +
  19472. +(define_split
  19473. + [(set (match_operand:SI 0 "register_operand" "")
  19474. + (ior:SI (match_operand:SI 1 "register_operand" "")
  19475. + (match_operand:SI 2 "const_compl_high_operand" "")))
  19476. + (clobber (match_operand:SI 3 "register_operand" ""))]
  19477. + ""
  19478. + [(set (match_dup 3) (match_dup 4))
  19479. + (set (match_dup 0) (ior:SI (not:SI (match_dup 3)) (match_dup 1)))]
  19480. +{
  19481. + operands[4] = gen_int_mode (~INTVAL (operands[2]), SImode);
  19482. +})
  19483. +
  19484. +(define_insn "*or_not_di_sp64"
  19485. + [(set (match_operand:DI 0 "register_operand" "=r")
  19486. + (ior:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
  19487. + (match_operand:DI 2 "register_operand" "r")))]
  19488. + "TARGET_ARCH64"
  19489. + "orn\t%2, %1, %0")
  19490. +
  19491. +(define_insn "*or_not_si"
  19492. + [(set (match_operand:SI 0 "register_operand" "=r")
  19493. + (ior:SI (not:SI (match_operand:SI 1 "register_operand" "r"))
  19494. + (match_operand:SI 2 "register_operand" "r")))]
  19495. + ""
  19496. + "orn\t%2, %1, %0")
  19497. +
  19498. +(define_insn "xordi3"
  19499. + [(set (match_operand:DI 0 "register_operand" "=r")
  19500. + (xor:DI (match_operand:DI 1 "arith_operand" "%rJ")
  19501. + (match_operand:DI 2 "arith_operand" "rI")))]
  19502. + "TARGET_ARCH64"
  19503. + "xor\t%r1, %2, %0")
  19504. +
  19505. +(define_insn "xorsi3"
  19506. + [(set (match_operand:SI 0 "register_operand" "=r")
  19507. + (xor:SI (match_operand:SI 1 "arith_operand" "%rJ")
  19508. + (match_operand:SI 2 "arith_operand" "rI")))]
  19509. + ""
  19510. + "xor\t%r1, %2, %0")
  19511. +
  19512. +(define_split
  19513. + [(set (match_operand:SI 0 "register_operand" "")
  19514. + (xor:SI (match_operand:SI 1 "register_operand" "")
  19515. + (match_operand:SI 2 "const_compl_high_operand" "")))
  19516. + (clobber (match_operand:SI 3 "register_operand" ""))]
  19517. + ""
  19518. + [(set (match_dup 3) (match_dup 4))
  19519. + (set (match_dup 0) (not:SI (xor:SI (match_dup 3) (match_dup 1))))]
  19520. +{
  19521. + operands[4] = gen_int_mode (~INTVAL (operands[2]), SImode);
  19522. +})
  19523. +
  19524. +(define_split
  19525. + [(set (match_operand:SI 0 "register_operand" "")
  19526. + (not:SI (xor:SI (match_operand:SI 1 "register_operand" "")
  19527. + (match_operand:SI 2 "const_compl_high_operand" ""))))
  19528. + (clobber (match_operand:SI 3 "register_operand" ""))]
  19529. + ""
  19530. + [(set (match_dup 3) (match_dup 4))
  19531. + (set (match_dup 0) (xor:SI (match_dup 3) (match_dup 1)))]
  19532. +{
  19533. + operands[4] = gen_int_mode (~INTVAL (operands[2]), SImode);
  19534. +})
  19535. +
  19536. +(define_insn "*xor_not_di_sp64"
  19537. + [(set (match_operand:DI 0 "register_operand" "=r")
  19538. + (not:DI (xor:DI (match_operand:DI 1 "register_or_zero_operand" "rJ")
  19539. + (match_operand:DI 2 "arith_operand" "rI"))))]
  19540. + "TARGET_ARCH64"
  19541. + "xnor\t%r1, %2, %0")
  19542. +
  19543. +(define_insn "*xor_not_si"
  19544. + [(set (match_operand:SI 0 "register_operand" "=r")
  19545. + (not:SI (xor:SI (match_operand:SI 1 "register_or_zero_operand" "rJ")
  19546. + (match_operand:SI 2 "arith_operand" "rI"))))]
  19547. + ""
  19548. + "xnor\t%r1, %2, %0")
  19549. +
  19550. +;; These correspond to the patterns above, for the case where we also
  19551. +;; (or only) want to set the condition code.
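+;; %A<n> and %B<n> are SPARC print_operand codes: %A prints the mnemonic of
+;; the matched operator (and, or, xor, ...) and %B its complemented form
+;; (andn, orn, xnor); the literal "cc" suffix then selects the flag-setting
+;; variant.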
  19552. +
  19553. +(define_insn "*cmp_cc_arith_op"
  19554. + [(set (reg:CC CC_REG)
  19555. + (compare:CC (match_operator:SI 2 "cc_arith_operator"
  19556. + [(match_operand:SI 0 "arith_operand" "%r")
  19557. + (match_operand:SI 1 "arith_operand" "rI")])
  19558. + (const_int 0)))]
  19559. + ""
  19560. + "%A2cc\t%0, %1, %%g0"
  19561. + [(set_attr "type" "compare")])
  19562. +
  19563. +(define_insn "*cmp_ccx_arith_op"
  19564. + [(set (reg:CCX CC_REG)
  19565. + (compare:CCX (match_operator:DI 2 "cc_arith_operator"
  19566. + [(match_operand:DI 0 "arith_operand" "%r")
  19567. + (match_operand:DI 1 "arith_operand" "rI")])
  19568. + (const_int 0)))]
  19569. + "TARGET_ARCH64"
  19570. + "%A2cc\t%0, %1, %%g0"
  19571. + [(set_attr "type" "compare")])
  19572. +
  19573. +(define_insn "*cmp_cc_arith_op_set"
  19574. + [(set (reg:CC CC_REG)
  19575. + (compare:CC (match_operator:SI 3 "cc_arith_operator"
  19576. + [(match_operand:SI 1 "arith_operand" "%r")
  19577. + (match_operand:SI 2 "arith_operand" "rI")])
  19578. + (const_int 0)))
  19579. + (set (match_operand:SI 0 "register_operand" "=r")
  19580. + (match_operator:SI 4 "cc_arith_operator"
  19581. + [(match_dup 1) (match_dup 2)]))]
  19582. + "GET_CODE (operands[3]) == GET_CODE (operands[4])"
  19583. + "%A3cc\t%1, %2, %0"
  19584. + [(set_attr "type" "compare")])
  19585. +
  19586. +(define_insn "*cmp_ccx_arith_op_set"
  19587. + [(set (reg:CCX CC_REG)
  19588. + (compare:CCX (match_operator:DI 3 "cc_arith_operator"
  19589. + [(match_operand:DI 1 "arith_operand" "%r")
  19590. + (match_operand:DI 2 "arith_operand" "rI")])
  19591. + (const_int 0)))
  19592. + (set (match_operand:DI 0 "register_operand" "=r")
  19593. + (match_operator:DI 4 "cc_arith_operator"
  19594. + [(match_dup 1) (match_dup 2)]))]
  19595. + "TARGET_ARCH64 && GET_CODE (operands[3]) == GET_CODE (operands[4])"
  19596. + "%A3cc\t%1, %2, %0"
  19597. + [(set_attr "type" "compare")])
  19598. +
  19599. +(define_insn "*cmp_cc_xor_not"
  19600. + [(set (reg:CC CC_REG)
  19601. + (compare:CC
  19602. + (not:SI (xor:SI (match_operand:SI 0 "register_or_zero_operand" "%rJ")
  19603. + (match_operand:SI 1 "arith_operand" "rI")))
  19604. + (const_int 0)))]
  19605. + ""
  19606. + "xnorcc\t%r0, %1, %%g0"
  19607. + [(set_attr "type" "compare")])
  19608. +
  19609. +(define_insn "*cmp_ccx_xor_not"
  19610. + [(set (reg:CCX CC_REG)
  19611. + (compare:CCX
  19612. + (not:DI (xor:DI (match_operand:DI 0 "register_or_zero_operand" "%rJ")
  19613. + (match_operand:DI 1 "arith_operand" "rI")))
  19614. + (const_int 0)))]
  19615. + "TARGET_ARCH64"
  19616. + "xnorcc\t%r0, %1, %%g0"
  19617. + [(set_attr "type" "compare")])
  19618. +
  19619. +(define_insn "*cmp_cc_xor_not_set"
  19620. + [(set (reg:CC CC_REG)
  19621. + (compare:CC
  19622. + (not:SI (xor:SI (match_operand:SI 1 "register_or_zero_operand" "%rJ")
  19623. + (match_operand:SI 2 "arith_operand" "rI")))
  19624. + (const_int 0)))
  19625. + (set (match_operand:SI 0 "register_operand" "=r")
  19626. + (not:SI (xor:SI (match_dup 1) (match_dup 2))))]
  19627. + ""
  19628. + "xnorcc\t%r1, %2, %0"
  19629. + [(set_attr "type" "compare")])
  19630. +
  19631. +(define_insn "*cmp_ccx_xor_not_set"
  19632. + [(set (reg:CCX CC_REG)
  19633. + (compare:CCX
  19634. + (not:DI (xor:DI (match_operand:DI 1 "register_or_zero_operand" "%rJ")
  19635. + (match_operand:DI 2 "arith_operand" "rI")))
  19636. + (const_int 0)))
  19637. + (set (match_operand:DI 0 "register_operand" "=r")
  19638. + (not:DI (xor:DI (match_dup 1) (match_dup 2))))]
  19639. + "TARGET_ARCH64"
  19640. + "xnorcc\t%r1, %2, %0"
  19641. + [(set_attr "type" "compare")])
  19642. +
  19643. +(define_insn "*cmp_cc_arith_op_not"
  19644. + [(set (reg:CC CC_REG)
  19645. + (compare:CC (match_operator:SI 2 "cc_arith_not_operator"
  19646. + [(not:SI (match_operand:SI 0 "arith_operand" "rI"))
  19647. + (match_operand:SI 1 "register_or_zero_operand" "rJ")])
  19648. + (const_int 0)))]
  19649. + ""
  19650. + "%B2cc\t%r1, %0, %%g0"
  19651. + [(set_attr "type" "compare")])
  19652. +
  19653. +(define_insn "*cmp_ccx_arith_op_not"
  19654. + [(set (reg:CCX CC_REG)
  19655. + (compare:CCX (match_operator:DI 2 "cc_arith_not_operator"
  19656. + [(not:DI (match_operand:DI 0 "arith_operand" "rI"))
  19657. + (match_operand:DI 1 "register_or_zero_operand" "rJ")])
  19658. + (const_int 0)))]
  19659. + "TARGET_ARCH64"
  19660. + "%B2cc\t%r1, %0, %%g0"
  19661. + [(set_attr "type" "compare")])
  19662. +
  19663. +(define_insn "*cmp_cc_arith_op_not_set"
  19664. + [(set (reg:CC CC_REG)
  19665. + (compare:CC (match_operator:SI 3 "cc_arith_not_operator"
  19666. + [(not:SI (match_operand:SI 1 "arith_operand" "rI"))
  19667. + (match_operand:SI 2 "register_or_zero_operand" "rJ")])
  19668. + (const_int 0)))
  19669. + (set (match_operand:SI 0 "register_operand" "=r")
  19670. + (match_operator:SI 4 "cc_arith_not_operator"
  19671. + [(not:SI (match_dup 1)) (match_dup 2)]))]
  19672. + "GET_CODE (operands[3]) == GET_CODE (operands[4])"
  19673. + "%B3cc\t%r2, %1, %0"
  19674. + [(set_attr "type" "compare")])
  19675. +
  19676. +(define_insn "*cmp_ccx_arith_op_not_set"
  19677. + [(set (reg:CCX CC_REG)
  19678. + (compare:CCX (match_operator:DI 3 "cc_arith_not_operator"
  19679. + [(not:DI (match_operand:DI 1 "arith_operand" "rI"))
  19680. + (match_operand:DI 2 "register_or_zero_operand" "rJ")])
  19681. + (const_int 0)))
  19682. + (set (match_operand:DI 0 "register_operand" "=r")
  19683. + (match_operator:DI 4 "cc_arith_not_operator"
  19684. + [(not:DI (match_dup 1)) (match_dup 2)]))]
  19685. + "TARGET_ARCH64 && GET_CODE (operands[3]) == GET_CODE (operands[4])"
  19686. + "%B3cc\t%r2, %1, %0"
  19687. + [(set_attr "type" "compare")])
  19688. +
  19689. +;; We cannot use the "neg" pseudo insn because the Sun assembler
  19690. +;; does not know how to make it work for constants.
  19691. +
  19692. +(define_expand "negdi2"
  19693. + [(set (match_operand:DI 0 "register_operand" "=r")
  19694. + (neg:DI (match_operand:DI 1 "register_operand" "r")))]
  19695. + ""
  19696. +{
  19697. + if (TARGET_ARCH32)
  19698. + {
  19699. + emit_insn (gen_negdi2_sp32 (operands[0], operands[1]));
  19700. + DONE;
  19701. + }
  19702. +})
  19703. +
  19704. +(define_expand "unegvdi3"
  19705. + [(parallel [(set (reg:CCXC CC_REG)
  19706. + (compare:CCXC (not:DI (match_operand:DI 1 "register_operand" ""))
  19707. + (const_int -1)))
  19708. + (set (match_operand:DI 0 "register_operand" "")
  19709. + (neg:DI (match_dup 1)))])
  19710. + (set (pc)
  19711. + (if_then_else (ltu (reg:CCXC CC_REG) (const_int 0))
  19712. + (label_ref (match_operand 2 ""))
  19713. + (pc)))]
  19714. + ""
  19715. +{
  19716. + if (TARGET_ARCH32)
  19717. + {
  19718. + emit_insn (gen_unegvdi3_sp32 (operands[0], operands[1]));
  19719. + rtx x = gen_rtx_LTU (VOIDmode, gen_rtx_REG (CCCmode, SPARC_ICC_REG),
  19720. + const0_rtx);
  19721. + emit_jump_insn (gen_cbranchcc4 (x, XEXP (x, 0), XEXP (x, 1), operands[2]));
  19722. + DONE;
  19723. + }
  19724. +})
  19725. +
  19726. +(define_expand "negvdi3"
  19727. + [(parallel [(set (reg:CCXV CC_REG)
  19728. + (compare:CCXV (neg:DI (match_operand:DI 1 "register_operand" ""))
  19729. + (unspec:DI [(match_dup 1)] UNSPEC_NEGV)))
  19730. + (set (match_operand:DI 0 "register_operand" "")
  19731. + (neg:DI (match_dup 1)))])
  19732. + (set (pc)
  19733. + (if_then_else (ne (reg:CCXV CC_REG) (const_int 0))
  19734. + (label_ref (match_operand 2 ""))
  19735. + (pc)))]
  19736. + ""
  19737. +{
  19738. + if (TARGET_ARCH32)
  19739. + {
  19740. + emit_insn (gen_negvdi3_sp32 (operands[0], operands[1]));
  19741. + rtx x = gen_rtx_NE (VOIDmode, gen_rtx_REG (CCVmode, SPARC_ICC_REG),
  19742. + const0_rtx);
  19743. + emit_jump_insn (gen_cbranchcc4 (x, XEXP (x, 0), XEXP (x, 1), operands[2]));
  19744. + DONE;
  19745. + }
  19746. +})
  19747. +
  19748. +(define_insn_and_split "negdi2_sp32"
  19749. + [(set (match_operand:DI 0 "register_operand" "=&r")
  19750. + (neg:DI (match_operand:DI 1 "register_operand" "r")))
  19751. + (clobber (reg:CC CC_REG))]
  19752. + "TARGET_ARCH32"
  19753. + "#"
  19754. + "&& reload_completed"
  19755. + [(parallel [(set (reg:CCC CC_REG)
  19756. + (compare:CCC (not:SI (match_dup 5)) (const_int -1)))
  19757. + (set (match_dup 4) (neg:SI (match_dup 5)))])
  19758. + (set (match_dup 2) (minus:SI (minus:SI (const_int 0) (match_dup 3))
  19759. + (ltu:SI (reg:CCC CC_REG) (const_int 0))))]
  19760. + "operands[2] = gen_highpart (SImode, operands[0]);
  19761. + operands[3] = gen_highpart (SImode, operands[1]);
  19762. + operands[4] = gen_lowpart (SImode, operands[0]);
  19763. + operands[5] = gen_lowpart (SImode, operands[1]);"
  19764. + [(set_attr "length" "2")])
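+
+;; A C sketch of the double-word negation performed by the split above
+;; (variable names are illustrative):
+;;
+;;   unsigned int lo = -x_lo;                /* subcc %g0: borrow = (x_lo != 0) */
+;;   unsigned int hi = -x_hi - (x_lo != 0);  /* borrow propagated into high word */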
  19765. +
  19766. +(define_insn_and_split "unegvdi3_sp32"
  19767. + [(set (reg:CCC CC_REG)
  19768. + (compare:CCC (not:DI (match_operand:DI 1 "register_operand" "r"))
  19769. + (const_int -1)))
  19770. + (set (match_operand:DI 0 "register_operand" "=&r")
  19771. + (neg:DI (match_dup 1)))]
  19772. + "TARGET_ARCH32"
  19773. + "#"
  19774. + "&& reload_completed"
  19775. + [(parallel [(set (reg:CCC CC_REG)
  19776. + (compare:CCC (not:SI (match_dup 5)) (const_int -1)))
  19777. + (set (match_dup 4) (neg:SI (match_dup 5)))])
  19778. + (parallel [(set (reg:CCC CC_REG)
  19779. + (compare:CCC (zero_extend:DI
  19780. + (neg:SI (plus:SI (match_dup 3)
  19781. + (ltu:SI (reg:CCC CC_REG)
  19782. + (const_int 0)))))
  19783. + (neg:DI (plus:DI (zero_extend:DI (match_dup 3))
  19784. + (ltu:DI (reg:CCC CC_REG)
  19785. + (const_int 0))))))
  19786. + (set (match_dup 2) (neg:SI (plus:SI (match_dup 3)
  19787. + (ltu:SI (reg:CCC CC_REG)
  19788. + (const_int 0)))))])]
  19789. + "operands[2] = gen_highpart (SImode, operands[0]);
  19790. + operands[3] = gen_highpart (SImode, operands[1]);
  19791. + operands[4] = gen_lowpart (SImode, operands[0]);
  19792. + operands[5] = gen_lowpart (SImode, operands[1]);"
  19793. + [(set_attr "length" "2")])
  19794. +
  19795. +(define_insn_and_split "negvdi3_sp32"
  19796. + [(set (reg:CCV CC_REG)
  19797. + (compare:CCV (neg:DI (match_operand:DI 1 "register_operand" "r"))
  19798. + (unspec:DI [(match_dup 1)] UNSPEC_NEGV)))
  19799. + (set (match_operand:DI 0 "register_operand" "=&r")
  19800. + (neg:DI (match_dup 1)))]
  19801. + "TARGET_ARCH32"
  19802. + "#"
  19803. + "&& reload_completed"
  19804. + [(parallel [(set (reg:CCC CC_REG)
  19805. + (compare:CCC (not:SI (match_dup 5)) (const_int -1)))
  19806. + (set (match_dup 4) (neg:SI (match_dup 5)))])
  19807. + (parallel [(set (reg:CCV CC_REG)
  19808. + (compare:CCV (neg:SI (plus:SI (match_dup 3)
  19809. + (ltu:SI (reg:CCC CC_REG)
  19810. + (const_int 0))))
  19811. + (unspec:SI [(plus:SI (match_dup 3)
  19812. + (ltu:SI (reg:CCC CC_REG)
  19813. + (const_int 0)))]
  19814. + UNSPEC_NEGV)))
  19815. + (set (match_dup 2) (neg:SI (plus:SI (match_dup 3)
  19816. + (ltu:SI (reg:CCC CC_REG)
  19817. + (const_int 0)))))])]
  19818. + "operands[2] = gen_highpart (SImode, operands[0]);
  19819. + operands[3] = gen_highpart (SImode, operands[1]);
  19820. + operands[4] = gen_lowpart (SImode, operands[0]);
  19821. + operands[5] = gen_lowpart (SImode, operands[1]);"
  19822. + [(set_attr "length" "2")])
+
+(define_insn "*negdi2_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_operand:DI 1 "register_operand" "r")))]
+ "TARGET_ARCH64"
+ "sub\t%%g0, %1, %0")
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "register_operand" "r")))]
+ ""
+ "sub\t%%g0, %1, %0")
+
+(define_expand "unegvsi3"
+ [(parallel [(set (reg:CCC CC_REG)
+ (compare:CCC (not:SI (match_operand:SI 1 "register_operand" ""))
+ (const_int -1)))
+ (set (match_operand:SI 0 "register_operand" "")
+ (neg:SI (match_dup 1)))])
+ (set (pc)
+ (if_then_else (ltu (reg:CCC CC_REG) (const_int 0))
+ (label_ref (match_operand 2 ""))
+ (pc)))]
+ "")
+
+(define_expand "negvsi3"
+ [(parallel [(set (reg:CCV CC_REG)
+ (compare:CCV (neg:SI (match_operand:SI 1 "register_operand" ""))
+ (unspec:SI [(match_dup 1)] UNSPEC_NEGV)))
+ (set (match_operand:SI 0 "register_operand" "")
+ (neg:SI (match_dup 1)))])
+ (set (pc)
+ (if_then_else (ne (reg:CCV CC_REG) (const_int 0))
+ (label_ref (match_operand 2 ""))
+ (pc)))]
+ "")
+
+(define_insn "*cmp_ccnz_neg"
+ [(set (reg:CCNZ CC_REG)
+ (compare:CCNZ (neg:SI (match_operand:SI 0 "register_operand" "r"))
+ (const_int 0)))]
+ ""
+ "subcc\t%%g0, %0, %%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccxnz_neg"
+ [(set (reg:CCXNZ CC_REG)
+ (compare:CCXNZ (neg:DI (match_operand:DI 0 "register_operand" "r"))
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "subcc\t%%g0, %0, %%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccnz_neg_set"
+ [(set (reg:CCNZ CC_REG)
+ (compare:CCNZ (neg:SI (match_operand:SI 1 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_dup 1)))]
+ ""
+ "subcc\t%%g0, %1, %0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccxnz_neg_set"
+ [(set (reg:CCXNZ CC_REG)
+ (compare:CCXNZ (neg:DI (match_operand:DI 1 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_dup 1)))]
+ "TARGET_ARCH64"
+ "subcc\t%%g0, %1, %0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccc_neg_set"
+ [(set (reg:CCC CC_REG)
+ (compare:CCC (not:SI (match_operand:SI 1 "register_operand" "r"))
+ (const_int -1)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_dup 1)))]
+ ""
+ "subcc\t%%g0, %1, %0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccxc_neg_set"
+ [(set (reg:CCXC CC_REG)
+ (compare:CCXC (not:DI (match_operand:DI 1 "register_operand" "r"))
+ (const_int -1)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_dup 1)))]
+ "TARGET_ARCH64"
+ "subcc\t%%g0, %1, %0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccc_neg_sltu_set"
+ [(set (reg:CCC CC_REG)
+ (compare:CCC (zero_extend:DI
+ (neg:SI (plus:SI (match_operand:SI 1 "register_operand" "r")
+ (ltu:SI (reg:CCC CC_REG)
+ (const_int 0)))))
+ (neg:DI (plus:DI (zero_extend:DI (match_dup 1))
+ (ltu:DI (reg:CCC CC_REG)
+ (const_int 0))))))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (plus:SI (match_dup 1)
+ (ltu:SI (reg:CCC CC_REG) (const_int 0)))))]
+ ""
+ "subxcc\t%%g0, %1, %0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccv_neg"
+ [(set (reg:CCV CC_REG)
+ (compare:CCV (neg:SI (match_operand:SI 0 "register_operand" "r"))
+ (unspec:SI [(match_dup 0)] UNSPEC_NEGV)))]
+ ""
+ "subcc\t%%g0, %0, %%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccxv_neg"
+ [(set (reg:CCXV CC_REG)
+ (compare:CCXV (neg:DI (match_operand:DI 0 "register_operand" "r"))
+ (unspec:DI [(match_dup 0)] UNSPEC_NEGV)))]
+ "TARGET_ARCH64"
+ "subcc\t%%g0, %0, %%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccv_neg_set"
+ [(set (reg:CCV CC_REG)
+ (compare:CCV (neg:SI (match_operand:SI 1 "register_operand" "r"))
+ (unspec:SI [(match_dup 1)] UNSPEC_NEGV)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_dup 1)))]
+ ""
+ "subcc\t%%g0, %1, %0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccxv_neg_set"
+ [(set (reg:CCXV CC_REG)
+ (compare:CCXV (neg:DI (match_operand:DI 1 "register_operand" "r"))
+ (unspec:DI [(match_dup 1)] UNSPEC_NEGV)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_dup 1)))]
+ "TARGET_ARCH64"
+ "subcc\t%%g0, %1, %0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccv_neg_sltu_set"
+ [(set (reg:CCV CC_REG)
+ (compare:CCV (neg:SI (plus:SI (match_operand:SI 1 "register_operand" "r")
+ (ltu:SI (reg:CCC CC_REG) (const_int 0))))
+ (unspec:SI [(plus:SI (match_dup 1)
+ (ltu:SI (reg:CCC CC_REG)
+ (const_int 0)))]
+ UNSPEC_NEGV)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (plus:SI (match_dup 1)
+ (ltu:SI (reg:CCC CC_REG) (const_int 0)))))]
+ ""
+ "subxcc\t%%g0, %1, %0"
+ [(set_attr "type" "compare")])
+
+
+(define_insn "one_cmpldi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (not:DI (match_operand:DI 1 "arith_operand" "rI")))]
+ "TARGET_ARCH64"
+ "xnor\t%%g0, %1, %0")
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (not:SI (match_operand:SI 1 "arith_operand" "rI")))]
+ ""
+ "xnor\t%%g0, %1, %0")
+
+(define_insn "*cmp_cc_not"
+ [(set (reg:CC CC_REG)
+ (compare:CC (not:SI (match_operand:SI 0 "arith_operand" "rI"))
+ (const_int 0)))]
+ ""
+ "xnorcc\t%%g0, %0, %%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccx_not"
+ [(set (reg:CCX CC_REG)
+ (compare:CCX (not:DI (match_operand:DI 0 "arith_operand" "rI"))
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "xnorcc\t%%g0, %0, %%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_cc_set_not"
+ [(set (reg:CC CC_REG)
+ (compare:CC (not:SI (match_operand:SI 1 "arith_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (not:SI (match_dup 1)))]
+ ""
+ "xnorcc\t%%g0, %1, %0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccx_set_not"
+ [(set (reg:CCX CC_REG)
+ (compare:CCX (not:DI (match_operand:DI 1 "arith_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (not:DI (match_dup 1)))]
+ "TARGET_ARCH64"
+ "xnorcc\t%%g0, %1, %0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_cc_set"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "register_operand" "r"))
+ (set (reg:CC CC_REG)
+ (compare:CC (match_dup 1) (const_int 0)))]
+ ""
+ "orcc\t%1, 0, %0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccx_set64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operand:DI 1 "register_operand" "r"))
+ (set (reg:CCX CC_REG)
+ (compare:CCX (match_dup 1) (const_int 0)))]
+ "TARGET_ARCH64"
+ "orcc\t%1, 0, %0"
+ [(set_attr "type" "compare")])
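+
+;; For illustration: the two patterns above let a plain register copy
+;; double as a compare with zero, so "if ((x = y) != 0)" can come out as
+;; a single "orcc %o1, 0, %o0" rather than a move plus a separate compare.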
+
+
+;; Floating point arithmetic instructions.
+
+(define_expand "addtf3"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (plus:TF (match_operand:TF 1 "general_operand" "")
+ (match_operand:TF 2 "general_operand" "")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "emit_tfmode_binop (PLUS, operands); DONE;")
+
+(define_insn "*addtf3_hq"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (plus:TF (match_operand:TF 1 "register_operand" "e")
+ (match_operand:TF 2 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "faddq\t%1, %2, %0"
+ [(set_attr "type" "fp")])
+
+(define_insn "adddf3"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (plus:DF (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU"
+ "faddd\t%1, %2, %0"
+ [(set_attr "type" "fp")
+ (set_attr "fptype" "double")])
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (plus:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fadds\t%1, %2, %0"
+ [(set_attr "type" "fp")])
+
+(define_expand "subtf3"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (minus:TF (match_operand:TF 1 "general_operand" "")
+ (match_operand:TF 2 "general_operand" "")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "emit_tfmode_binop (MINUS, operands); DONE;")
+
+(define_insn "*subtf3_hq"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (minus:TF (match_operand:TF 1 "register_operand" "e")
+ (match_operand:TF 2 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fsubq\t%1, %2, %0"
+ [(set_attr "type" "fp")])
+
+(define_insn "subdf3"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (minus:DF (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU"
+ "fsubd\t%1, %2, %0"
+ [(set_attr "type" "fp")
+ (set_attr "fptype" "double")])
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (minus:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fsubs\t%1, %2, %0"
+ [(set_attr "type" "fp")])
+
+(define_expand "multf3"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (mult:TF (match_operand:TF 1 "general_operand" "")
+ (match_operand:TF 2 "general_operand" "")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "emit_tfmode_binop (MULT, operands); DONE;")
+
+(define_insn "*multf3_hq"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (mult:TF (match_operand:TF 1 "register_operand" "e")
+ (match_operand:TF 2 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fmulq\t%1, %2, %0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "muldf3"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (mult:DF (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU"
+ "fmuld\t%1, %2, %0"
+ [(set_attr "type" "fpmul")
+ (set_attr "fptype" "double")])
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (mult:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fmuls\t%1, %2, %0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "fmadf4"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (fma:DF (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")
+ (match_operand:DF 3 "register_operand" "e")))]
+ "TARGET_FMAF"
+ "fmaddd\t%1, %2, %3, %0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "fmsdf4"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (fma:DF (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")
+ (neg:DF (match_operand:DF 3 "register_operand" "e"))))]
+ "TARGET_FMAF"
+ "fmsubd\t%1, %2, %3, %0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "*nfmadf4"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (neg:DF (fma:DF (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")
+ (match_operand:DF 3 "register_operand" "e"))))]
+ "TARGET_FMAF"
+ "fnmaddd\t%1, %2, %3, %0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "*nfmsdf4"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (neg:DF (fma:DF (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")
+ (neg:DF (match_operand:DF 3 "register_operand" "e")))))]
+ "TARGET_FMAF"
+ "fnmsubd\t%1, %2, %3, %0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "fmasf4"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (fma:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")
+ (match_operand:SF 3 "register_operand" "f")))]
+ "TARGET_FMAF"
+ "fmadds\t%1, %2, %3, %0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "fmssf4"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (fma:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")
+ (neg:SF (match_operand:SF 3 "register_operand" "f"))))]
+ "TARGET_FMAF"
+ "fmsubs\t%1, %2, %3, %0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "*nfmasf4"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (neg:SF (fma:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")
+ (match_operand:SF 3 "register_operand" "f"))))]
+ "TARGET_FMAF"
+ "fnmadds\t%1, %2, %3, %0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "*nfmssf4"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (neg:SF (fma:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")
+ (neg:SF (match_operand:SF 3 "register_operand" "f")))))]
+ "TARGET_FMAF"
+ "fnmsubs\t%1, %2, %3, %0"
+ [(set_attr "type" "fpmul")])
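+
+;; For illustration: with -mfmaf the patterns above match the canonical
+;; fused RTL forms, e.g. (C sketch)
+;;
+;;	double f (double a, double b, double c)
+;;	{
+;;	  return __builtin_fma (a, b, c);	/* fmaddd */
+;;	}
+;;
+;; while -__builtin_fma (a, b, c) may map to fnmaddd and
+;; -__builtin_fma (a, b, -c) to fnmsubd.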
+
+(define_insn "*muldf3_extend"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (mult:DF (float_extend:DF (match_operand:SF 1 "register_operand" "f"))
+ (float_extend:DF (match_operand:SF 2 "register_operand" "f"))))]
+ "TARGET_FSMULD"
+ "fsmuld\t%1, %2, %0"
+ [(set_attr "type" "fpmul")
+ (set_attr "fptype" "double")])
+
+(define_insn "*multf3_extend"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (mult:TF (float_extend:TF (match_operand:DF 1 "register_operand" "e"))
+ (float_extend:TF (match_operand:DF 2 "register_operand" "e"))))]
+ "(TARGET_V8 || TARGET_V9) && TARGET_FPU && TARGET_HARD_QUAD"
+ "fdmulq\t%1, %2, %0"
+ [(set_attr "type" "fpmul")])
+
+(define_expand "divtf3"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (div:TF (match_operand:TF 1 "general_operand" "")
+ (match_operand:TF 2 "general_operand" "")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "emit_tfmode_binop (DIV, operands); DONE;")
+
+;; don't have timing for quad-prec. divide.
+(define_insn "*divtf3_hq"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (div:TF (match_operand:TF 1 "register_operand" "e")
+ (match_operand:TF 2 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fdivq\t%1, %2, %0"
+ [(set_attr "type" "fpdivs")])
+
+(define_expand "divdf3"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (div:DF (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU"
+ "")
+
+(define_insn "*divdf3_nofix"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (div:DF (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU && !sparc_fix_ut699"
+ "fdivd\t%1, %2, %0"
+ [(set_attr "type" "fpdivd")
+ (set_attr "fptype" "double")])
+
+(define_insn "*divdf3_fix"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (div:DF (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU && sparc_fix_ut699"
+ "fdivd\t%1, %2, %0\n\tstd\t%0, [%%sp-8]\n\tnop"
+ [(set_attr "type" "fpdivd")
+ (set_attr "fptype" "double")
+ (set_attr "length" "3")])
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (div:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_FPU && !sparc_fix_ut699"
+ "fdivs\t%1, %2, %0"
+ [(set_attr "type" "fpdivs")])
+
+(define_expand "negtf2"
+ [(set (match_operand:TF 0 "register_operand" "")
+ (neg:TF (match_operand:TF 1 "register_operand" "")))]
+ "TARGET_FPU"
+ "")
+
+(define_insn "*negtf2_hq"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (neg:TF (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fnegq\t%1, %0"
+ [(set_attr "type" "fpmove")])
+
+(define_insn_and_split "*negtf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (neg:TF (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && !TARGET_HARD_QUAD"
+ "#"
+ "&& reload_completed"
+ [(clobber (const_int 0))]
+{
+ rtx set_dest = operands[0];
+ rtx set_src = operands[1];
+ rtx dest1, dest2;
+ rtx src1, src2;
+
+ dest1 = gen_df_reg (set_dest, 0);
+ dest2 = gen_df_reg (set_dest, 1);
+ src1 = gen_df_reg (set_src, 0);
+ src2 = gen_df_reg (set_src, 1);
+
+ /* Now emit using the real source and destination we found, swapping
+ the order if we detect overlap. */
+ if (reg_overlap_mentioned_p (dest1, src2))
+ {
+ emit_insn (gen_movdf (dest2, src2));
+ emit_insn (gen_negdf2 (dest1, src1));
+ }
+ else
+ {
+ emit_insn (gen_negdf2 (dest1, src1));
+ if (REGNO (dest2) != REGNO (src2))
+ emit_insn (gen_movdf (dest2, src2));
+ }
+ DONE;
+}
+ [(set_attr "length" "2")])
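+
+;; For illustration: the splitter above works on DFmode halves, and only
+;; the high double carries the quad's sign bit; negating %f8-%f11 into
+;; %f0-%f3 (an arbitrary register choice, on a V9 target) would come out
+;; as
+;;
+;;	fnegd	%f8, %f0	! flip the sign in the high double
+;;	fmovd	%f10, %f2	! copy the low double unchanged
+;;
+;; with the two steps swapped when destination and source overlap.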
+
+(define_expand "negdf2"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (neg:DF (match_operand:DF 1 "register_operand" "")))]
+ "TARGET_FPU"
+ "")
+
+(define_insn_and_split "*negdf2_notv9"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (neg:DF (match_operand:DF 1 "register_operand" "e")))]
+ "TARGET_FPU && !TARGET_V9"
+ "#"
+ "&& reload_completed"
+ [(clobber (const_int 0))]
+{
+ rtx set_dest = operands[0];
+ rtx set_src = operands[1];
+ rtx dest1, dest2;
+ rtx src1, src2;
+
+ dest1 = gen_highpart (SFmode, set_dest);
+ dest2 = gen_lowpart (SFmode, set_dest);
+ src1 = gen_highpart (SFmode, set_src);
+ src2 = gen_lowpart (SFmode, set_src);
+
+ /* Now emit using the real source and destination we found, swapping
+ the order if we detect overlap. */
+ if (reg_overlap_mentioned_p (dest1, src2))
+ {
+ emit_insn (gen_movsf (dest2, src2));
+ emit_insn (gen_negsf2 (dest1, src1));
+ }
+ else
+ {
+ emit_insn (gen_negsf2 (dest1, src1));
+ if (REGNO (dest2) != REGNO (src2))
+ emit_insn (gen_movsf (dest2, src2));
+ }
+ DONE;
+}
+ [(set_attr "length" "2")])
+
+(define_insn "*negdf2_v9"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (neg:DF (match_operand:DF 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_V9"
+ "fnegd\t%1, %0"
+ [(set_attr "type" "fpmove")
+ (set_attr "fptype" "double")])
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (neg:SF (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fnegs\t%1, %0"
+ [(set_attr "type" "fpmove")])
+
+(define_expand "abstf2"
+ [(set (match_operand:TF 0 "register_operand" "")
+ (abs:TF (match_operand:TF 1 "register_operand" "")))]
+ "TARGET_FPU"
+ "")
+
+(define_insn "*abstf2_hq"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (abs:TF (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fabsq\t%1, %0"
+ [(set_attr "type" "fpmove")])
+
+(define_insn_and_split "*abstf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (abs:TF (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && !TARGET_HARD_QUAD"
+ "#"
+ "&& reload_completed"
+ [(clobber (const_int 0))]
+{
+ rtx set_dest = operands[0];
+ rtx set_src = operands[1];
+ rtx dest1, dest2;
+ rtx src1, src2;
+
+ dest1 = gen_df_reg (set_dest, 0);
+ dest2 = gen_df_reg (set_dest, 1);
+ src1 = gen_df_reg (set_src, 0);
+ src2 = gen_df_reg (set_src, 1);
+
+ /* Now emit using the real source and destination we found, swapping
+ the order if we detect overlap. */
+ if (reg_overlap_mentioned_p (dest1, src2))
+ {
+ emit_insn (gen_movdf (dest2, src2));
+ emit_insn (gen_absdf2 (dest1, src1));
+ }
+ else
+ {
+ emit_insn (gen_absdf2 (dest1, src1));
+ if (REGNO (dest2) != REGNO (src2))
+ emit_insn (gen_movdf (dest2, src2));
+ }
+ DONE;
+}
+ [(set_attr "length" "2")])
+
+(define_expand "absdf2"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (abs:DF (match_operand:DF 1 "register_operand" "")))]
+ "TARGET_FPU"
+ "")
+
+(define_insn_and_split "*absdf2_notv9"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (abs:DF (match_operand:DF 1 "register_operand" "e")))]
+ "TARGET_FPU && !TARGET_V9"
+ "#"
+ "&& reload_completed"
+ [(clobber (const_int 0))]
+{
+ rtx set_dest = operands[0];
+ rtx set_src = operands[1];
+ rtx dest1, dest2;
+ rtx src1, src2;
+
+ dest1 = gen_highpart (SFmode, set_dest);
+ dest2 = gen_lowpart (SFmode, set_dest);
+ src1 = gen_highpart (SFmode, set_src);
+ src2 = gen_lowpart (SFmode, set_src);
+
+ /* Now emit using the real source and destination we found, swapping
+ the order if we detect overlap. */
+ if (reg_overlap_mentioned_p (dest1, src2))
+ {
+ emit_insn (gen_movsf (dest2, src2));
+ emit_insn (gen_abssf2 (dest1, src1));
+ }
+ else
+ {
+ emit_insn (gen_abssf2 (dest1, src1));
+ if (REGNO (dest2) != REGNO (src2))
+ emit_insn (gen_movsf (dest2, src2));
+ }
+ DONE;
+}
+ [(set_attr "length" "2")])
+
+(define_insn "*absdf2_v9"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (abs:DF (match_operand:DF 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_V9"
+ "fabsd\t%1, %0"
+ [(set_attr "type" "fpmove")
+ (set_attr "fptype" "double")])
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (abs:SF (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fabss\t%1, %0"
+ [(set_attr "type" "fpmove")])
+
+(define_expand "sqrttf2"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (sqrt:TF (match_operand:TF 1 "general_operand" "")))]
+ "TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
+ "emit_tfmode_unop (SQRT, operands); DONE;")
+
+(define_insn "*sqrttf2_hq"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (sqrt:TF (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fsqrtq\t%1, %0"
+ [(set_attr "type" "fpsqrts")])
+
+(define_expand "sqrtdf2"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (sqrt:DF (match_operand:DF 1 "register_operand" "e")))]
+ "TARGET_FPU"
+ "")
+
+(define_insn "*sqrtdf2_nofix"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (sqrt:DF (match_operand:DF 1 "register_operand" "e")))]
+ "TARGET_FPU && !sparc_fix_ut699"
+ "fsqrtd\t%1, %0"
+ [(set_attr "type" "fpsqrtd")
+ (set_attr "fptype" "double")])
+
+(define_insn "*sqrtdf2_fix"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (sqrt:DF (match_operand:DF 1 "register_operand" "e")))]
+ "TARGET_FPU && sparc_fix_ut699"
+ "fsqrtd\t%1, %0\n\tstd\t%0, [%%sp-8]\n\tnop"
+ [(set_attr "type" "fpsqrtd")
+ (set_attr "fptype" "double")
+ (set_attr "length" "3")])
+
+(define_insn "sqrtsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_FPU && !sparc_fix_ut699"
+ "fsqrts\t%1, %0"
+ [(set_attr "type" "fpsqrts")])
+
+
+;; Arithmetic shift instructions.
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashift:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ ""
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+ return "sll\t%1, %2, %0";
+}
+ [(set_attr "type" "shift")])
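+
+;; For illustration: SLL only decodes the low five bits of its count, so
+;; the masking above is free; a CONST_INT count of 40 reaching this
+;; pattern is emitted as "sll %reg, 8, %reg".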
+
+(define_expand "ashldi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64 || TARGET_V8PLUS"
+{
+ if (TARGET_ARCH32)
+ {
+ if (GET_CODE (operands[2]) == CONST_INT)
+ FAIL;
+ emit_insn (gen_ashldi3_v8plus (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+})
+
+(define_insn "*ashldi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64"
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
+ return "sllx\t%1, %2, %0";
+}
+ [(set_attr "type" "shift")])
+
+(define_insn "ashldi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=&h,&h,r")
+ (ashift:DI (match_operand:DI 1 "arith_operand" "rI,0,rI")
+ (match_operand:SI 2 "arith_operand" "rI,rI,rI")))
+ (clobber (match_scratch:SI 3 "=X,X,&h"))]
+ "TARGET_V8PLUS"
+{
+ return output_v8plus_shift (insn, operands, "sllx");
+}
+ [(set_attr "type" "multi")
+ (set_attr "length" "5,5,6")])
+
+(define_insn "*cmp_ccnz_ashift_1"
+ [(set (reg:CCNZ CC_REG)
+ (compare:CCNZ (ashift:SI (match_operand:SI 0 "register_operand" "r")
+ (const_int 1))
+ (const_int 0)))]
+ ""
+ "addcc\t%0, %0, %%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccnz_set_ashift_1"
+ [(set (reg:CCNZ CC_REG)
+ (compare:CCNZ (ashift:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 1))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (ashift:SI (match_dup 1) (const_int 1)))]
+ ""
+ "addcc\t%1, %1, %0"
+ [(set_attr "type" "compare")])
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ ""
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+ return "sra\t%1, %2, %0";
+}
+ [(set_attr "type" "shift")])
+
+(define_insn "*ashrsi3_extend0"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (ashiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI"))))]
+ "TARGET_ARCH64"
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+ return "sra\t%1, %2, %0";
+}
+ [(set_attr "type" "shift")])
+
+;; This handles the case where
+;; (sign_extend:DI (ashiftrt:SI (match_operand:SI) (match_operand:SI)))
+;; but combiner "simplifies" it for us.
+(define_insn "*ashrsi3_extend1"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashiftrt:DI (ashift:DI (subreg:DI (match_operand:SI 1 "register_operand" "r") 0)
+ (const_int 32))
+ (match_operand:SI 2 "small_int_operand" "I")))]
+ "TARGET_ARCH64 && INTVAL (operands[2]) >= 32 && INTVAL (operands[2]) < 64"
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) - 32);
+ return "sra\t%1, %2, %0";
+}
+ [(set_attr "type" "shift")])
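+
+;; For illustration: a C sketch that can end up here after combine:
+;;
+;;	long f (int x) { return (long) (x >> 5); }
+;;
+;; combine may rewrite the sign_extend of the SImode shift into the
+;; shift-left-by-32 form above, and the insn still emits a single
+;; "sra %o0, 5, %o0".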
+
+;; This handles the case where
+;; (ashiftrt:DI (sign_extend:DI (match_operand:SI)) (const_int))
+;; but combiner "simplifies" it for us.
+(define_insn "*ashrsi3_extend2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extract:DI (subreg:DI (match_operand:SI 1 "register_operand" "r") 0)
+ (match_operand 2 "small_int_operand" "I")
+ (const_int 32)))]
+ "TARGET_ARCH64 && INTVAL (operands[2]) > 0 && INTVAL (operands[2]) <= 32"
+{
+ operands[2] = GEN_INT (32 - INTVAL (operands[2]));
+ return "sra\t%1, %2, %0";
+}
+ [(set_attr "type" "shift")])
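+
+;; For illustration: the mirror case, e.g.
+;;
+;;	long g (int x) { return (long) x >> 3; }
+;;
+;; is "simplified" into the sign_extract form above and likewise emits a
+;; single "sra %o0, 3, %o0".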
+
+(define_expand "ashrdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64 || TARGET_V8PLUS"
+{
+ if (TARGET_ARCH32)
+ {
+ if (GET_CODE (operands[2]) == CONST_INT)
+ FAIL; /* prefer generic code in this case */
+ emit_insn (gen_ashrdi3_v8plus (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+})
+
+(define_insn "*ashrdi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64"
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
+ return "srax\t%1, %2, %0";
+}
+ [(set_attr "type" "shift")])
+
+(define_insn "ashrdi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=&h,&h,r")
+ (ashiftrt:DI (match_operand:DI 1 "arith_operand" "rI,0,rI")
+ (match_operand:SI 2 "arith_operand" "rI,rI,rI")))
+ (clobber (match_scratch:SI 3 "=X,X,&h"))]
+ "TARGET_V8PLUS"
+{
+ return output_v8plus_shift (insn, operands, "srax");
+}
+ [(set_attr "type" "multi")
+ (set_attr "length" "5,5,6")])
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ ""
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+ return "srl\t%1, %2, %0";
+}
+ [(set_attr "type" "shift")])
+
+(define_insn "*lshrsi3_extend0"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI"))))]
+ "TARGET_ARCH64"
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+ return "srl\t%1, %2, %0";
+}
+ [(set_attr "type" "shift")])
+
+;; This handles the case where
+;; (zero_extend:DI (lshiftrt:SI (match_operand:SI) (match_operand:SI)))
+;; but combiner "simplifies" it for us.
+(define_insn "*lshrsi3_extend1"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (subreg:DI (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")) 0)
+ (match_operand 3 "const_int_operand" "")))]
+ "TARGET_ARCH64 && (unsigned HOST_WIDE_INT) INTVAL (operands[3]) == 0xffffffff"
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+ return "srl\t%1, %2, %0";
+}
+ [(set_attr "type" "shift")])
+
+;; This handles the case where
+;; (lshiftrt:DI (zero_extend:DI (match_operand:SI)) (const_int))
+;; but combiner "simplifies" it for us.
+(define_insn "*lshrsi3_extend2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extract:DI (subreg:DI (match_operand:SI 1 "register_operand" "r") 0)
+ (match_operand 2 "small_int_operand" "I")
+ (const_int 32)))]
+ "TARGET_ARCH64 && INTVAL (operands[2]) > 0 && INTVAL (operands[2]) <= 32"
+{
+ operands[2] = GEN_INT (32 - INTVAL (operands[2]));
+ return "srl\t%1, %2, %0";
+}
+ [(set_attr "type" "shift")])
+
+(define_expand "lshrdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64 || TARGET_V8PLUS"
+{
+ if (TARGET_ARCH32)
+ {
+ if (GET_CODE (operands[2]) == CONST_INT)
+ FAIL;
+ emit_insn (gen_lshrdi3_v8plus (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+})
+
+(define_insn "*lshrdi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64"
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
+ return "srlx\t%1, %2, %0";
+}
+ [(set_attr "type" "shift")])
+
+(define_insn "lshrdi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=&h,&h,r")
+ (lshiftrt:DI (match_operand:DI 1 "arith_operand" "rI,0,rI")
+ (match_operand:SI 2 "arith_operand" "rI,rI,rI")))
+ (clobber (match_scratch:SI 3 "=X,X,&h"))]
+ "TARGET_V8PLUS"
+{
+ return output_v8plus_shift (insn, operands, "srlx");
+}
+ [(set_attr "type" "multi")
+ (set_attr "length" "5,5,6")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI (subreg:SI (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 32)) 4)
+ (match_operand:SI 2 "small_int_operand" "I")))]
+ "TARGET_ARCH64 && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) < 32"
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) + 32);
+ return "srax\t%1, %2, %0";
+}
+ [(set_attr "type" "shift")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI (subreg:SI (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 32)) 4)
+ (match_operand:SI 2 "small_int_operand" "I")))]
+ "TARGET_ARCH64 && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) < 32"
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) + 32);
+ return "srlx\t%1, %2, %0";
+}
+ [(set_attr "type" "shift")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI (subreg:SI (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "small_int_operand" "I")) 4)
+ (match_operand:SI 3 "small_int_operand" "I")))]
+ "TARGET_ARCH64
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) >= 32
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[3]) < 32
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[2]) + INTVAL (operands[3])) < 64"
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) + INTVAL (operands[3]));
+
+ return "srax\t%1, %2, %0";
+}
+ [(set_attr "type" "shift")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI (subreg:SI (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "small_int_operand" "I")) 4)
+ (match_operand:SI 3 "small_int_operand" "I")))]
+ "TARGET_ARCH64
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) >= 32
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[3]) < 32
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[2]) + INTVAL (operands[3])) < 64"
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) + INTVAL (operands[3]));
+
+ return "srlx\t%1, %2, %0";
+}
+ [(set_attr "type" "shift")])
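+
+;; For illustration: the four anonymous patterns above merge a 64-bit
+;; shift with a following 32-bit shift of its low word, e.g. with x an
+;; unsigned 64-bit value, "(int) (x >> 32) >> 2" becomes a single
+;; "srax %o0, 34, %o0" instead of a srlx/sra pair.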
+
+
+;; Unconditional and other jump instructions.
+
+(define_expand "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ "")
+
+(define_insn "*jump_ubranch"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ "!TARGET_CBCOND"
+{
+ return output_ubranch (operands[0], insn);
+}
+ [(set_attr "type" "uncond_branch")])
+
+(define_insn "*jump_cbcond"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ "TARGET_CBCOND"
+{
+ return output_ubranch (operands[0], insn);
+}
+ [(set_attr "type" "uncond_cbcond")])
+
+(define_expand "tablejump"
+ [(parallel [(set (pc) (match_operand 0 "register_operand" "r"))
+ (use (label_ref (match_operand 1 "" "")))])]
+ ""
+{
+ gcc_assert (GET_MODE (operands[0]) == CASE_VECTOR_MODE);
+
+ /* In pic mode, our address differences are against the base of the
+ table. Add that base value back in; CSE ought to be able to combine
+ the two address loads. */
+ if (flag_pic)
+ {
+ rtx tmp, tmp2;
+ tmp = gen_rtx_LABEL_REF (Pmode, operands[1]);
+ tmp2 = operands[0];
+ if (CASE_VECTOR_MODE != Pmode)
+ tmp2 = gen_rtx_SIGN_EXTEND (Pmode, tmp2);
+ tmp = gen_rtx_PLUS (Pmode, tmp2, tmp);
+ operands[0] = memory_address (Pmode, tmp);
+ }
+})
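+
+;; For illustration: with -fPIC the jump table holds differences against
+;; the table base, so the dispatch built above comes out roughly as
+;; (registers arbitrary)
+;;
+;;	ld	[%l0 + %l1], %l2	! table base + scaled index
+;;	jmp	%l2 + %l0		! add the table base back in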
+
+(define_insn "*tablejump<P:mode>"
+ [(set (pc) (match_operand:P 0 "address_operand" "p"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "jmp\t%a0%#"
+ [(set_attr "type" "uncond_branch")])
+
+
+;; Jump to subroutine instructions.
+
+(define_expand "call"
+ ;; Note that this expression is not used for generating RTL.
+ ;; All the RTL is generated explicitly below.
+ [(call (match_operand 0 "call_operand" "")
+ (match_operand 3 "" "i"))]
+ ;; operands[2] is next_arg_register
+ ;; operands[3] is struct_value_size_rtx.
+ ""
+{
+ rtx fn_rtx;
+
+ gcc_assert (MEM_P (operands[0]) && GET_MODE (operands[0]) == FUNCTION_MODE);
+
+ gcc_assert (GET_CODE (operands[3]) == CONST_INT);
+
+ if (GET_CODE (XEXP (operands[0], 0)) == LABEL_REF)
+ {
+ /* This is really a PIC sequence. We want to represent
+ it as a funny jump so its delay slots can be filled.
+
+ ??? But if this really *is* a CALL, will not it clobber the
+ call-clobbered registers? We lose this if it is a JUMP_INSN.
+ Why cannot we have delay slots filled if it were a CALL? */
+
+ /* We accept negative sizes for untyped calls. */
+ if (TARGET_ARCH32 && INTVAL (operands[3]) != 0)
+ emit_jump_insn
+ (gen_rtx_PARALLEL
+ (VOIDmode,
+ gen_rtvec (3,
+ gen_rtx_SET (pc_rtx, XEXP (operands[0], 0)),
+ operands[3],
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 15)))));
+ else
+ emit_jump_insn
+ (gen_rtx_PARALLEL
+ (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (pc_rtx, XEXP (operands[0], 0)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 15)))));
+ goto finish_call;
+ }
+
+ fn_rtx = operands[0];
+
+ /* We accept negative sizes for untyped calls. */
+ if (TARGET_ARCH32 && INTVAL (operands[3]) != 0)
+ sparc_emit_call_insn
+ (gen_rtx_PARALLEL
+ (VOIDmode,
+ gen_rtvec (3, gen_rtx_CALL (VOIDmode, fn_rtx, const0_rtx),
+ operands[3],
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 15)))),
+ XEXP (fn_rtx, 0));
+ else
+ sparc_emit_call_insn
+ (gen_rtx_PARALLEL
+ (VOIDmode,
+ gen_rtvec (2, gen_rtx_CALL (VOIDmode, fn_rtx, const0_rtx),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 15)))),
+ XEXP (fn_rtx, 0));
+
+ finish_call:
+
+ DONE;
+})
+
+;; We can't use the same pattern for these two insns, because then registers
+;; in the address may not be properly reloaded.
+
+(define_insn "*call_address<P:mode>"
+ [(call (mem:P (match_operand:P 0 "address_operand" "p"))
+ (match_operand 1 "" ""))
+ (clobber (reg:P O7_REG))]
+ ;;- Do not use operand 1 for most machines.
+ ""
+ "call\t%a0, %1%#"
+ [(set_attr "type" "call")])
+
+(define_insn "*call_symbolic<P:mode>"
+ [(call (mem:P (match_operand:P 0 "symbolic_operand" "s"))
+ (match_operand 1 "" ""))
+ (clobber (reg:P O7_REG))]
+ ;;- Do not use operand 1 for most machines.
+ ""
+ "call\t%a0, %1%#"
+ [(set_attr "type" "call")])
+
+;; This is a call that wants a structure value.
+;; There is no such critter for v9 (??? we may need one anyway).
+(define_insn "*call_address_struct_value_sp32"
+ [(call (mem:SI (match_operand:SI 0 "address_operand" "p"))
+ (match_operand 1 "" ""))
+ (match_operand 2 "immediate_operand" "")
+ (clobber (reg:SI O7_REG))]
+ ;;- Do not use operand 1 for most machines.
+ "TARGET_ARCH32 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) > 0"
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0xfff);
+ return "call\t%a0, %1\n\t nop\n\tunimp\t%2";
+}
+ [(set_attr "type" "call_no_delay_slot")
+ (set_attr "length" "3")])
+
+;; This is a call that wants a structure value.
+;; There is no such critter for v9 (??? we may need one anyway).
+(define_insn "*call_symbolic_struct_value_sp32"
+ [(call (mem:SI (match_operand:SI 0 "symbolic_operand" "s"))
+ (match_operand 1 "" ""))
+ (match_operand 2 "immediate_operand" "")
+ (clobber (reg:SI O7_REG))]
+ ;;- Do not use operand 1 for most machines.
+ "TARGET_ARCH32 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) > 0"
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0xfff);
+ return "call\t%a0, %1\n\t nop\n\tunimp\t%2";
+}
+ [(set_attr "type" "call_no_delay_slot")
+ (set_attr "length" "3")])
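+
+;; For illustration: the 32-bit psABI handshake encoded above looks like
+;; this at a call site returning an 8-byte struct:
+;;
+;;	call	f
+;;	 nop
+;;	unimp	8	! expected size of the returned aggregate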
+
+;; This is a call that may want a structure value. This is used for
+;; untyped_calls.
+(define_insn "*call_address_untyped_struct_value_sp32"
+ [(call (mem:SI (match_operand:SI 0 "address_operand" "p"))
+ (match_operand 1 "" ""))
+ (match_operand 2 "immediate_operand" "")
+ (clobber (reg:SI O7_REG))]
+ ;;- Do not use operand 1 for most machines.
+ "TARGET_ARCH32 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0"
+ "call\t%a0, %1\n\t nop\n\tnop"
+ [(set_attr "type" "call_no_delay_slot")
+ (set_attr "length" "3")])
+
+;; This is a call that may want a structure value. This is used for
+;; untyped_calls.
+(define_insn "*call_symbolic_untyped_struct_value_sp32"
+ [(call (mem:SI (match_operand:SI 0 "symbolic_operand" "s"))
+ (match_operand 1 "" ""))
+ (match_operand 2 "immediate_operand" "")
+ (clobber (reg:SI O7_REG))]
+ ;;- Do not use operand 1 for most machines.
+ "TARGET_ARCH32 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0"
+ "call\t%a0, %1\n\t nop\n\tnop"
+ [(set_attr "type" "call_no_delay_slot")
+ (set_attr "length" "3")])
+
+(define_expand "call_value"
+ ;; Note that this expression is not used for generating RTL.
+ ;; All the RTL is generated explicitly below.
+ [(set (match_operand 0 "register_operand" "")
+ (call (match_operand 1 "call_operand" "")
+ (match_operand 4 "" "")))]
+ ;; operand 2 is stack_size_rtx
+ ;; operand 3 is next_arg_register
+ ""
+{
+ rtx fn_rtx;
+ rtvec vec;
+
+ gcc_assert (MEM_P (operands[1]) && GET_MODE (operands[1]) == FUNCTION_MODE);
+
+ fn_rtx = operands[1];
+
+ vec = gen_rtvec (2,
+ gen_rtx_SET (operands[0],
+ gen_rtx_CALL (VOIDmode, fn_rtx, const0_rtx)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 15)));
+
+ sparc_emit_call_insn (gen_rtx_PARALLEL (VOIDmode, vec), XEXP (fn_rtx, 0));
+
+ DONE;
+})
+
+(define_insn "*call_value_address<P:mode>"
+ [(set (match_operand 0 "" "")
+ (call (mem:P (match_operand:P 1 "address_operand" "p"))
+ (match_operand 2 "" "")))
+ (clobber (reg:P O7_REG))]
+ ;;- Do not use operand 2 for most machines.
+ ""
+ "call\t%a1, %2%#"
+ [(set_attr "type" "call")])
+
+(define_insn "*call_value_symbolic<P:mode>"
+ [(set (match_operand 0 "" "")
+ (call (mem:P (match_operand:P 1 "symbolic_operand" "s"))
+ (match_operand 2 "" "")))
+ (clobber (reg:P O7_REG))]
+ ;;- Do not use operand 2 for most machines.
+ ""
+ "call\t%a1, %2%#"
+ [(set_attr "type" "call")])
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand:BLK 1 "memory_operand" "")
+ (match_operand 2 "" "")])]
+ ""
+{
+ rtx valreg1 = gen_rtx_REG (DImode, 8);
+ rtx result = operands[1];
+
+ /* Pass constm1 to indicate that it may expect a structure value, but
+ we don't know what size it is. */
+ emit_call_insn (gen_call (operands[0], const0_rtx, NULL, constm1_rtx));
+
+ /* Save the function value registers. */
+ emit_move_insn (adjust_address (result, DImode, 0), valreg1);
+ if (TARGET_FPU)
+ {
+ rtx valreg2 = gen_rtx_REG (TARGET_ARCH64 ? TFmode : DFmode, 32);
+ emit_move_insn (adjust_address (result, TARGET_ARCH64 ? TFmode : DFmode, 8),
+ valreg2);
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+
+ DONE;
+})
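+
+;; For illustration: this expander backs __builtin_apply; register 8 in
+;; DImode above is the %o0 return register (the %o0/%o1 pair on 32-bit)
+;; and register 32 is %f0, so the result block captures both the integer
+;; and the FP return value, since the callee's actual return type is
+;; unknown here.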
  21136. +
  21137. +
  21138. +;; Tail call instructions.
  21139. +
  21140. +(define_expand "sibcall"
  21141. + [(parallel [(call (match_operand 0 "call_operand" "") (const_int 0))
  21142. + (return)])]
  21143. + ""
  21144. + "")
  21145. +
  21146. +(define_insn "*sibcall_symbolic<P:mode>"
  21147. + [(call (mem:P (match_operand:P 0 "symbolic_operand" "s"))
  21148. + (match_operand 1 "" ""))
  21149. + (return)]
  21150. + ""
  21151. +{
  21152. + return output_sibcall (insn, operands[0]);
  21153. +}
  21154. + [(set_attr "type" "sibcall")])
  21155. +
  21156. +(define_expand "sibcall_value"
  21157. + [(parallel [(set (match_operand 0 "register_operand")
  21158. + (call (match_operand 1 "call_operand" "") (const_int 0)))
  21159. + (return)])]
  21160. + ""
  21161. + "")
  21162. +
  21163. +(define_insn "*sibcall_value_symbolic<P:mode>"
  21164. + [(set (match_operand 0 "" "")
  21165. + (call (mem:P (match_operand:P 1 "symbolic_operand" "s"))
  21166. + (match_operand 2 "" "")))
  21167. + (return)]
  21168. + ""
  21169. +{
  21170. + return output_sibcall (insn, operands[1]);
  21171. +}
  21172. + [(set_attr "type" "sibcall")])
  21173. +
  21174. +
  21175. +;; Special instructions.
  21176. +
  21177. +(define_expand "prologue"
  21178. + [(const_int 0)]
  21179. + ""
  21180. +{
  21181. + if (TARGET_FLAT)
  21182. + sparc_flat_expand_prologue ();
  21183. + else
  21184. + sparc_expand_prologue ();
  21185. + DONE;
  21186. +})
  21187. +
  21188. +;; The "register window save" insn is modelled as follows. The dwarf2
  21189. +;; information is manually added in emit_window_save.
  21190. +
  21191. +(define_insn "window_save"
  21192. + [(unspec_volatile [(match_operand 0 "arith_operand" "rI")] UNSPECV_SAVEW)]
  21193. + "!TARGET_FLAT"
  21194. + "save\t%%sp, %0, %%sp"
  21195. + [(set_attr "type" "savew")])
  21196. +
  21197. +(define_expand "epilogue"
  21198. + [(return)]
  21199. + ""
  21200. +{
  21201. + if (TARGET_FLAT)
  21202. + sparc_flat_expand_epilogue (false);
  21203. + else
  21204. + sparc_expand_epilogue (false);
  21205. +})
  21206. +
  21207. +(define_expand "sibcall_epilogue"
  21208. + [(return)]
  21209. + ""
  21210. +{
  21211. + if (TARGET_FLAT)
  21212. + sparc_flat_expand_epilogue (false);
  21213. + else
  21214. + sparc_expand_epilogue (false);
  21215. + DONE;
  21216. +})
  21217. +
  21218. +(define_expand "eh_return"
  21219. + [(use (match_operand 0 "general_operand" ""))]
  21220. + ""
  21221. +{
  21222. + emit_move_insn (gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM), operands[0]);
  21223. + emit_jump_insn (gen_eh_return_internal ());
  21224. + emit_barrier ();
  21225. + DONE;
  21226. +})
  21227. +
  21228. +(define_insn_and_split "eh_return_internal"
  21229. + [(eh_return)]
  21230. + ""
  21231. + "#"
  21232. + "epilogue_completed"
  21233. + [(return)]
  21234. +{
  21235. + if (TARGET_FLAT)
  21236. + sparc_flat_expand_epilogue (true);
  21237. + else
  21238. + sparc_expand_epilogue (true);
  21239. +})
  21240. +
  21241. +(define_expand "return"
  21242. + [(return)]
  21243. + "sparc_can_use_return_insn_p ()"
  21244. +{
  21245. + if (cfun->calls_alloca)
  21246. + emit_insn (gen_frame_blockage ());
  21247. +})
  21248. +
  21249. +(define_insn "*return_internal"
  21250. + [(return)]
  21251. + ""
  21252. +{
  21253. + return output_return (insn);
  21254. +}
  21255. + [(set_attr "type" "return")
  21256. + (set (attr "length")
  21257. + (cond [(eq_attr "calls_eh_return" "true")
  21258. + (if_then_else (eq_attr "delayed_branch" "true")
  21259. + (if_then_else (ior (eq_attr "isa" "v9")
  21260. + (eq_attr "flat" "true"))
  21261. + (const_int 2)
  21262. + (const_int 3))
  21263. + (if_then_else (eq_attr "flat" "true")
  21264. + (const_int 3)
  21265. + (const_int 4)))
  21266. + (ior (eq_attr "leaf_function" "true") (eq_attr "flat" "true"))
  21267. + (if_then_else (eq_attr "empty_delay_slot" "true")
  21268. + (const_int 2)
  21269. + (const_int 1))
  21270. + (eq_attr "empty_delay_slot" "true")
  21271. + (if_then_else (eq_attr "delayed_branch" "true")
  21272. + (const_int 2)
  21273. + (const_int 3))
  21274. + ] (const_int 1)))])
  21275. +
  21276. +;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
  21277. +;; all of memory. This blocks insns from being moved across this point.
  21278. +
  21279. +(define_insn "blockage"
  21280. + [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)]
  21281. + ""
  21282. + ""
  21283. + [(set_attr "length" "0")])
  21284. +
  21285. +;; Do not schedule instructions accessing memory before this point.
  21286. +
  21287. +(define_expand "frame_blockage"
  21288. + [(set (match_dup 0)
  21289. + (unspec:BLK [(match_dup 1)] UNSPEC_FRAME_BLOCKAGE))]
  21290. + ""
  21291. +{
  21292. + operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
  21293. + MEM_VOLATILE_P (operands[0]) = 1;
  21294. + operands[1] = stack_pointer_rtx;
  21295. +})
  21296. +
  21297. +(define_insn "*frame_blockage<P:mode>"
  21298. + [(set (match_operand:BLK 0 "" "")
  21299. + (unspec:BLK [(match_operand:P 1 "" "")] UNSPEC_FRAME_BLOCKAGE))]
  21300. + ""
  21301. + ""
  21302. + [(set_attr "length" "0")])
  21303. +
  21304. +;; We use membar #Sync for the speculation barrier on V9.
  21305. +
  21306. +(define_insn "speculation_barrier"
  21307. + [(unspec_volatile [(const_int 0)] UNSPECV_SPECULATION_BARRIER)]
  21308. + "TARGET_V9"
  21309. + "membar\t64"
  21310. + [(set_attr "type" "multi")])
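+
+;; Hedged usage sketch: this pattern is reached from the generic builtin
+;;
+;;   x = __builtin_speculation_safe_value (x);
+;;
+;; "membar 64" is membar #Sync, the strongest V9 barrier, which keeps
+;; younger instructions from issuing speculatively past this point.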
  21311. +
  21312. +(define_expand "probe_stack"
  21313. + [(set (match_operand 0 "memory_operand" "") (const_int 0))]
  21314. + ""
  21315. +{
  21316. + operands[0]
  21317. + = adjust_address (operands[0], GET_MODE (operands[0]), SPARC_STACK_BIAS);
  21318. +})
  21319. +
  21320. +(define_insn "probe_stack_range<P:mode>"
  21321. + [(set (match_operand:P 0 "register_operand" "=r")
  21322. + (unspec_volatile:P [(match_operand:P 1 "register_operand" "0")
  21323. + (match_operand:P 2 "register_operand" "r")]
  21324. + UNSPECV_PROBE_STACK_RANGE))]
  21325. + ""
  21326. +{
  21327. + return output_probe_stack_range (operands[0], operands[2]);
  21328. +}
  21329. + [(set_attr "type" "multi")])
  21330. +
  21331. +;; Prepare to return any type including a structure value.
  21332. +
  21333. +(define_expand "untyped_return"
  21334. + [(match_operand:BLK 0 "memory_operand" "")
  21335. + (match_operand 1 "" "")]
  21336. + ""
  21337. +{
  21338. + rtx valreg1 = gen_rtx_REG (DImode, 24);
  21339. + rtx result = operands[0];
  21340. +
  21341. + if (TARGET_ARCH32)
  21342. + {
  21343. + rtx rtnreg = gen_rtx_REG (SImode, RETURN_ADDR_REGNUM);
  21344. + rtx value = gen_reg_rtx (SImode);
  21345. +
  21346. + /* Fetch the instruction to which we will return and check whether it
  21347. + is an unimp instruction (its most significant 10 bits will be zero).
  21348. + If so, update the return address to skip the unimp instruction. */
  21349. + emit_move_insn (value,
  21350. + gen_rtx_MEM (SImode, plus_constant (SImode, rtnreg, 8)));
  21351. + emit_insn (gen_lshrsi3 (value, value, GEN_INT (22)));
  21352. + emit_insn (gen_update_return (rtnreg, value));
  21353. + }
  21354. +
  21355. + /* Reload the function value registers.
  21356. + Put USE insns before the return. */
  21357. + emit_move_insn (valreg1, adjust_address (result, DImode, 0));
  21358. + emit_use (valreg1);
  21359. +
  21360. + if (TARGET_FPU)
  21361. + {
  21362. + rtx valreg2 = gen_rtx_REG (TARGET_ARCH64 ? TFmode : DFmode, 32);
  21363. + emit_move_insn (valreg2,
  21364. + adjust_address (result, TARGET_ARCH64 ? TFmode : DFmode, 8));
  21365. + emit_use (valreg2);
  21366. + }
  21367. +
  21368. + /* Construct the return. */
  21369. + expand_naked_return ();
  21370. +
  21371. + DONE;
  21372. +})
  21373. +
  21374. +;; Adjust the return address conditionally: if the value of op1 is equal
  21375. +;; to zero, then adjust the return address, i.e. op0 = op0 + 4.
  21376. +;; This is technically only *half* the check required by the 32-bit SPARC
  21377. +;; psABI: it ensures that an "unimp" insn was written by the caller, but
  21378. +;; it does not check that the expected struct size matches (that size is
  21379. +;; encoded in the 12 low-order bits of the insn). The check is obsolete
  21380. +;; and only used by the "untyped_return" pattern above.
  21381. +
  21382. +(define_insn "update_return"
  21383. + [(unspec:SI [(match_operand:SI 0 "register_operand" "r")
  21384. + (match_operand:SI 1 "register_operand" "r")] UNSPEC_UPDATE_RETURN)]
  21385. + "TARGET_ARCH32"
  21386. +{
  21387. + if (flag_delayed_branch)
  21388. + return "cmp\t%1, 0\n\tbe,a\t.+8\n\t add\t%0, 4, %0";
  21389. + else
  21390. + return "cmp\t%1, 0\n\tbne\t.+12\n\t nop\n\tadd\t%0, 4, %0";
  21391. +}
  21392. + [(set (attr "type") (const_string "multi"))
  21393. + (set (attr "length")
  21394. + (if_then_else (eq_attr "delayed_branch" "true")
  21395. + (const_int 3)
  21396. + (const_int 4)))])
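+
+;; Caller-side sketch of the psABI convention being probed (illustrative
+;; operands): a call to a function returning a structure is followed by
+;; an "unimp" word holding the expected size:
+;;
+;;   call  foo
+;;    nop
+;;   unimp 8            ! struct return, expected size 8
+;;
+;; A callee that really returns a struct branches back to the call + 12,
+;; skipping the unimp; update_return makes the same +4 adjustment on the
+;; untyped_return path above when it finds the unimp word.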
  21397. +
  21398. +(define_insn "nop"
  21399. + [(const_int 0)]
  21400. + ""
  21401. + "nop")
  21402. +
  21403. +(define_expand "indirect_jump"
  21404. + [(set (pc) (match_operand 0 "address_operand" "p"))]
  21405. + ""
  21406. + "")
  21407. +
  21408. +(define_insn "*branch<P:mode>"
  21409. + [(set (pc) (match_operand:P 0 "address_operand" "p"))]
  21410. + ""
  21411. + "jmp\t%a0%#"
  21412. + [(set_attr "type" "uncond_branch")])
  21413. +
  21414. +(define_expand "save_stack_nonlocal"
  21415. + [(set (match_operand 0 "memory_operand" "")
  21416. + (match_operand 1 "register_operand" ""))
  21417. + (set (match_dup 2) (match_dup 3))]
  21418. + ""
  21419. +{
  21420. + operands[0] = adjust_address (operands[0], Pmode, 0);
  21421. + operands[2] = adjust_address (operands[0], Pmode, GET_MODE_SIZE (Pmode));
  21422. + operands[3] = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
  21423. +})
  21424. +
  21425. +(define_expand "restore_stack_nonlocal"
  21426. + [(set (match_operand 0 "register_operand" "")
  21427. + (match_operand 1 "memory_operand" ""))]
  21428. + ""
  21429. +{
  21430. + operands[1] = adjust_address (operands[1], Pmode, 0);
  21431. +})
  21432. +
  21433. +(define_expand "nonlocal_goto"
  21434. + [(match_operand 0 "general_operand" "")
  21435. + (match_operand 1 "general_operand" "")
  21436. + (match_operand 2 "memory_operand" "")
  21437. + (match_operand 3 "memory_operand" "")]
  21438. + ""
  21439. +{
  21440. + rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
  21441. + rtx r_label = operands[1];
  21442. + rtx r_sp = adjust_address (operands[2], Pmode, 0);
  21443. + rtx r_fp = operands[3];
  21444. + rtx r_i7 = adjust_address (operands[2], Pmode, GET_MODE_SIZE (Pmode));
  21445. +
  21446. + /* We need to flush all the register windows so that their contents will
  21447. + be re-synchronized by the restore insn of the target function. */
  21448. + if (!TARGET_FLAT)
  21449. + emit_insn (gen_flush_register_windows ());
  21450. +
  21451. + emit_clobber (gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)));
  21452. + emit_clobber (gen_rtx_MEM (BLKmode, hard_frame_pointer_rtx));
  21453. +
  21454. + r_label = copy_to_reg (r_label);
  21455. +
  21456. + /* Restore the frame pointer and stack pointer. We must use a
  21457. + temporary since the setjmp buffer may be a local. */
  21458. + r_fp = copy_to_reg (r_fp);
  21459. + emit_stack_restore (SAVE_NONLOCAL, r_sp);
  21460. + r_i7 = copy_to_reg (r_i7);
  21461. +
  21462. + /* Ensure the frame pointer move is not optimized. */
  21463. + emit_insn (gen_blockage ());
  21464. + emit_clobber (hard_frame_pointer_rtx);
  21465. + emit_move_insn (hard_frame_pointer_rtx, r_fp);
  21466. + emit_move_insn (i7, r_i7);
  21467. +
  21468. + /* USE of hard_frame_pointer_rtx added for consistency;
  21469. + not clear if really needed. */
  21470. + emit_use (hard_frame_pointer_rtx);
  21471. + emit_use (stack_pointer_rtx);
  21472. + emit_use (i7);
  21473. +
  21474. + emit_indirect_jump (r_label);
  21475. + DONE;
  21476. +})
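+
+;; Source-level sketch (GNU C, assumed example) of what reaches this
+;; expander: a nested function jumping to a label of its parent.
+;;
+;;   void outer (void)
+;;   {
+;;     __label__ out;
+;;     void inner (void) { goto out; }  /* nonlocal goto */
+;;     inner ();
+;;    out:;
+;;   }
+;;
+;; The jump must reinstate outer's stack/frame pointers and return address
+;; from the setjmp-style buffer, hence the window flush and the blockage
+;; protecting the frame-pointer move above.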
  21477. +
  21478. +(define_expand "builtin_setjmp_receiver"
  21479. + [(label_ref (match_operand 0 "" ""))]
  21480. + "TARGET_VXWORKS_RTP && flag_pic"
  21481. +{
  21482. + load_got_register ();
  21483. + DONE;
  21484. +})
  21485. +
  21486. +;; Special insn to flush register windows.
  21487. +
  21488. +(define_insn "flush_register_windows"
  21489. + [(unspec_volatile [(const_int 0)] UNSPECV_FLUSHW)]
  21490. + ""
  21491. +{
  21492. + return TARGET_V9 ? "flushw" : "ta\t3";
  21493. +}
  21494. + [(set_attr "type" "flushw")])
  21495. +
  21496. +;; Special pattern for the FLUSH instruction.
  21497. +
  21498. +(define_insn "flush<P:mode>"
  21499. + [(unspec_volatile [(match_operand:P 0 "memory_operand" "m")] UNSPECV_FLUSH)]
  21500. + ""
  21501. +{
  21502. + return TARGET_V9 ? "flush\t%f0" : "iflush\t%f0";
  21503. +}
  21504. + [(set_attr "type" "iflush")])
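+
+;; Usage note (sketch): FLUSH makes a store into the instruction stream
+;; visible to instruction fetch, e.g. when materializing a trampoline:
+;;
+;;   st    %g1, [%o0]    ! write an instruction word
+;;   flush %o0           ! synchronize the I-cache for that doubleword
+;;
+;; Pre-V9 assemblers spell the same operation "iflush".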
  21505. +
  21506. +;; Special insns to load and store the 32-bit FP Status Register.
  21507. +
  21508. +(define_insn "ldfsr"
  21509. + [(unspec_volatile [(match_operand:SI 0 "memory_operand" "m")] UNSPECV_LDFSR)]
  21510. + "TARGET_FPU"
  21511. + "ld\t%0, %%fsr"
  21512. + [(set_attr "type" "load")
  21513. + (set_attr "subtype" "regular")])
  21514. +
  21515. +(define_insn "stfsr"
  21516. + [(set (match_operand:SI 0 "memory_operand" "=m")
  21517. + (unspec_volatile:SI [(const_int 0)] UNSPECV_STFSR))]
  21518. + "TARGET_FPU"
  21519. + "st\t%%fsr, %0"
  21520. + [(set_attr "type" "store")])
  21521. +
  21522. +
  21523. +;; Bit-count instructions: population count and count leading zeros.
  21524. +
  21525. +(define_expand "popcountdi2"
  21526. + [(set (match_operand:DI 0 "register_operand" "")
  21527. + (popcount:DI (match_operand:DI 1 "register_operand" "")))]
  21528. + "TARGET_POPC"
  21529. +{
  21530. + if (TARGET_ARCH32)
  21531. + {
  21532. + emit_insn (gen_popcountdi_v8plus (operands[0], operands[1]));
  21533. + DONE;
  21534. + }
  21535. +})
  21536. +
  21537. +(define_insn "*popcountdi_sp64"
  21538. + [(set (match_operand:DI 0 "register_operand" "=r")
  21539. + (popcount:DI (match_operand:DI 1 "register_operand" "r")))]
  21540. + "TARGET_POPC && TARGET_ARCH64"
  21541. + "popc\t%1, %0")
  21542. +
  21543. +(define_insn "popcountdi_v8plus"
  21544. + [(set (match_operand:DI 0 "register_operand" "=r")
  21545. + (popcount:DI (match_operand:DI 1 "register_operand" "r")))
  21546. + (clobber (match_scratch:SI 2 "=&h"))]
  21547. + "TARGET_POPC && TARGET_ARCH32"
  21548. +{
  21549. + if (sparc_check_64 (operands[1], insn) <= 0)
  21550. + output_asm_insn ("srl\t%L1, 0, %L1", operands);
  21551. + return "sllx\t%H1, 32, %2\n\tor\t%L1, %2, %2\n\tpopc\t%2, %L0\n\tclr\t%H0";
  21552. +}
  21553. + [(set_attr "type" "multi")
  21554. + (set_attr "length" "5")])
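+
+;; Source-level sketch: both popcount patterns are reached from the
+;; generic builtin, e.g.
+;;
+;;   int bits (unsigned long long x) { return __builtin_popcountll (x); }
+;;
+;; On 64-bit this is a single "popc"; under 32-bit v8plus the value lives
+;; in a register pair, so the template above first glues the two halves
+;; together with sllx/or and clears the high word of the result.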
  21555. +
  21556. +(define_expand "popcountsi2"
  21557. + [(set (match_dup 2)
  21558. + (zero_extend:DI (match_operand:SI 1 "register_operand" "")))
  21559. + (set (match_operand:SI 0 "register_operand" "")
  21560. + (truncate:SI (popcount:DI (match_dup 2))))]
  21561. + "TARGET_POPC"
  21562. +{
  21563. + if (TARGET_ARCH32)
  21564. + {
  21565. + emit_insn (gen_popcountsi_v8plus (operands[0], operands[1]));
  21566. + DONE;
  21567. + }
  21568. + else
  21569. + operands[2] = gen_reg_rtx (DImode);
  21570. +})
  21571. +
  21572. +(define_insn "*popcountsi_sp64"
  21573. + [(set (match_operand:SI 0 "register_operand" "=r")
  21574. + (truncate:SI
  21575. + (popcount:DI (match_operand:DI 1 "register_operand" "r"))))]
  21576. + "TARGET_POPC && TARGET_ARCH64"
  21577. + "popc\t%1, %0")
  21578. +
  21579. +(define_insn "popcountsi_v8plus"
  21580. + [(set (match_operand:SI 0 "register_operand" "=r")
  21581. + (popcount:SI (match_operand:SI 1 "register_operand" "r")))]
  21582. + "TARGET_POPC && TARGET_ARCH32"
  21583. +{
  21584. + if (sparc_check_64 (operands[1], insn) <= 0)
  21585. + output_asm_insn ("srl\t%1, 0, %1", operands);
  21586. + return "popc\t%1, %0";
  21587. +}
  21588. + [(set_attr "type" "multi")
  21589. + (set_attr "length" "2")])
  21590. +
  21591. +(define_expand "clzdi2"
  21592. + [(set (match_operand:DI 0 "register_operand" "")
  21593. + (clz:DI (match_operand:DI 1 "register_operand" "")))]
  21594. + "TARGET_VIS3"
  21595. +{
  21596. + if (TARGET_ARCH32)
  21597. + {
  21598. + emit_insn (gen_clzdi_v8plus (operands[0], operands[1]));
  21599. + DONE;
  21600. + }
  21601. +})
  21602. +
  21603. +(define_insn "*clzdi_sp64"
  21604. + [(set (match_operand:DI 0 "register_operand" "=r")
  21605. + (clz:DI (match_operand:DI 1 "register_operand" "r")))]
  21606. + "TARGET_VIS3 && TARGET_ARCH64"
  21607. + "lzd\t%1, %0"
  21608. + [(set_attr "type" "lzd")])
  21609. +
  21610. +(define_insn "clzdi_v8plus"
  21611. + [(set (match_operand:DI 0 "register_operand" "=r")
  21612. + (clz:DI (match_operand:DI 1 "register_operand" "r")))
  21613. + (clobber (match_scratch:SI 2 "=&h"))]
  21614. + "TARGET_VIS3 && TARGET_ARCH32"
  21615. +{
  21616. + if (sparc_check_64 (operands[1], insn) <= 0)
  21617. + output_asm_insn ("srl\t%L1, 0, %L1", operands);
  21618. + return "sllx\t%H1, 32, %2\n\tor\t%L1, %2, %2\n\tlzd\t%2, %L0\n\tclr\t%H0";
  21619. +}
  21620. + [(set_attr "type" "multi")
  21621. + (set_attr "length" "5")])
  21622. +
  21623. +(define_expand "clzsi2"
  21624. + [(set (match_dup 2)
  21625. + (zero_extend:DI (match_operand:SI 1 "register_operand" "")))
  21626. + (set (match_dup 3)
  21627. + (truncate:SI (clz:DI (match_dup 2))))
  21628. + (set (match_operand:SI 0 "register_operand" "")
  21629. + (minus:SI (match_dup 3) (const_int 32)))]
  21630. + "TARGET_VIS3"
  21631. +{
  21632. + if (TARGET_ARCH32)
  21633. + {
  21634. + emit_insn (gen_clzsi_v8plus (operands[0], operands[1]));
  21635. + DONE;
  21636. + }
  21637. + else
  21638. + {
  21639. + operands[2] = gen_reg_rtx (DImode);
  21640. + operands[3] = gen_reg_rtx (SImode);
  21641. + }
  21642. +})
  21643. +
  21644. +(define_insn "*clzsi_sp64"
  21645. + [(set (match_operand:SI 0 "register_operand" "=r")
  21646. + (truncate:SI
  21647. + (clz:DI (match_operand:DI 1 "register_operand" "r"))))]
  21648. + "TARGET_VIS3 && TARGET_ARCH64"
  21649. + "lzd\t%1, %0"
  21650. + [(set_attr "type" "lzd")])
  21651. +
  21652. +(define_insn "clzsi_v8plus"
  21653. + [(set (match_operand:SI 0 "register_operand" "=r")
  21654. + (clz:SI (match_operand:SI 1 "register_operand" "r")))]
  21655. + "TARGET_VIS3 && TARGET_ARCH32"
  21656. +{
  21657. + if (sparc_check_64 (operands[1], insn) <= 0)
  21658. + output_asm_insn ("srl\t%1, 0, %1", operands);
  21659. + return "lzd\t%1, %0\n\tsub\t%0, 32, %0";
  21660. +}
  21661. + [(set_attr "type" "multi")
  21662. + (set_attr "length" "3")])
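+
+;; Sketch of the SImode trick used by the clzsi2 expander above: only a
+;; 64-bit "lzd" exists, so a 32-bit count is taken on the zero-extended
+;; value and corrected afterwards:
+;;
+;;   clz32 (x) == lzd ((unsigned long long) x) - 32
+;;
+;; which is why the expander allocates a DImode temporary and then
+;; subtracts 32, and why the v8plus template ends in "sub %0, 32, %0".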
  21663. +
  21664. +
  21665. +;; Peepholes go at the end.
  21666. +
  21667. +;; Optimize consecutive loads or stores into ldd and std when possible.
  21668. +;; The conditions under which we do this are very restricted; they are
  21669. +;; spelled out in registers_ok_for_ldd_peep and mems_ok_for_ldd_peep.
  21670. +
  21671. +(define_peephole2
  21672. + [(set (match_operand:SI 0 "memory_operand" "")
  21673. + (const_int 0))
  21674. + (set (match_operand:SI 1 "memory_operand" "")
  21675. + (const_int 0))]
  21676. + "TARGET_V9
  21677. + && mems_ok_for_ldd_peep (operands[0], operands[1], NULL_RTX)"
  21678. + [(set (match_dup 0) (const_int 0))]
  21679. +{
  21680. + operands[0] = widen_mem_for_ldd_peep (operands[0], operands[1], DImode);
  21681. +})
  21682. +
  21683. +(define_peephole2
  21684. + [(set (match_operand:SI 0 "memory_operand" "")
  21685. + (const_int 0))
  21686. + (set (match_operand:SI 1 "memory_operand" "")
  21687. + (const_int 0))]
  21688. + "TARGET_V9
  21689. + && mems_ok_for_ldd_peep (operands[1], operands[0], NULL_RTX)"
  21690. + [(set (match_dup 1) (const_int 0))]
  21691. +{
  21692. + operands[1] = widen_mem_for_ldd_peep (operands[1], operands[0], DImode);
  21693. +})
  21694. +
  21695. +(define_peephole2
  21696. + [(set (match_operand:SI 0 "register_operand" "")
  21697. + (match_operand:SI 1 "memory_operand" ""))
  21698. + (set (match_operand:SI 2 "register_operand" "")
  21699. + (match_operand:SI 3 "memory_operand" ""))]
  21700. + "registers_ok_for_ldd_peep (operands[0], operands[2])
  21701. + && mems_ok_for_ldd_peep (operands[1], operands[3], operands[0])"
  21702. + [(set (match_dup 0) (match_dup 1))]
  21703. +{
  21704. + operands[1] = widen_mem_for_ldd_peep (operands[1], operands[3], DImode);
  21705. + operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
  21706. +})
  21707. +
  21708. +(define_peephole2
  21709. + [(set (match_operand:SI 0 "memory_operand" "")
  21710. + (match_operand:SI 1 "register_operand" ""))
  21711. + (set (match_operand:SI 2 "memory_operand" "")
  21712. + (match_operand:SI 3 "register_operand" ""))]
  21713. + "registers_ok_for_ldd_peep (operands[1], operands[3])
  21714. + && mems_ok_for_ldd_peep (operands[0], operands[2], NULL_RTX)"
  21715. + [(set (match_dup 0) (match_dup 1))]
  21716. +{
  21717. + operands[0] = widen_mem_for_ldd_peep (operands[0], operands[2], DImode);
  21718. + operands[1] = gen_rtx_REG (DImode, REGNO (operands[1]));
  21719. +})
  21720. +
  21721. +(define_peephole2
  21722. + [(set (match_operand:SF 0 "register_operand" "")
  21723. + (match_operand:SF 1 "memory_operand" ""))
  21724. + (set (match_operand:SF 2 "register_operand" "")
  21725. + (match_operand:SF 3 "memory_operand" ""))]
  21726. + "registers_ok_for_ldd_peep (operands[0], operands[2])
  21727. + && mems_ok_for_ldd_peep (operands[1], operands[3], operands[0])"
  21728. + [(set (match_dup 0) (match_dup 1))]
  21729. +{
  21730. + operands[1] = widen_mem_for_ldd_peep (operands[1], operands[3], DFmode);
  21731. + operands[0] = gen_rtx_REG (DFmode, REGNO (operands[0]));
  21732. +})
  21733. +
  21734. +(define_peephole2
  21735. + [(set (match_operand:SF 0 "memory_operand" "")
  21736. + (match_operand:SF 1 "register_operand" ""))
  21737. + (set (match_operand:SF 2 "memory_operand" "")
  21738. + (match_operand:SF 3 "register_operand" ""))]
  21739. + "registers_ok_for_ldd_peep (operands[1], operands[3])
  21740. + && mems_ok_for_ldd_peep (operands[0], operands[2], NULL_RTX)"
  21741. + [(set (match_dup 0) (match_dup 1))]
  21742. +{
  21743. + operands[0] = widen_mem_for_ldd_peep (operands[0], operands[2], DFmode);
  21744. + operands[1] = gen_rtx_REG (DFmode, REGNO (operands[1]));
  21745. +})
  21746. +
  21747. +(define_peephole2
  21748. + [(set (match_operand:SI 0 "register_operand" "")
  21749. + (match_operand:SI 1 "memory_operand" ""))
  21750. + (set (match_operand:SI 2 "register_operand" "")
  21751. + (match_operand:SI 3 "memory_operand" ""))]
  21752. + "registers_ok_for_ldd_peep (operands[2], operands[0])
  21753. + && mems_ok_for_ldd_peep (operands[3], operands[1], operands[0])"
  21754. + [(set (match_dup 2) (match_dup 3))]
  21755. +{
  21756. + operands[3] = widen_mem_for_ldd_peep (operands[3], operands[1], DImode);
  21757. + operands[2] = gen_rtx_REG (DImode, REGNO (operands[2]));
  21758. +})
  21759. +
  21760. +(define_peephole2
  21761. + [(set (match_operand:SI 0 "memory_operand" "")
  21762. + (match_operand:SI 1 "register_operand" ""))
  21763. + (set (match_operand:SI 2 "memory_operand" "")
  21764. + (match_operand:SI 3 "register_operand" ""))]
  21765. + "registers_ok_for_ldd_peep (operands[3], operands[1])
  21766. + && mems_ok_for_ldd_peep (operands[2], operands[0], NULL_RTX)"
  21767. + [(set (match_dup 2) (match_dup 3))]
  21768. +{
  21769. + operands[2] = widen_mem_for_ldd_peep (operands[2], operands[0], DImode);
  21770. + operands[3] = gen_rtx_REG (DImode, REGNO (operands[3]));
  21771. +})
  21772. +
  21773. +(define_peephole2
  21774. + [(set (match_operand:SF 0 "register_operand" "")
  21775. + (match_operand:SF 1 "memory_operand" ""))
  21776. + (set (match_operand:SF 2 "register_operand" "")
  21777. + (match_operand:SF 3 "memory_operand" ""))]
  21778. + "registers_ok_for_ldd_peep (operands[2], operands[0])
  21779. + && mems_ok_for_ldd_peep (operands[3], operands[1], operands[0])"
  21780. + [(set (match_dup 2) (match_dup 3))]
  21781. +{
  21782. + operands[3] = widen_mem_for_ldd_peep (operands[3], operands[1], DFmode);
  21783. + operands[2] = gen_rtx_REG (DFmode, REGNO (operands[2]));
  21784. +})
  21785. +
  21786. +(define_peephole2
  21787. + [(set (match_operand:SF 0 "memory_operand" "")
  21788. + (match_operand:SF 1 "register_operand" ""))
  21789. + (set (match_operand:SF 2 "memory_operand" "")
  21790. + (match_operand:SF 3 "register_operand" ""))]
  21791. + "registers_ok_for_ldd_peep (operands[3], operands[1])
  21792. + && mems_ok_for_ldd_peep (operands[2], operands[0], NULL_RTX)"
  21793. + [(set (match_dup 2) (match_dup 3))]
  21794. +{
  21795. + operands[2] = widen_mem_for_ldd_peep (operands[2], operands[0], DFmode);
  21796. + operands[3] = gen_rtx_REG (DFmode, REGNO (operands[3]));
  21797. +})
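+
+;; Worked example (hedged): with an even/odd register pair and adjacent,
+;; doubleword-aligned memory words, the peepholes above turn
+;;
+;;   st %g2, [%o0]
+;;   st %g3, [%o0+4]
+;;
+;; into a single "std %g2, [%o0]". registers_ok_for_ldd_peep and
+;; mems_ok_for_ldd_peep enforce the pairing, adjacency and alignment
+;; preconditions.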
  21798. +
  21799. +;; Optimize the case of a reg-reg move followed by a test of the register
  21800. +;; just moved. Don't allow floating-point regs for operand 0 or 1.
  21801. +;; This sequence can result from a float-to-integer conversion.
  21802. +
  21803. +(define_peephole2
  21804. + [(set (match_operand:SI 0 "register_operand" "")
  21805. + (match_operand:SI 1 "register_operand" ""))
  21806. + (set (reg:CC CC_REG)
  21807. + (compare:CC (match_operand:SI 2 "register_operand" "")
  21808. + (const_int 0)))]
  21809. + "(rtx_equal_p (operands[2], operands[0])
  21810. + || rtx_equal_p (operands[2], operands[1]))
  21811. + && !SPARC_FP_REG_P (REGNO (operands[0]))
  21812. + && !SPARC_FP_REG_P (REGNO (operands[1]))"
  21813. + [(parallel [(set (match_dup 0) (match_dup 1))
  21814. + (set (reg:CC CC_REG)
  21815. + (compare:CC (match_dup 1) (const_int 0)))])]
  21816. + "")
  21817. +
  21818. +(define_peephole2
  21819. + [(set (match_operand:DI 0 "register_operand" "")
  21820. + (match_operand:DI 1 "register_operand" ""))
  21821. + (set (reg:CCX CC_REG)
  21822. + (compare:CCX (match_operand:DI 2 "register_operand" "")
  21823. + (const_int 0)))]
  21824. + "TARGET_ARCH64
  21825. + && (rtx_equal_p (operands[2], operands[0])
  21826. + || rtx_equal_p (operands[2], operands[1]))
  21827. + && !SPARC_FP_REG_P (REGNO (operands[0]))
  21828. + && !SPARC_FP_REG_P (REGNO (operands[1]))"
  21829. + [(parallel [(set (match_dup 0) (match_dup 1))
  21830. + (set (reg:CCX CC_REG)
  21831. + (compare:CCX (match_dup 1) (const_int 0)))])]
  21832. + "")
  21833. +
  21834. +
  21835. +;; Prefetch instructions.
  21836. +
  21837. +;; ??? UltraSPARC-III note: A memory operation loading into the floating point
  21838. +;; register file, if it hits the prefetch cache, has a chance to dual-issue
  21839. +;; with other memory operations. With DFA we might be able to model this,
  21840. +;; but it requires a lot of state.
  21841. +(define_expand "prefetch"
  21842. + [(match_operand 0 "address_operand" "")
  21843. + (match_operand 1 "const_int_operand" "")
  21844. + (match_operand 2 "const_int_operand" "")]
  21845. + "TARGET_V9"
  21846. +{
  21847. + if (TARGET_ARCH64)
  21848. + emit_insn (gen_prefetch_64 (operands[0], operands[1], operands[2]));
  21849. + else
  21850. + emit_insn (gen_prefetch_32 (operands[0], operands[1], operands[2]));
  21851. + DONE;
  21852. +})
  21853. +
  21854. +(define_insn "prefetch_64"
  21855. + [(prefetch (match_operand:DI 0 "address_operand" "p")
  21856. + (match_operand:DI 1 "const_int_operand" "n")
  21857. + (match_operand:DI 2 "const_int_operand" "n"))]
  21858. + ""
  21859. +{
  21860. + static const char * const prefetch_instr[2][2] = {
  21861. + {
  21862. + "prefetch\t[%a0], 1", /* no locality: prefetch for one read */
  21863. + "prefetch\t[%a0], 0", /* medium to high locality: prefetch for several reads */
  21864. + },
  21865. + {
  21866. + "prefetch\t[%a0], 3", /* no locality: prefetch for one write */
  21867. + "prefetch\t[%a0], 2", /* medium to high locality: prefetch for several writes */
  21868. + }
  21869. + };
  21870. + int read_or_write = INTVAL (operands[1]);
  21871. + int locality = INTVAL (operands[2]);
  21872. +
  21873. + gcc_assert (read_or_write == 0 || read_or_write == 1);
  21874. + gcc_assert (locality >= 0 && locality < 4);
  21875. + return prefetch_instr [read_or_write][locality == 0 ? 0 : 1];
  21876. +}
  21877. + [(set_attr "type" "load")
  21878. + (set_attr "subtype" "prefetch")])
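+
+;; Mapping sketch from the generic builtin __builtin_prefetch (addr, rw,
+;; locality), whose last two arguments arrive as operands 1 and 2:
+;;
+;;   __builtin_prefetch (p, 0, 3);  /* read, high locality */  -> prefetch [p], 0
+;;   __builtin_prefetch (p, 1, 0);  /* write, no locality  */  -> prefetch [p], 3
+;;
+;; Only the zero/nonzero locality distinction survives the table above.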
  21879. +
  21880. +(define_insn "prefetch_32"
  21881. + [(prefetch (match_operand:SI 0 "address_operand" "p")
  21882. + (match_operand:SI 1 "const_int_operand" "n")
  21883. + (match_operand:SI 2 "const_int_operand" "n"))]
  21884. + ""
  21885. +{
  21886. + static const char * const prefetch_instr[2][2] = {
  21887. + {
  21888. + "prefetch\t[%a0], 1", /* no locality: prefetch for one read */
  21889. + "prefetch\t[%a0], 0", /* medium to high locality: prefetch for several reads */
  21890. + },
  21891. + {
  21892. + "prefetch\t[%a0], 3", /* no locality: prefetch for one write */
  21893. + "prefetch\t[%a0], 2", /* medium to high locality: prefetch for several writes */
  21894. + }
  21895. + };
  21896. + int read_or_write = INTVAL (operands[1]);
  21897. + int locality = INTVAL (operands[2]);
  21898. +
  21899. + gcc_assert (read_or_write == 0 || read_or_write == 1);
  21900. + gcc_assert (locality >= 0 && locality < 4);
  21901. + return prefetch_instr [read_or_write][locality == 0 ? 0 : 1];
  21902. +}
  21903. + [(set_attr "type" "load")
  21904. + (set_attr "subtype" "prefetch")])
  21905. +
  21906. +
  21907. +;; Trap instructions.
  21908. +
  21909. +(define_insn "trap"
  21910. + [(trap_if (const_int 1) (const_int 5))]
  21911. + ""
  21912. + "ta\t5"
  21913. + [(set_attr "type" "trap")])
  21914. +
  21915. +(define_expand "ctrapsi4"
  21916. + [(trap_if (match_operator 0 "comparison_operator"
  21917. + [(match_operand:SI 1 "compare_operand" "")
  21918. + (match_operand:SI 2 "arith_operand" "")])
  21919. + (match_operand 3 "arith_operand"))]
  21920. + ""
  21921. +{
  21922. + operands[1] = gen_compare_reg (operands[0]);
  21923. + if (GET_MODE (operands[1]) != CCmode && GET_MODE (operands[1]) != CCXmode)
  21924. + FAIL;
  21925. + operands[2] = const0_rtx;
  21926. +})
  21927. +
  21928. +(define_expand "ctrapdi4"
  21929. + [(trap_if (match_operator 0 "comparison_operator"
  21930. + [(match_operand:DI 1 "compare_operand" "")
  21931. + (match_operand:DI 2 "arith_operand" "")])
  21932. + (match_operand 3 "arith_operand"))]
  21933. + "TARGET_ARCH64"
  21934. +{
  21935. + operands[1] = gen_compare_reg (operands[0]);
  21936. + if (GET_MODE (operands[1]) != CCmode && GET_MODE (operands[1]) != CCXmode)
  21937. + FAIL;
  21938. + operands[2] = const0_rtx;
  21939. +})
  21940. +
  21941. +(define_insn "*trapsi_insn"
  21942. + [(trap_if (match_operator 0 "icc_comparison_operator"
  21943. + [(reg:CC CC_REG) (const_int 0)])
  21944. + (match_operand:SI 1 "arith_operand" "rM"))]
  21945. + ""
  21946. +{
  21947. + if (TARGET_V9)
  21948. + return "t%C0\t%%icc, %1";
  21949. + else
  21950. + return "t%C0\t%1";
  21951. +}
  21952. + [(set_attr "type" "trap")])
  21953. +
  21954. +(define_insn "*trapdi_insn"
  21955. + [(trap_if (match_operator 0 "icc_comparison_operator"
  21956. + [(reg:CCX CC_REG) (const_int 0)])
  21957. + (match_operand:SI 1 "arith_operand" "rM"))]
  21958. + "TARGET_V9"
  21959. + "t%C0\t%%xcc, %1"
  21960. + [(set_attr "type" "trap")])
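+
+;; Source-level sketch: __builtin_trap () expands to the unconditional
+;; "ta 5" pattern above, while trap_if conditions reach the ctrap
+;; expanders; e.g. a trap-on-equal guard with trap number 5 would
+;; assemble as "te %icc, 5" on V9 (illustrative operand values).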
  21961. +
  21962. +
  21963. +;; TLS support instructions.
  21964. +
  21965. +(define_insn "tgd_hi22<P:mode>"
  21966. + [(set (match_operand:P 0 "register_operand" "=r")
  21967. + (high:P (unspec:P [(match_operand 1 "tgd_symbolic_operand" "")]
  21968. + UNSPEC_TLSGD)))]
  21969. + "TARGET_TLS"
  21970. + "sethi\\t%%tgd_hi22(%a1), %0")
  21971. +
  21972. +(define_insn "tgd_lo10<P:mode>"
  21973. + [(set (match_operand:P 0 "register_operand" "=r")
  21974. + (lo_sum:P (match_operand:P 1 "register_operand" "r")
  21975. + (unspec:P [(match_operand 2 "tgd_symbolic_operand" "")]
  21976. + UNSPEC_TLSGD)))]
  21977. + "TARGET_TLS"
  21978. + "add\\t%1, %%tgd_lo10(%a2), %0")
  21979. +
  21980. +(define_insn "tgd_add<P:mode>"
  21981. + [(set (match_operand:P 0 "register_operand" "=r")
  21982. + (plus:P (match_operand:P 1 "register_operand" "r")
  21983. + (unspec:P [(match_operand:P 2 "register_operand" "r")
  21984. + (match_operand 3 "tgd_symbolic_operand" "")]
  21985. + UNSPEC_TLSGD)))]
  21986. + "TARGET_TLS"
  21987. + "add\\t%1, %2, %0, %%tgd_add(%a3)")
  21988. +
  21989. +(define_insn "tgd_call<P:mode>"
  21990. + [(set (match_operand 0 "register_operand" "=r")
  21991. + (call (mem:P (unspec:P [(match_operand:P 1 "symbolic_operand" "s")
  21992. + (match_operand 2 "tgd_symbolic_operand" "")]
  21993. + UNSPEC_TLSGD))
  21994. + (match_operand 3 "" "")))
  21995. + (clobber (reg:P O7_REG))]
  21996. + "TARGET_TLS"
  21997. + "call\t%a1, %%tgd_call(%a2)%#"
  21998. + [(set (attr "type") (if_then_else (eq_attr "tls_delay_slot" "true")
  21999. + (const_string "call")
  22000. + (const_string "call_no_delay_slot")))])
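+
+;; Assembled global-dynamic sequence (sketch; %l7 is the PIC/GOT register
+;; and __tls_get_addr the runtime helper):
+;;
+;;   sethi %tgd_hi22(sym), %o0
+;;   add   %o0, %tgd_lo10(sym), %o0
+;;   add   %l7, %o0, %o0, %tgd_add(sym)
+;;   call  __tls_get_addr, %tgd_call(sym)
+;;    nop
+;;
+;; i.e. the four tgd_* patterns above in order.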
  22001. +
  22002. +(define_insn "tldm_hi22<P:mode>"
  22003. + [(set (match_operand:P 0 "register_operand" "=r")
  22004. + (high:P (unspec:P [(const_int 0)] UNSPEC_TLSLDM)))]
  22005. + "TARGET_TLS"
  22006. + "sethi\\t%%tldm_hi22(%&), %0")
  22007. +
  22008. +(define_insn "tldm_lo10<P:mode>"
  22009. + [(set (match_operand:P 0 "register_operand" "=r")
  22010. + (lo_sum:P (match_operand:P 1 "register_operand" "r")
  22011. + (unspec:P [(const_int 0)] UNSPEC_TLSLDM)))]
  22012. + "TARGET_TLS"
  22013. + "add\\t%1, %%tldm_lo10(%&), %0")
  22014. +
  22015. +(define_insn "tldm_add<P:mode>"
  22016. + [(set (match_operand:P 0 "register_operand" "=r")
  22017. + (plus:P (match_operand:P 1 "register_operand" "r")
  22018. + (unspec:P [(match_operand:P 2 "register_operand" "r")]
  22019. + UNSPEC_TLSLDM)))]
  22020. + "TARGET_TLS"
  22021. + "add\\t%1, %2, %0, %%tldm_add(%&)")
  22022. +
  22023. +(define_insn "tldm_call<P:mode>"
  22024. + [(set (match_operand 0 "register_operand" "=r")
  22025. + (call (mem:P (unspec:P [(match_operand:P 1 "symbolic_operand" "s")]
  22026. + UNSPEC_TLSLDM))
  22027. + (match_operand 2 "" "")))
  22028. + (clobber (reg:P O7_REG))]
  22029. + "TARGET_TLS"
  22030. + "call\t%a1, %%tldm_call(%&)%#"
  22031. + [(set (attr "type") (if_then_else (eq_attr "tls_delay_slot" "true")
  22032. + (const_string "call")
  22033. + (const_string "call_no_delay_slot")))])
  22034. +
  22035. +(define_insn "tldo_hix22<P:mode>"
  22036. + [(set (match_operand:P 0 "register_operand" "=r")
  22037. + (high:P (unspec:P [(match_operand 1 "tld_symbolic_operand" "")]
  22038. + UNSPEC_TLSLDO)))]
  22039. + "TARGET_TLS"
  22040. + "sethi\\t%%tldo_hix22(%a1), %0")
  22041. +
  22042. +(define_insn "tldo_lox10<P:mode>"
  22043. + [(set (match_operand:P 0 "register_operand" "=r")
  22044. + (lo_sum:P (match_operand:P 1 "register_operand" "r")
  22045. + (unspec:P [(match_operand 2 "tld_symbolic_operand" "")]
  22046. + UNSPEC_TLSLDO)))]
  22047. + "TARGET_TLS"
  22048. + "xor\\t%1, %%tldo_lox10(%a2), %0")
  22049. +
  22050. +(define_insn "tldo_add<P:mode>"
  22051. + [(set (match_operand:P 0 "register_operand" "=r")
  22052. + (plus:P (match_operand:P 1 "register_operand" "r")
  22053. + (unspec:P [(match_operand:P 2 "register_operand" "r")
  22054. + (match_operand 3 "tld_symbolic_operand" "")]
  22055. + UNSPEC_TLSLDO)))]
  22056. + "TARGET_TLS"
  22057. + "add\\t%1, %2, %0, %%tldo_add(%a3)")
  22058. +
  22059. +(define_insn "tie_hi22<P:mode>"
  22060. + [(set (match_operand:P 0 "register_operand" "=r")
  22061. + (high:P (unspec:P [(match_operand 1 "tie_symbolic_operand" "")]
  22062. + UNSPEC_TLSIE)))]
  22063. + "TARGET_TLS"
  22064. + "sethi\\t%%tie_hi22(%a1), %0")
  22065. +
  22066. +(define_insn "tie_lo10<P:mode>"
  22067. + [(set (match_operand:P 0 "register_operand" "=r")
  22068. + (lo_sum:P (match_operand:P 1 "register_operand" "r")
  22069. + (unspec:P [(match_operand 2 "tie_symbolic_operand" "")]
  22070. + UNSPEC_TLSIE)))]
  22071. + "TARGET_TLS"
  22072. + "add\\t%1, %%tie_lo10(%a2), %0")
  22073. +
  22074. +;; Note the %%tie_ld operator.
  22075. +(define_insn "tie_ld32"
  22076. + [(set (match_operand:SI 0 "register_operand" "=r")
  22077. + (unspec:SI [(match_operand:SI 1 "register_operand" "r")
  22078. + (match_operand:SI 2 "register_operand" "r")
  22079. + (match_operand 3 "tie_symbolic_operand" "")]
  22080. + UNSPEC_TLSIE))]
  22081. + "TARGET_TLS && TARGET_ARCH32"
  22082. + "ld\\t[%1 + %2], %0, %%tie_ld(%a3)"
  22083. + [(set_attr "type" "load")
  22084. + (set_attr "subtype" "regular")])
  22085. +
  22086. +;; Note the %%tie_ldx operator.
  22087. +(define_insn "tie_ld64"
  22088. + [(set (match_operand:DI 0 "register_operand" "=r")
  22089. + (unspec:DI [(match_operand:DI 1 "register_operand" "r")
  22090. + (match_operand:DI 2 "register_operand" "r")
  22091. + (match_operand 3 "tie_symbolic_operand" "")]
  22092. + UNSPEC_TLSIE))]
  22093. + "TARGET_TLS && TARGET_ARCH64"
  22094. + "ldx\\t[%1 + %2], %0, %%tie_ldx(%a3)"
  22095. + [(set_attr "type" "load")
  22096. + (set_attr "subtype" "regular")])
  22097. +
  22098. +(define_insn "tie_add<P:mode>"
  22099. + [(set (match_operand:P 0 "register_operand" "=r")
  22100. + (plus:P (match_operand:P 1 "register_operand" "r")
  22101. + (unspec:P [(match_operand:P 2 "register_operand" "r")
  22102. + (match_operand 3 "tie_symbolic_operand" "")]
  22103. + UNSPEC_TLSIE)))]
  22104. + "TARGET_SUN_TLS"
  22105. + "add\\t%1, %2, %0, %%tie_add(%a3)")
  22106. +
  22107. +(define_insn "tle_hix22<P:mode>"
  22108. + [(set (match_operand:P 0 "register_operand" "=r")
  22109. + (high:P (unspec:P [(match_operand 1 "tle_symbolic_operand" "")]
  22110. + UNSPEC_TLSLE)))]
  22111. + "TARGET_TLS"
  22112. + "sethi\\t%%tle_hix22(%a1), %0")
  22113. +
  22114. +(define_insn "tle_lox10<P:mode>"
  22115. + [(set (match_operand:P 0 "register_operand" "=r")
  22116. + (lo_sum:P (match_operand:P 1 "register_operand" "r")
  22117. + (unspec:P [(match_operand 2 "tle_symbolic_operand" "")]
  22118. + UNSPEC_TLSLE)))]
  22119. + "TARGET_TLS"
  22120. + "xor\\t%1, %%tle_lox10(%a2), %0")
  22121. +
  22122. +;; Now the patterns combining tldo_add with integer loads and stores.
  22123. +(define_insn "*tldo_ldub<P:mode>"
  22124. + [(set (match_operand:QI 0 "register_operand" "=r")
  22125. + (mem:QI (plus:P (unspec:P [(match_operand:P 2 "register_operand" "r")
  22126. + (match_operand 3 "tld_symbolic_operand" "")]
  22127. + UNSPEC_TLSLDO)
  22128. + (match_operand:P 1 "register_operand" "r"))))]
  22129. + "TARGET_TLS"
  22130. + "ldub\t[%1 + %2], %0, %%tldo_add(%3)"
  22131. + [(set_attr "type" "load")
  22132. + (set_attr "subtype" "regular")
  22133. + (set_attr "us3load_type" "3cycle")])
  22134. +
  22135. +(define_insn "*tldo_ldub1<P:mode>"
  22136. + [(set (match_operand:HI 0 "register_operand" "=r")
  22137. + (zero_extend:HI
  22138. + (mem:QI (plus:P (unspec:P [(match_operand:P 2 "register_operand" "r")
  22139. + (match_operand 3 "tld_symbolic_operand" "")]
  22140. + UNSPEC_TLSLDO)
  22141. + (match_operand:P 1 "register_operand" "r")))))]
  22142. + "TARGET_TLS"
  22143. + "ldub\t[%1 + %2], %0, %%tldo_add(%3)"
  22144. + [(set_attr "type" "load")
  22145. + (set_attr "subtype" "regular")
  22146. + (set_attr "us3load_type" "3cycle")])
  22147. +
  22148. +(define_insn "*tldo_ldub2<P:mode>"
  22149. + [(set (match_operand:SI 0 "register_operand" "=r")
  22150. + (zero_extend:SI
  22151. + (mem:QI (plus:P (unspec:P [(match_operand:P 2 "register_operand" "r")
  22152. + (match_operand 3 "tld_symbolic_operand" "")]
  22153. + UNSPEC_TLSLDO)
  22154. + (match_operand:P 1 "register_operand" "r")))))]
  22155. + "TARGET_TLS"
  22156. + "ldub\t[%1 + %2], %0, %%tldo_add(%3)"
  22157. + [(set_attr "type" "load")
  22158. + (set_attr "subtype" "regular")
  22159. + (set_attr "us3load_type" "3cycle")])
  22160. +
  22161. +(define_insn "*tldo_ldsb1<P:mode>"
  22162. + [(set (match_operand:HI 0 "register_operand" "=r")
  22163. + (sign_extend:HI
  22164. + (mem:QI (plus:P (unspec:P [(match_operand:P 2 "register_operand" "r")
  22165. + (match_operand 3 "tld_symbolic_operand" "")]
  22166. + UNSPEC_TLSLDO)
  22167. + (match_operand:P 1 "register_operand" "r")))))]
  22168. + "TARGET_TLS"
  22169. + "ldsb\t[%1 + %2], %0, %%tldo_add(%3)"
  22170. + [(set_attr "type" "sload")
  22171. + (set_attr "us3load_type" "3cycle")])
  22172. +
  22173. +(define_insn "*tldo_ldsb2<P:mode>"
  22174. + [(set (match_operand:SI 0 "register_operand" "=r")
  22175. + (sign_extend:SI
  22176. + (mem:QI (plus:P (unspec:P [(match_operand:P 2 "register_operand" "r")
  22177. + (match_operand 3 "tld_symbolic_operand" "")]
  22178. + UNSPEC_TLSLDO)
  22179. + (match_operand:P 1 "register_operand" "r")))))]
  22180. + "TARGET_TLS"
  22181. + "ldsb\t[%1 + %2], %0, %%tldo_add(%3)"
  22182. + [(set_attr "type" "sload")
  22183. + (set_attr "us3load_type" "3cycle")])
  22184. +
  22185. +(define_insn "*tldo_ldub3_sp64"
  22186. + [(set (match_operand:DI 0 "register_operand" "=r")
  22187. + (zero_extend:DI
  22188. + (mem:QI (plus:DI (unspec:DI [(match_operand:DI 2 "register_operand" "r")
  22189. + (match_operand 3 "tld_symbolic_operand" "")]
  22190. + UNSPEC_TLSLDO)
  22191. + (match_operand:DI 1 "register_operand" "r")))))]
  22192. + "TARGET_TLS && TARGET_ARCH64"
  22193. + "ldub\t[%1 + %2], %0, %%tldo_add(%3)"
  22194. + [(set_attr "type" "load")
  22195. + (set_attr "subtype" "regular")
  22196. + (set_attr "us3load_type" "3cycle")])
  22197. +
  22198. +(define_insn "*tldo_ldsb3_sp64"
  22199. + [(set (match_operand:DI 0 "register_operand" "=r")
  22200. + (sign_extend:DI
  22201. + (mem:QI (plus:DI (unspec:DI [(match_operand:DI 2 "register_operand" "r")
  22202. + (match_operand 3 "tld_symbolic_operand" "")]
  22203. + UNSPEC_TLSLDO)
  22204. + (match_operand:DI 1 "register_operand" "r")))))]
  22205. + "TARGET_TLS && TARGET_ARCH64"
  22206. + "ldsb\t[%1 + %2], %0, %%tldo_add(%3)"
  22207. + [(set_attr "type" "sload")
  22208. + (set_attr "us3load_type" "3cycle")])
  22209. +
  22210. +(define_insn "*tldo_lduh<P:mode>"
  22211. + [(set (match_operand:HI 0 "register_operand" "=r")
  22212. + (mem:HI (plus:P (unspec:P [(match_operand:P 2 "register_operand" "r")
  22213. + (match_operand 3 "tld_symbolic_operand" "")]
  22214. + UNSPEC_TLSLDO)
  22215. + (match_operand:P 1 "register_operand" "r"))))]
  22216. + "TARGET_TLS"
  22217. + "lduh\t[%1 + %2], %0, %%tldo_add(%3)"
  22218. + [(set_attr "type" "load")
  22219. + (set_attr "subtype" "regular")
  22220. + (set_attr "us3load_type" "3cycle")])
  22221. +
  22222. +(define_insn "*tldo_lduh1<P:mode>"
  22223. + [(set (match_operand:SI 0 "register_operand" "=r")
  22224. + (zero_extend:SI
  22225. + (mem:HI (plus:P (unspec:P [(match_operand:P 2 "register_operand" "r")
  22226. + (match_operand 3 "tld_symbolic_operand" "")]
  22227. + UNSPEC_TLSLDO)
  22228. + (match_operand:P 1 "register_operand" "r")))))]
  22229. + "TARGET_TLS"
  22230. + "lduh\t[%1 + %2], %0, %%tldo_add(%3)"
  22231. + [(set_attr "type" "load")
  22232. + (set_attr "subtype" "regular")
  22233. + (set_attr "us3load_type" "3cycle")])
  22234. +
  22235. +(define_insn "*tldo_ldsh1<P:mode>"
  22236. + [(set (match_operand:SI 0 "register_operand" "=r")
  22237. + (sign_extend:SI
  22238. + (mem:HI (plus:P (unspec:P [(match_operand:P 2 "register_operand" "r")
  22239. + (match_operand 3 "tld_symbolic_operand" "")]
  22240. + UNSPEC_TLSLDO)
  22241. + (match_operand:P 1 "register_operand" "r")))))]
  22242. + "TARGET_TLS"
  22243. + "ldsh\t[%1 + %2], %0, %%tldo_add(%3)"
  22244. + [(set_attr "type" "sload")
  22245. + (set_attr "us3load_type" "3cycle")])
  22246. +
  22247. +(define_insn "*tldo_lduh2_sp64"
  22248. + [(set (match_operand:DI 0 "register_operand" "=r")
  22249. + (zero_extend:DI
  22250. + (mem:HI (plus:DI (unspec:DI [(match_operand:DI 2 "register_operand" "r")
  22251. + (match_operand 3 "tld_symbolic_operand" "")]
  22252. + UNSPEC_TLSLDO)
  22253. + (match_operand:DI 1 "register_operand" "r")))))]
  22254. + "TARGET_TLS && TARGET_ARCH64"
  22255. + "lduh\t[%1 + %2], %0, %%tldo_add(%3)"
  22256. + [(set_attr "type" "load")
  22257. + (set_attr "subtype" "regular")
  22258. + (set_attr "us3load_type" "3cycle")])
  22259. +
  22260. +(define_insn "*tldo_ldsh2_sp64"
  22261. + [(set (match_operand:DI 0 "register_operand" "=r")
  22262. + (sign_extend:DI
  22263. + (mem:HI (plus:DI (unspec:DI [(match_operand:DI 2 "register_operand" "r")
  22264. + (match_operand 3 "tld_symbolic_operand" "")]
  22265. + UNSPEC_TLSLDO)
  22266. + (match_operand:DI 1 "register_operand" "r")))))]
  22267. + "TARGET_TLS && TARGET_ARCH64"
  22268. + "ldsh\t[%1 + %2], %0, %%tldo_add(%3)"
  22269. + [(set_attr "type" "sload")
  22270. + (set_attr "us3load_type" "3cycle")])
  22271. +
  22272. +(define_insn "*tldo_lduw<P:mode>"
  22273. + [(set (match_operand:SI 0 "register_operand" "=r")
  22274. + (mem:SI (plus:P (unspec:P [(match_operand:P 2 "register_operand" "r")
  22275. + (match_operand 3 "tld_symbolic_operand" "")]
  22276. + UNSPEC_TLSLDO)
  22277. + (match_operand:P 1 "register_operand" "r"))))]
  22278. + "TARGET_TLS"
  22279. + "ld\t[%1 + %2], %0, %%tldo_add(%3)"
  22280. + [(set_attr "type" "load")
  22281. + (set_attr "subtype" "regular")])
  22282. +
  22283. +(define_insn "*tldo_lduw1_sp64"
  22284. + [(set (match_operand:DI 0 "register_operand" "=r")
  22285. + (zero_extend:DI
  22286. + (mem:SI (plus:DI (unspec:DI [(match_operand:DI 2 "register_operand" "r")
  22287. + (match_operand 3 "tld_symbolic_operand" "")]
  22288. + UNSPEC_TLSLDO)
  22289. + (match_operand:DI 1 "register_operand" "r")))))]
  22290. + "TARGET_TLS && TARGET_ARCH64"
  22291. + "lduw\t[%1 + %2], %0, %%tldo_add(%3)"
  22292. + [(set_attr "type" "load")
  22293. + (set_attr "subtype" "regular")])
  22294. +
  22295. +(define_insn "*tldo_ldsw1_sp64"
  22296. + [(set (match_operand:DI 0 "register_operand" "=r")
  22297. + (sign_extend:DI
  22298. + (mem:SI (plus:DI (unspec:DI [(match_operand:DI 2 "register_operand" "r")
  22299. + (match_operand 3 "tld_symbolic_operand" "")]
  22300. + UNSPEC_TLSLDO)
  22301. + (match_operand:DI 1 "register_operand" "r")))))]
  22302. + "TARGET_TLS && TARGET_ARCH64"
  22303. + "ldsw\t[%1 + %2], %0, %%tldo_add(%3)"
  22304. + [(set_attr "type" "sload")
  22305. + (set_attr "us3load_type" "3cycle")])
  22306. +
  22307. +(define_insn "*tldo_ldx_sp64"
  22308. + [(set (match_operand:DI 0 "register_operand" "=r")
  22309. + (mem:DI (plus:DI (unspec:DI [(match_operand:DI 2 "register_operand" "r")
  22310. + (match_operand 3 "tld_symbolic_operand" "")]
  22311. + UNSPEC_TLSLDO)
  22312. + (match_operand:DI 1 "register_operand" "r"))))]
  22313. + "TARGET_TLS && TARGET_ARCH64"
  22314. + "ldx\t[%1 + %2], %0, %%tldo_add(%3)"
  22315. + [(set_attr "type" "load")
  22316. + (set_attr "subtype" "regular")])
  22317. +
  22318. +(define_insn "*tldo_stb<P:mode>"
  22319. + [(set (mem:QI (plus:P (unspec:P [(match_operand:P 2 "register_operand" "r")
  22320. + (match_operand 3 "tld_symbolic_operand" "")]
  22321. + UNSPEC_TLSLDO)
  22322. + (match_operand:P 1 "register_operand" "r")))
  22323. + (match_operand:QI 0 "register_operand" "r"))]
  22324. + "TARGET_TLS"
  22325. + "stb\t%0, [%1 + %2], %%tldo_add(%3)"
  22326. + [(set_attr "type" "store")])
  22327. +
  22328. +(define_insn "*tldo_sth<P:mode>"
  22329. + [(set (mem:HI (plus:P (unspec:P [(match_operand:P 2 "register_operand" "r")
  22330. + (match_operand 3 "tld_symbolic_operand" "")]
  22331. + UNSPEC_TLSLDO)
  22332. + (match_operand:P 1 "register_operand" "r")))
  22333. + (match_operand:HI 0 "register_operand" "r"))]
  22334. + "TARGET_TLS"
  22335. + "sth\t%0, [%1 + %2], %%tldo_add(%3)"
  22336. + [(set_attr "type" "store")])
  22337. +
  22338. +(define_insn "*tldo_stw<P:mode>"
  22339. + [(set (mem:SI (plus:P (unspec:P [(match_operand:P 2 "register_operand" "r")
  22340. + (match_operand 3 "tld_symbolic_operand" "")]
  22341. + UNSPEC_TLSLDO)
  22342. + (match_operand:P 1 "register_operand" "r")))
  22343. + (match_operand:SI 0 "register_operand" "r"))]
  22344. + "TARGET_TLS"
  22345. + "st\t%0, [%1 + %2], %%tldo_add(%3)"
  22346. + [(set_attr "type" "store")])
  22347. +
  22348. +(define_insn "*tldo_stx_sp64"
  22349. + [(set (mem:DI (plus:DI (unspec:DI [(match_operand:DI 2 "register_operand" "r")
  22350. + (match_operand 3 "tld_symbolic_operand" "")]
  22351. + UNSPEC_TLSLDO)
  22352. + (match_operand:DI 1 "register_operand" "r")))
  22353. + (match_operand:DI 0 "register_operand" "r"))]
  22354. + "TARGET_TLS && TARGET_ARCH64"
  22355. + "stx\t%0, [%1 + %2], %%tldo_add(%3)"
  22356. + [(set_attr "type" "store")])
  22357. +
  22358. +
  22359. +;; Stack protector instructions.
  22360. +
  22361. +(define_expand "stack_protect_set"
  22362. + [(match_operand 0 "memory_operand" "")
  22363. + (match_operand 1 "memory_operand" "")]
  22364. + ""
  22365. +{
  22366. +#ifdef TARGET_THREAD_SSP_OFFSET
  22367. + rtx tlsreg = gen_rtx_REG (Pmode, 7);
  22368. + rtx addr = gen_rtx_PLUS (Pmode, tlsreg, GEN_INT (TARGET_THREAD_SSP_OFFSET));
  22369. + operands[1] = gen_rtx_MEM (Pmode, addr);
  22370. +#endif
  22371. + if (TARGET_ARCH64)
  22372. + emit_insn (gen_stack_protect_setdi (operands[0], operands[1]));
  22373. + else
  22374. + emit_insn (gen_stack_protect_setsi (operands[0], operands[1]));
  22375. + DONE;
  22376. +})
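+
+;; Note (assumed glibc-style configuration): when TARGET_THREAD_SSP_OFFSET
+;; is defined, the canary is not a global object but lives at a fixed
+;; offset from the thread pointer, so operands[1] is rewritten above to
+;; the MEM at %g7 + TARGET_THREAD_SSP_OFFSET (hard register 7 is %g7).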
  22377. +
  22378. +(define_insn "stack_protect_setsi"
  22379. + [(set (match_operand:SI 0 "memory_operand" "=m")
  22380. + (unspec:SI [(match_operand:SI 1 "memory_operand" "m")] UNSPEC_SP_SET))
  22381. + (set (match_scratch:SI 2 "=&r") (const_int 0))]
  22382. + "TARGET_ARCH32"
  22383. + "ld\t%1, %2\;st\t%2, %0\;mov\t0, %2"
  22384. + [(set_attr "type" "multi")
  22385. + (set_attr "length" "3")])
  22386. +
  22387. +(define_insn "stack_protect_setdi"
  22388. + [(set (match_operand:DI 0 "memory_operand" "=m")
  22389. + (unspec:DI [(match_operand:DI 1 "memory_operand" "m")] UNSPEC_SP_SET))
  22390. + (set (match_scratch:DI 2 "=&r") (const_int 0))]
  22391. + "TARGET_ARCH64"
  22392. + "ldx\t%1, %2\;stx\t%2, %0\;mov\t0, %2"
  22393. + [(set_attr "type" "multi")
  22394. + (set_attr "length" "3")])
  22395. +
  22396. +(define_expand "stack_protect_test"
  22397. + [(match_operand 0 "memory_operand" "")
  22398. + (match_operand 1 "memory_operand" "")
  22399. + (match_operand 2 "" "")]
  22400. + ""
  22401. +{
  22402. + rtx result, test;
  22403. +#ifdef TARGET_THREAD_SSP_OFFSET
  22404. + rtx tlsreg = gen_rtx_REG (Pmode, 7);
  22405. + rtx addr = gen_rtx_PLUS (Pmode, tlsreg, GEN_INT (TARGET_THREAD_SSP_OFFSET));
  22406. + operands[1] = gen_rtx_MEM (Pmode, addr);
  22407. +#endif
  22408. + if (TARGET_ARCH64)
  22409. + {
  22410. + result = gen_reg_rtx (Pmode);
  22411. + emit_insn (gen_stack_protect_testdi (result, operands[0], operands[1]));
  22412. + test = gen_rtx_EQ (VOIDmode, result, const0_rtx);
  22413. + emit_jump_insn (gen_cbranchdi4 (test, result, const0_rtx, operands[2]));
  22414. + }
  22415. + else
  22416. + {
  22417. + emit_insn (gen_stack_protect_testsi (operands[0], operands[1]));
  22418. + result = gen_rtx_REG (CCmode, SPARC_ICC_REG);
  22419. + test = gen_rtx_EQ (VOIDmode, result, const0_rtx);
  22420. + emit_jump_insn (gen_cbranchcc4 (test, result, const0_rtx, operands[2]));
  22421. + }
  22422. + DONE;
  22423. +})
  22424. +
  22425. +(define_insn "stack_protect_testsi"
  22426. + [(set (reg:CC CC_REG)
  22427. + (unspec:CC [(match_operand:SI 0 "memory_operand" "m")
  22428. + (match_operand:SI 1 "memory_operand" "m")]
  22429. + UNSPEC_SP_TEST))
  22430. + (set (match_scratch:SI 3 "=r") (const_int 0))
  22431. + (clobber (match_scratch:SI 2 "=&r"))]
  22432. + "TARGET_ARCH32"
  22433. + "ld\t%0, %2\;ld\t%1, %3\;xorcc\t%2, %3, %2\;mov\t0, %3"
  22434. + [(set_attr "type" "multi")
  22435. + (set_attr "length" "4")])
  22436. +
  22437. +(define_insn "stack_protect_testdi"
  22438. + [(set (match_operand:DI 0 "register_operand" "=&r")
  22439. + (unspec:DI [(match_operand:DI 1 "memory_operand" "m")
  22440. + (match_operand:DI 2 "memory_operand" "m")]
  22441. + UNSPEC_SP_TEST))
  22442. + (set (match_scratch:DI 3 "=r") (const_int 0))]
  22443. + "TARGET_ARCH64"
  22444. + "ldx\t%1, %0\;ldx\t%2, %3\;xor\t%0, %3, %0\;mov\t0, %3"
  22445. + [(set_attr "type" "multi")
  22446. + (set_attr "length" "4")])
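+
+;; End-to-end sketch for -fstack-protector on 32-bit (illustrative slot):
+;;
+;;   ld    [%fp-4], %g2       ! reload the stored guard
+;;   ld    [canary], %g3      ! reload the reference canary
+;;   xorcc %g2, %g3, %g2      ! compare, setting %icc
+;;   mov   0, %g3             ! wipe the canary copy
+;;   be    <canary-ok label>  ! otherwise fall through to __stack_chk_fail
+;;
+;; matching the xorcc template above plus the cbranch emitted by the
+;; stack_protect_test expander.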
  22447. +
  22448. +
  22449. +;; Vector instructions.
  22450. +
  22451. +(define_mode_iterator VM32 [V1SI V2HI V4QI])
  22452. +(define_mode_iterator VM64 [V1DI V2SI V4HI V8QI])
  22453. +(define_mode_iterator VMALL [V1SI V2HI V4QI V1DI V2SI V4HI V8QI])
  22454. +
  22455. +(define_mode_attr vbits [(V2SI "32") (V4HI "16") (V1SI "32s") (V2HI "16s")
  22456. + (V8QI "8")])
  22457. +(define_mode_attr vconstr [(V1SI "f") (V2HI "f") (V4QI "f")
  22458. + (V1DI "e") (V2SI "e") (V4HI "e") (V8QI "e")])
  22459. +(define_mode_attr vfptype [(V1SI "single") (V2HI "single") (V4QI "single")
  22460. + (V1DI "double") (V2SI "double") (V4HI "double")
  22461. + (V8QI "double")])
  22462. +(define_mode_attr veltmode [(V1SI "si") (V2HI "hi") (V4QI "qi") (V1DI "di")
  22463. + (V2SI "si") (V4HI "hi") (V8QI "qi")])
  22464. +
  22465. +(define_expand "mov<VMALL:mode>"
  22466. + [(set (match_operand:VMALL 0 "nonimmediate_operand" "")
  22467. + (match_operand:VMALL 1 "general_operand" ""))]
  22468. + "TARGET_VIS"
  22469. +{
  22470. + if (sparc_expand_move (<VMALL:MODE>mode, operands))
  22471. + DONE;
  22472. +})
  22473. +
  22474. +(define_insn "*mov<VM32:mode>_insn"
  22475. + [(set (match_operand:VM32 0 "nonimmediate_operand" "=f,f,f,f,m,m,*r, m,*r,*r, f")
  22476. + (match_operand:VM32 1 "input_operand" "Y,Z,f,m,f,Y, m,*r,*r, f,*r"))]
  22477. + "TARGET_VIS
  22478. + && (register_operand (operands[0], <VM32:MODE>mode)
  22479. + || register_or_zero_or_all_ones_operand (operands[1], <VM32:MODE>mode))"
  22480. + "@
  22481. + fzeros\t%0
  22482. + fones\t%0
  22483. + fsrc2s\t%1, %0
  22484. + ld\t%1, %0
  22485. + st\t%1, %0
  22486. + st\t%r1, %0
  22487. + ld\t%1, %0
  22488. + st\t%1, %0
  22489. + mov\t%1, %0
  22490. + movstouw\t%1, %0
  22491. + movwtos\t%1, %0"
  22492. + [(set_attr "type" "visl,visl,vismv,fpload,fpstore,store,load,store,*,vismv,vismv")
  22493. + (set_attr "subtype" "single,single,single,*,*,*,regular,*,*,movstouw,single")
  22494. + (set_attr "cpu_feature" "vis,vis,vis,*,*,*,*,*,*,vis3,vis3")])
  22495. +
  22496. +(define_insn "*mov<VM64:mode>_insn_sp64"
  22497. + [(set (match_operand:VM64 0 "nonimmediate_operand" "=e,e,e,e,W,m,*r, m,*r, e,*r")
  22498. + (match_operand:VM64 1 "input_operand" "Y,Z,e,W,e,Y, m,*r, e,*r,*r"))]
  22499. + "TARGET_VIS
  22500. + && TARGET_ARCH64
  22501. + && (register_operand (operands[0], <VM64:MODE>mode)
  22502. + || register_or_zero_or_all_ones_operand (operands[1], <VM64:MODE>mode))"
  22503. + "@
  22504. + fzero\t%0
  22505. + fone\t%0
  22506. + fsrc2\t%1, %0
  22507. + ldd\t%1, %0
  22508. + std\t%1, %0
  22509. + stx\t%r1, %0
  22510. + ldx\t%1, %0
  22511. + stx\t%1, %0
  22512. + movdtox\t%1, %0
  22513. + movxtod\t%1, %0
  22514. + mov\t%1, %0"
  22515. + [(set_attr "type" "visl,visl,vismv,fpload,fpstore,store,load,store,vismv,vismv,*")
  22516. + (set_attr "subtype" "double,double,double,*,*,*,regular,*,movdtox,movxtod,*")
  22517. + (set_attr "cpu_feature" "vis,vis,vis,*,*,*,*,*,vis3,vis3,*")])
  22518. +
  22519. +(define_insn "*mov<VM64:mode>_insn_sp32"
  22520. + [(set (match_operand:VM64 0 "nonimmediate_operand"
  22521. + "=T,o,e,e,e,*r, f,e,T,U,T,f,o,*r,*r, o")
  22522. + (match_operand:VM64 1 "input_operand"
  22523. + " Y,Y,Y,Z,e, f,*r,T,e,T,U,o,f,*r, o,*r"))]
  22524. + "TARGET_VIS
  22525. + && TARGET_ARCH32
  22526. + && (register_operand (operands[0], <VM64:MODE>mode)
  22527. + || register_or_zero_or_all_ones_operand (operands[1], <VM64:MODE>mode))"
  22528. + "@
  22529. + stx\t%r1, %0
  22530. + #
  22531. + fzero\t%0
  22532. + fone\t%0
  22533. + fsrc2\t%1, %0
  22534. + #
  22535. + #
  22536. + ldd\t%1, %0
  22537. + std\t%1, %0
  22538. + ldd\t%1, %0
  22539. + std\t%1, %0
  22540. + #
  22541. + #
  22542. + #
  22543. + ldd\t%1, %0
  22544. + std\t%1, %0"
  22545. + [(set_attr "type" "store,*,visl,visl,vismv,*,*,fpload,fpstore,load,store,*,*,*,load,store")
  22546. + (set_attr "subtype" "*,*,double,double,double,*,*,*,*,regular,*,*,*,*,regular,*")
  22547. + (set_attr "length" "*,2,*,*,*,2,2,*,*,*,*,2,2,2,*,*")
  22548. + (set_attr "cpu_feature" "*,*,vis,vis,vis,vis3,vis3,*,*,*,*,*,*,*,*,*")
  22549. + (set_attr "lra" "*,*,*,*,*,*,*,*,*,disabled,disabled,*,*,*,*,*")])
  22550. +
  22551. +(define_split
  22552. + [(set (match_operand:VM64 0 "register_operand" "")
  22553. + (match_operand:VM64 1 "register_operand" ""))]
  22554. + "reload_completed
  22555. + && TARGET_VIS
  22556. + && TARGET_ARCH32
  22557. + && sparc_split_reg_reg_legitimate (operands[0], operands[1])"
  22558. + [(clobber (const_int 0))]
  22559. +{
  22560. + sparc_split_reg_reg (operands[0], operands[1], SImode);
  22561. + DONE;
  22562. +})
  22563. +
  22564. +(define_split
  22565. + [(set (match_operand:VM64 0 "register_operand" "")
  22566. + (match_operand:VM64 1 "memory_operand" ""))]
  22567. + "reload_completed
  22568. + && TARGET_VIS
  22569. + && TARGET_ARCH32
  22570. + && sparc_split_reg_mem_legitimate (operands[0], operands[1])"
  22571. + [(clobber (const_int 0))]
  22572. +{
  22573. + sparc_split_reg_mem (operands[0], operands[1], SImode);
  22574. + DONE;
  22575. +})
  22576. +
  22577. +(define_split
  22578. + [(set (match_operand:VM64 0 "memory_operand" "")
  22579. + (match_operand:VM64 1 "register_operand" ""))]
  22580. + "reload_completed
  22581. + && TARGET_VIS
  22582. + && TARGET_ARCH32
  22583. + && sparc_split_reg_mem_legitimate (operands[1], operands[0])"
  22584. + [(clobber (const_int 0))]
  22585. +{
  22586. + sparc_split_mem_reg (operands[0], operands[1], SImode);
  22587. + DONE;
  22588. +})
  22589. +
  22590. +(define_split
  22591. + [(set (match_operand:VM64 0 "memory_operand" "")
  22592. + (match_operand:VM64 1 "const_zero_operand" ""))]
  22593. + "reload_completed
  22594. + && TARGET_VIS
  22595. + && TARGET_ARCH32
  22596. + && !mem_min_alignment (operands[0], 8)
  22597. + && offsettable_memref_p (operands[0])"
  22598. + [(clobber (const_int 0))]
  22599. +{
  22600. + emit_move_insn_1 (adjust_address (operands[0], SImode, 0), const0_rtx);
  22601. + emit_move_insn_1 (adjust_address (operands[0], SImode, 4), const0_rtx);
  22602. + DONE;
  22603. +})
  22604. +
  22605. +(define_expand "vec_init<VMALL:mode><VMALL:veltmode>"
  22606. + [(match_operand:VMALL 0 "register_operand" "")
  22607. + (match_operand:VMALL 1 "" "")]
  22608. + "TARGET_VIS"
  22609. +{
  22610. + sparc_expand_vector_init (operands[0], operands[1]);
  22611. + DONE;
  22612. +})
  22613. +
  22614. +(define_code_iterator plusminus [plus minus])
  22615. +(define_code_attr plusminus_insn [(plus "add") (minus "sub")])
  22616. +
  22617. +(define_mode_iterator VADDSUB [V1SI V2SI V2HI V4HI])
  22618. +
  22619. +(define_insn "<plusminus_insn><VADDSUB:mode>3"
  22620. + [(set (match_operand:VADDSUB 0 "register_operand" "=<vconstr>")
  22621. + (plusminus:VADDSUB (match_operand:VADDSUB 1 "register_operand" "<vconstr>")
  22622. + (match_operand:VADDSUB 2 "register_operand" "<vconstr>")))]
  22623. + "TARGET_VIS"
  22624. + "fp<plusminus_insn><vbits>\t%1, %2, %0"
  22625. + [(set_attr "type" "fga")
  22626. + (set_attr "subtype" "other")
  22627. + (set_attr "fptype" "<vfptype>")])
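+
+;; Source-level sketch using GCC generic vectors (assumed typedef):
+;;
+;;   typedef short v4hi __attribute__ ((vector_size (8)));
+;;   v4hi add4 (v4hi a, v4hi b) { return a + b; }   /* -> fpadd16 */
+;;
+;; V2SI addition likewise maps to fpadd32, and the 32-bit vector modes
+;; select the "s" (single-register) forms via the vbits attribute.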
+
+(define_mode_iterator VL [V1SI V2HI V4QI V1DI V2SI V4HI V8QI])
+(define_mode_attr vlsuf [(V1SI "s") (V2HI "s") (V4QI "s")
+                         (V1DI "") (V2SI "") (V4HI "") (V8QI "")])
+(define_code_iterator vlop [ior and xor])
+(define_code_attr vlinsn [(ior "or") (and "and") (xor "xor")])
+(define_code_attr vlninsn [(ior "nor") (and "nand") (xor "xnor")])
+
+(define_insn "<vlop:code><VL:mode>3"
+  [(set (match_operand:VL 0 "register_operand" "=<vconstr>")
+        (vlop:VL (match_operand:VL 1 "register_operand" "<vconstr>")
+                 (match_operand:VL 2 "register_operand" "<vconstr>")))]
+  "TARGET_VIS"
+  "f<vlinsn><vlsuf>\t%1, %2, %0"
+  [(set_attr "type" "visl")
+   (set_attr "fptype" "<vfptype>")])
+
+(define_insn "*not_<vlop:code><VL:mode>3"
+  [(set (match_operand:VL 0 "register_operand" "=<vconstr>")
+        (not:VL (vlop:VL (match_operand:VL 1 "register_operand" "<vconstr>")
+                         (match_operand:VL 2 "register_operand" "<vconstr>"))))]
+  "TARGET_VIS"
+  "f<vlninsn><vlsuf>\t%1, %2, %0"
+  [(set_attr "type" "visl")
+   (set_attr "fptype" "<vfptype>")])
+
+;; (ior (not (op1)) (not (op2))) is the canonical form of NAND.
+(define_insn "*nand<VL:mode>_vis"
+  [(set (match_operand:VL 0 "register_operand" "=<vconstr>")
+        (ior:VL (not:VL (match_operand:VL 1 "register_operand" "<vconstr>"))
+                (not:VL (match_operand:VL 2 "register_operand" "<vconstr>"))))]
+  "TARGET_VIS"
+  "fnand<vlsuf>\t%1, %2, %0"
+  [(set_attr "type" "visl")
+   (set_attr "fptype" "<vfptype>")])
+
+(define_code_iterator vlnotop [ior and])
+
+(define_insn "*<vlnotop:code>_not1<VL:mode>_vis"
+  [(set (match_operand:VL 0 "register_operand" "=<vconstr>")
+        (vlnotop:VL (not:VL (match_operand:VL 1 "register_operand" "<vconstr>"))
+                    (match_operand:VL 2 "register_operand" "<vconstr>")))]
+  "TARGET_VIS"
+  "f<vlinsn>not1<vlsuf>\t%1, %2, %0"
+  [(set_attr "type" "visl")
+   (set_attr "fptype" "<vfptype>")])
+
+(define_insn "*<vlnotop:code>_not2<VL:mode>_vis"
+  [(set (match_operand:VL 0 "register_operand" "=<vconstr>")
+        (vlnotop:VL (match_operand:VL 1 "register_operand" "<vconstr>")
+                    (not:VL (match_operand:VL 2 "register_operand" "<vconstr>"))))]
+  "TARGET_VIS"
+  "f<vlinsn>not2<vlsuf>\t%1, %2, %0"
+  [(set_attr "type" "visl")
+   (set_attr "fptype" "<vfptype>")])
+
+(define_insn "one_cmpl<VL:mode>2"
+  [(set (match_operand:VL 0 "register_operand" "=<vconstr>")
+        (not:VL (match_operand:VL 1 "register_operand" "<vconstr>")))]
+  "TARGET_VIS"
+  "fnot1<vlsuf>\t%1, %0"
+  [(set_attr "type" "visl")
+   (set_attr "fptype" "<vfptype>")])
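+
+;; A companion sketch for the logical patterns above (illustration only,
+;; same assumptions as the addition example):
+;;
+;;   typedef short v4hi __attribute__ ((vector_size (8)));
+;;   v4hi vand (v4hi a, v4hi b) { return a & b; }  /* fand  */
+;;   v4hi vnot (v4hi a)         { return ~a; }     /* fnot1 */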
+
+;; VIS instructions that are hard to generate from straight-line code.
+;; We have builtins for these.
+
+(define_insn "fpack16_vis"
+  [(set (match_operand:V4QI 0 "register_operand" "=f")
+        (unspec:V4QI [(match_operand:V4HI 1 "register_operand" "e")
+                      (reg:DI GSR_REG)]
+         UNSPEC_FPACK16))]
+  "TARGET_VIS"
+  "fpack16\t%1, %0"
+  [(set_attr "type" "fgm_pack")
+   (set_attr "fptype" "double")])
+
+(define_insn "fpackfix_vis"
+  [(set (match_operand:V2HI 0 "register_operand" "=f")
+        (unspec:V2HI [(match_operand:V2SI 1 "register_operand" "e")
+                      (reg:DI GSR_REG)]
+         UNSPEC_FPACKFIX))]
+  "TARGET_VIS"
+  "fpackfix\t%1, %0"
+  [(set_attr "type" "fgm_pack")
+   (set_attr "fptype" "double")])
+
+(define_insn "fpack32_vis"
+  [(set (match_operand:V8QI 0 "register_operand" "=e")
+        (unspec:V8QI [(match_operand:V2SI 1 "register_operand" "e")
+                      (match_operand:V8QI 2 "register_operand" "e")
+                      (reg:DI GSR_REG)]
+         UNSPEC_FPACK32))]
+  "TARGET_VIS"
+  "fpack32\t%1, %2, %0"
+  [(set_attr "type" "fgm_pack")
+   (set_attr "fptype" "double")])
+
+(define_insn "fexpand_vis"
+  [(set (match_operand:V4HI 0 "register_operand" "=e")
+        (unspec:V4HI [(match_operand:V4QI 1 "register_operand" "f")]
+         UNSPEC_FEXPAND))]
+  "TARGET_VIS"
+  "fexpand\t%1, %0"
+  [(set_attr "type" "fga")
+   (set_attr "subtype" "fpu")
+   (set_attr "fptype" "double")])
+
+(define_insn "fpmerge_vis"
+  [(set (match_operand:V8QI 0 "register_operand" "=e")
+        (vec_select:V8QI
+          (vec_concat:V8QI (match_operand:V4QI 1 "register_operand" "f")
+                           (match_operand:V4QI 2 "register_operand" "f"))
+          (parallel [(const_int 0) (const_int 4)
+                     (const_int 1) (const_int 5)
+                     (const_int 2) (const_int 6)
+                     (const_int 3) (const_int 7)])))]
+  "TARGET_VIS"
+  "fpmerge\t%1, %2, %0"
+  [(set_attr "type" "fga")
+   (set_attr "subtype" "fpu")
+   (set_attr "fptype" "double")])
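+
+;; The pack/expand/merge patterns are reachable from C through GCC's
+;; documented VIS builtins.  A small sketch (illustration only):
+;;
+;;   typedef short v4hi __attribute__ ((vector_size (8)));
+;;   typedef unsigned char v4qi __attribute__ ((vector_size (4)));
+;;   typedef unsigned char v8qi __attribute__ ((vector_size (8)));
+;;
+;;   v4qi pack  (v4hi x)         { return __builtin_vis_fpack16 (x); }
+;;   v4hi widen (v4qi x)         { return __builtin_vis_fexpand (x); }
+;;   v8qi ilv   (v4qi a, v4qi b) { return __builtin_vis_fpmerge (a, b); }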
+
+;; Partitioned multiply instructions
+(define_insn "fmul8x16_vis"
+  [(set (match_operand:V4HI 0 "register_operand" "=e")
+        (unspec:V4HI [(match_operand:V4QI 1 "register_operand" "f")
+                      (match_operand:V4HI 2 "register_operand" "e")]
+         UNSPEC_MUL8))]
+  "TARGET_VIS"
+  "fmul8x16\t%1, %2, %0"
+  [(set_attr "type" "fgm_mul")
+   (set_attr "fptype" "double")])
+
+(define_insn "fmul8x16au_vis"
+  [(set (match_operand:V4HI 0 "register_operand" "=e")
+        (unspec:V4HI [(match_operand:V4QI 1 "register_operand" "f")
+                      (match_operand:V2HI 2 "register_operand" "f")]
+         UNSPEC_MUL16AU))]
+  "TARGET_VIS"
+  "fmul8x16au\t%1, %2, %0"
+  [(set_attr "type" "fgm_mul")
+   (set_attr "fptype" "double")])
+
+(define_insn "fmul8x16al_vis"
+  [(set (match_operand:V4HI 0 "register_operand" "=e")
+        (unspec:V4HI [(match_operand:V4QI 1 "register_operand" "f")
+                      (match_operand:V2HI 2 "register_operand" "f")]
+         UNSPEC_MUL16AL))]
+  "TARGET_VIS"
+  "fmul8x16al\t%1, %2, %0"
+  [(set_attr "type" "fgm_mul")
+   (set_attr "fptype" "double")])
+
+(define_insn "fmul8sux16_vis"
+  [(set (match_operand:V4HI 0 "register_operand" "=e")
+        (unspec:V4HI [(match_operand:V8QI 1 "register_operand" "e")
+                      (match_operand:V4HI 2 "register_operand" "e")]
+         UNSPEC_MUL8SU))]
+  "TARGET_VIS"
+  "fmul8sux16\t%1, %2, %0"
+  [(set_attr "type" "fgm_mul")
+   (set_attr "fptype" "double")])
+
+(define_insn "fmul8ulx16_vis"
+  [(set (match_operand:V4HI 0 "register_operand" "=e")
+        (unspec:V4HI [(match_operand:V8QI 1 "register_operand" "e")
+                      (match_operand:V4HI 2 "register_operand" "e")]
+         UNSPEC_MUL8UL))]
+  "TARGET_VIS"
+  "fmul8ulx16\t%1, %2, %0"
+  [(set_attr "type" "fgm_mul")
+   (set_attr "fptype" "double")])
+
+(define_insn "fmuld8sux16_vis"
+  [(set (match_operand:V2SI 0 "register_operand" "=e")
+        (unspec:V2SI [(match_operand:V4QI 1 "register_operand" "f")
+                      (match_operand:V2HI 2 "register_operand" "f")]
+         UNSPEC_MULDSU))]
+  "TARGET_VIS"
+  "fmuld8sux16\t%1, %2, %0"
+  [(set_attr "type" "fgm_mul")
+   (set_attr "fptype" "double")])
+
+(define_insn "fmuld8ulx16_vis"
+  [(set (match_operand:V2SI 0 "register_operand" "=e")
+        (unspec:V2SI [(match_operand:V4QI 1 "register_operand" "f")
+                      (match_operand:V2HI 2 "register_operand" "f")]
+         UNSPEC_MULDUL))]
+  "TARGET_VIS"
+  "fmuld8ulx16\t%1, %2, %0"
+  [(set_attr "type" "fgm_mul")
+   (set_attr "fptype" "double")])
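+
+;; Sketch of driving a partitioned multiply from C via its documented
+;; builtin (illustration only; roughly, each 8-bit element of A scales
+;; the corresponding 16-bit element of B, keeping the high part of the
+;; product):
+;;
+;;   typedef short v4hi __attribute__ ((vector_size (8)));
+;;   typedef unsigned char v4qi __attribute__ ((vector_size (4)));
+;;
+;;   v4hi scale (v4qi a, v4hi b) { return __builtin_vis_fmul8x16 (a, b); }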
+
+(define_expand "wrgsr_vis"
+  [(set (reg:DI GSR_REG) (match_operand:DI 0 "arith_operand" ""))]
+  "TARGET_VIS"
+{
+  if (TARGET_ARCH32)
+    {
+      emit_insn (gen_wrgsr_v8plus (operands[0]));
+      DONE;
+    }
+})
+
+(define_insn "*wrgsr_sp64"
+  [(set (reg:DI GSR_REG) (match_operand:DI 0 "arith_operand" "rI"))]
+  "TARGET_VIS && TARGET_ARCH64"
+  "wr\t%%g0, %0, %%gsr"
+  [(set_attr "type" "gsr")
+   (set_attr "subtype" "reg")])
+
+(define_insn "wrgsr_v8plus"
+  [(set (reg:DI GSR_REG) (match_operand:DI 0 "arith_operand" "I,r"))
+   (clobber (match_scratch:SI 1 "=X,&h"))]
+  "TARGET_VIS && TARGET_ARCH32"
+{
+  if (GET_CODE (operands[0]) == CONST_INT
+      || sparc_check_64 (operands[0], insn))
+    return "wr\t%%g0, %0, %%gsr";
+
+  output_asm_insn ("srl\t%L0, 0, %L0", operands);
+  return "sllx\t%H0, 32, %1\n\tor\t%L0, %1, %1\n\twr\t%%g0, %1, %%gsr";
+}
+  [(set_attr "type" "multi")])
+
+(define_expand "rdgsr_vis"
+  [(set (match_operand:DI 0 "register_operand" "") (reg:DI GSR_REG))]
+  "TARGET_VIS"
+{
+  if (TARGET_ARCH32)
+    {
+      emit_insn (gen_rdgsr_v8plus (operands[0]));
+      DONE;
+    }
+})
+
+(define_insn "*rdgsr_sp64"
+  [(set (match_operand:DI 0 "register_operand" "=r") (reg:DI GSR_REG))]
+  "TARGET_VIS && TARGET_ARCH64"
+  "rd\t%%gsr, %0"
+  [(set_attr "type" "gsr")
+   (set_attr "subtype" "reg")])
+
+(define_insn "rdgsr_v8plus"
+  [(set (match_operand:DI 0 "register_operand" "=r") (reg:DI GSR_REG))
+   (clobber (match_scratch:SI 1 "=&h"))]
+  "TARGET_VIS && TARGET_ARCH32"
+{
+  return "rd\t%%gsr, %1\n\tsrlx\t%1, 32, %H0\n\tmov\t%1, %L0";
+}
+  [(set_attr "type" "multi")])
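+
+;; These expanders back the documented GSR builtins.  Sketch
+;; (illustration only):
+;;
+;;   #include <stdint.h>
+;;   void set_gsr (int64_t v) { __builtin_vis_write_gsr (v); }
+;;   int64_t get_gsr (void)   { return __builtin_vis_read_gsr (); }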
+
+;; Using faligndata only makes sense after an alignaddr, since the choice
+;; of bytes taken from each operand depends on the result of the most
+;; recent alignaddr.
+(define_insn "faligndata<VM64:mode>_vis"
+  [(set (match_operand:VM64 0 "register_operand" "=e")
+        (unspec:VM64 [(match_operand:VM64 1 "register_operand" "e")
+                      (match_operand:VM64 2 "register_operand" "e")
+                      (reg:DI GSR_REG)]
+         UNSPEC_ALIGNDATA))]
+  "TARGET_VIS"
+  "faligndata\t%1, %2, %0"
+  [(set_attr "type" "fga")
+   (set_attr "subtype" "other")
+   (set_attr "fptype" "double")])
+
+(define_insn "alignaddrsi_vis"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (plus:SI (match_operand:SI 1 "register_or_zero_operand" "rJ")
+                 (match_operand:SI 2 "register_or_zero_operand" "rJ")))
+   (set (zero_extract:DI (reg:DI GSR_REG) (const_int 3) (const_int 0))
+        (zero_extend:DI (plus:SI (match_dup 1) (match_dup 2))))]
+  "TARGET_VIS"
+  "alignaddr\t%r1, %r2, %0"
+  [(set_attr "type" "gsr")
+   (set_attr "subtype" "alignaddr")])
+
+(define_insn "alignaddrdi_vis"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (plus:DI (match_operand:DI 1 "register_or_zero_operand" "rJ")
+                 (match_operand:DI 2 "register_or_zero_operand" "rJ")))
+   (set (zero_extract:DI (reg:DI GSR_REG) (const_int 3) (const_int 0))
+        (plus:DI (match_dup 1) (match_dup 2)))]
+  "TARGET_VIS"
+  "alignaddr\t%r1, %r2, %0"
+  [(set_attr "type" "gsr")
+   (set_attr "subtype" "alignaddr")])
+
+(define_insn "alignaddrlsi_vis"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (plus:SI (match_operand:SI 1 "register_or_zero_operand" "rJ")
+                 (match_operand:SI 2 "register_or_zero_operand" "rJ")))
+   (set (zero_extract:DI (reg:DI GSR_REG) (const_int 3) (const_int 0))
+        (xor:DI (zero_extend:DI (plus:SI (match_dup 1) (match_dup 2)))
+                (const_int 7)))]
+  "TARGET_VIS"
+  "alignaddrl\t%r1, %r2, %0"
+  [(set_attr "type" "gsr")
+   (set_attr "subtype" "alignaddr")])
+
+(define_insn "alignaddrldi_vis"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (plus:DI (match_operand:DI 1 "register_or_zero_operand" "rJ")
+                 (match_operand:DI 2 "register_or_zero_operand" "rJ")))
+   (set (zero_extract:DI (reg:DI GSR_REG) (const_int 3) (const_int 0))
+        (xor:DI (plus:DI (match_dup 1) (match_dup 2))
+                (const_int 7)))]
+  "TARGET_VIS"
+  "alignaddrl\t%r1, %r2, %0"
+  [(set_attr "type" "gsr")
+   (set_attr "subtype" "alignaddr")])
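+
+;; The classic idiom for a misaligned 8-byte load, sketched via the
+;; documented builtins (illustration only):
+;;
+;;   typedef unsigned char v8qi __attribute__ ((vector_size (8)));
+;;
+;;   v8qi load_misaligned (const void *p)
+;;   {
+;;     const v8qi *q = __builtin_vis_alignaddr ((void *) p, 0);
+;;     return __builtin_vis_faligndatav8qi (q[0], q[1]);
+;;   }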
+
+(define_insn "pdist_vis"
+  [(set (match_operand:DI 0 "register_operand" "=e")
+        (unspec:DI [(match_operand:V8QI 1 "register_operand" "e")
+                    (match_operand:V8QI 2 "register_operand" "e")
+                    (match_operand:DI 3 "register_operand" "0")]
+         UNSPEC_PDIST))]
+  "TARGET_VIS"
+  "pdist\t%1, %2, %0"
+  [(set_attr "type" "pdist")
+   (set_attr "fptype" "double")])
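+
+;; pdist accumulates the sum of absolute differences over eight byte
+;; pairs, the kernel of SAD loops.  Sketch via the documented builtin
+;; (illustration only):
+;;
+;;   #include <stdint.h>
+;;   typedef unsigned char v8qi __attribute__ ((vector_size (8)));
+;;
+;;   int64_t sad8 (v8qi a, v8qi b, int64_t acc)
+;;   {
+;;     return __builtin_vis_pdist (a, b, acc);
+;;   }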
+
+;; Edge instructions produce condition codes equivalent to a 'subcc'
+;; with the same operands.
+(define_insn "edge8<P:mode>_vis"
+  [(set (reg:CCNZ CC_REG)
+        (compare:CCNZ (minus:P (match_operand:P 1 "register_or_zero_operand" "rJ")
+                               (match_operand:P 2 "register_or_zero_operand" "rJ"))
+                      (const_int 0)))
+   (set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(match_dup 1) (match_dup 2)] UNSPEC_EDGE8))]
+  "TARGET_VIS"
+  "edge8\t%r1, %r2, %0"
+  [(set_attr "type" "edge")])
+
+(define_insn "edge8l<P:mode>_vis"
+  [(set (reg:CCNZ CC_REG)
+        (compare:CCNZ (minus:P (match_operand:P 1 "register_or_zero_operand" "rJ")
+                               (match_operand:P 2 "register_or_zero_operand" "rJ"))
+                      (const_int 0)))
+   (set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(match_dup 1) (match_dup 2)] UNSPEC_EDGE8L))]
+  "TARGET_VIS"
+  "edge8l\t%r1, %r2, %0"
+  [(set_attr "type" "edge")])
+
+(define_insn "edge16<P:mode>_vis"
+  [(set (reg:CCNZ CC_REG)
+        (compare:CCNZ (minus:P (match_operand:P 1 "register_or_zero_operand" "rJ")
+                               (match_operand:P 2 "register_or_zero_operand" "rJ"))
+                      (const_int 0)))
+   (set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(match_dup 1) (match_dup 2)] UNSPEC_EDGE16))]
+  "TARGET_VIS"
+  "edge16\t%r1, %r2, %0"
+  [(set_attr "type" "edge")])
+
+(define_insn "edge16l<P:mode>_vis"
+  [(set (reg:CCNZ CC_REG)
+        (compare:CCNZ (minus:P (match_operand:P 1 "register_or_zero_operand" "rJ")
+                               (match_operand:P 2 "register_or_zero_operand" "rJ"))
+                      (const_int 0)))
+   (set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(match_dup 1) (match_dup 2)] UNSPEC_EDGE16L))]
+  "TARGET_VIS"
+  "edge16l\t%r1, %r2, %0"
+  [(set_attr "type" "edge")])
+
+(define_insn "edge32<P:mode>_vis"
+  [(set (reg:CCNZ CC_REG)
+        (compare:CCNZ (minus:P (match_operand:P 1 "register_or_zero_operand" "rJ")
+                               (match_operand:P 2 "register_or_zero_operand" "rJ"))
+                      (const_int 0)))
+   (set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(match_dup 1) (match_dup 2)] UNSPEC_EDGE32))]
+  "TARGET_VIS"
+  "edge32\t%r1, %r2, %0"
+  [(set_attr "type" "edge")])
+
+(define_insn "edge32l<P:mode>_vis"
+  [(set (reg:CCNZ CC_REG)
+        (compare:CCNZ (minus:P (match_operand:P 1 "register_or_zero_operand" "rJ")
+                               (match_operand:P 2 "register_or_zero_operand" "rJ"))
+                      (const_int 0)))
+   (set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(match_dup 1) (match_dup 2)] UNSPEC_EDGE32L))]
+  "TARGET_VIS"
+  "edge32l\t%r1, %r2, %0"
+  [(set_attr "type" "edge")])
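+
+;; Edge masks handle the ragged first and last bytes of a partial-store
+;; loop.  Sketch via the documented builtin (illustration only; as the
+;; patterns above model, the instruction also updates the condition
+;; codes):
+;;
+;;   long first_mask (void *p, void *limit)
+;;   {
+;;     return __builtin_vis_edge8 (p, limit);
+;;   }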
+
+(define_code_iterator gcond [le ne gt eq])
+(define_mode_iterator GCM [V4HI V2SI])
+(define_mode_attr gcm_name [(V4HI "16") (V2SI "32")])
+
+(define_insn "fcmp<gcond:code><GCM:gcm_name><P:mode>_vis"
+  [(set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(gcond:GCM (match_operand:GCM 1 "register_operand" "e")
+                              (match_operand:GCM 2 "register_operand" "e"))]
+         UNSPEC_FCMP))]
+  "TARGET_VIS"
+  "fcmp<gcond:code><GCM:gcm_name>\t%1, %2, %0"
+  [(set_attr "type" "viscmp")])
+
+(define_insn "fpcmp<gcond:code>8<P:mode>_vis"
+  [(set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(gcond:V8QI (match_operand:V8QI 1 "register_operand" "e")
+                               (match_operand:V8QI 2 "register_operand" "e"))]
+         UNSPEC_FCMP))]
+  "TARGET_VIS4"
+  "fpcmp<gcond:code>8\t%1, %2, %0"
+  [(set_attr "type" "viscmp")])
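+
+;; The partitioned compares deposit a one-bit-per-element mask in an
+;; integer register.  Sketch via a documented VIS1 builtin (illustration
+;; only):
+;;
+;;   typedef short v4hi __attribute__ ((vector_size (8)));
+;;   long gt16 (v4hi a, v4hi b) { return __builtin_vis_fcmpgt16 (a, b); }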
+
+(define_expand "vcond<GCM:mode><GCM:mode>"
+  [(match_operand:GCM 0 "register_operand" "")
+   (match_operand:GCM 1 "register_operand" "")
+   (match_operand:GCM 2 "register_operand" "")
+   (match_operator 3 ""
+     [(match_operand:GCM 4 "register_operand" "")
+      (match_operand:GCM 5 "register_operand" "")])]
+  "TARGET_VIS3"
+{
+  sparc_expand_vcond (<MODE>mode, operands, UNSPEC_CMASK<gcm_name>, UNSPEC_FCMP);
+  DONE;
+})
+
+(define_expand "vconduv8qiv8qi"
+  [(match_operand:V8QI 0 "register_operand" "")
+   (match_operand:V8QI 1 "register_operand" "")
+   (match_operand:V8QI 2 "register_operand" "")
+   (match_operator 3 ""
+     [(match_operand:V8QI 4 "register_operand" "")
+      (match_operand:V8QI 5 "register_operand" "")])]
+  "TARGET_VIS3"
+{
+  sparc_expand_vcond (V8QImode, operands, UNSPEC_CMASK8, UNSPEC_FUCMP);
+  DONE;
+})
+
+(define_insn "array8<P:mode>_vis"
+  [(set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(match_operand:P 1 "register_or_zero_operand" "rJ")
+                   (match_operand:P 2 "register_or_zero_operand" "rJ")]
+         UNSPEC_ARRAY8))]
+  "TARGET_VIS"
+  "array8\t%r1, %r2, %0"
+  [(set_attr "type" "array")])
+
+(define_insn "array16<P:mode>_vis"
+  [(set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(match_operand:P 1 "register_or_zero_operand" "rJ")
+                   (match_operand:P 2 "register_or_zero_operand" "rJ")]
+         UNSPEC_ARRAY16))]
+  "TARGET_VIS"
+  "array16\t%r1, %r2, %0"
+  [(set_attr "type" "array")])
+
+(define_insn "array32<P:mode>_vis"
+  [(set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(match_operand:P 1 "register_or_zero_operand" "rJ")
+                   (match_operand:P 2 "register_or_zero_operand" "rJ")]
+         UNSPEC_ARRAY32))]
+  "TARGET_VIS"
+  "array32\t%r1, %r2, %0"
+  [(set_attr "type" "array")])
+
+(define_insn "bmaskdi_vis"
+  [(set (match_operand:DI 0 "register_operand" "=r")
+        (plus:DI (match_operand:DI 1 "register_or_zero_operand" "rJ")
+                 (match_operand:DI 2 "register_or_zero_operand" "rJ")))
+   (set (zero_extract:DI (reg:DI GSR_REG) (const_int 32) (const_int 32))
+        (plus:DI (match_dup 1) (match_dup 2)))]
+  "TARGET_VIS2 && TARGET_ARCH64"
+  "bmask\t%r1, %r2, %0"
+  [(set_attr "type" "bmask")])
+
+(define_insn "bmasksi_vis"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (plus:SI (match_operand:SI 1 "register_or_zero_operand" "rJ")
+                 (match_operand:SI 2 "register_or_zero_operand" "rJ")))
+   (set (zero_extract:DI (reg:DI GSR_REG) (const_int 32) (const_int 32))
+        (zero_extend:DI (plus:SI (match_dup 1) (match_dup 2))))]
+  "TARGET_VIS2"
+  "bmask\t%r1, %r2, %0"
+  [(set_attr "type" "bmask")])
+
+(define_insn "bshuffle<VM64:mode>_vis"
+  [(set (match_operand:VM64 0 "register_operand" "=e")
+        (unspec:VM64 [(match_operand:VM64 1 "register_operand" "e")
+                      (match_operand:VM64 2 "register_operand" "e")
+                      (reg:DI GSR_REG)]
+         UNSPEC_BSHUFFLE))]
+  "TARGET_VIS2"
+  "bshuffle\t%1, %2, %0"
+  [(set_attr "type" "fga")
+   (set_attr "subtype" "other")
+   (set_attr "fptype" "double")])
+
+;; Unlike constant permutation, we can vastly simplify the compression of
+;; the 64-bit selector input to the 32-bit %gsr value by knowing what the
+;; width of the input is.
+(define_expand "vec_perm<VM64:mode>"
+  [(match_operand:VM64 0 "register_operand" "")
+   (match_operand:VM64 1 "register_operand" "")
+   (match_operand:VM64 2 "register_operand" "")
+   (match_operand:VM64 3 "register_operand" "")]
+  "TARGET_VIS2"
+{
+  sparc_expand_vec_perm_bmask (<MODE>mode, operands[3]);
+  emit_insn (gen_bshuffle<VM64:mode>_vis (operands[0], operands[1], operands[2]));
+  DONE;
+})
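+
+;; bmask deposits the selector in the upper half of %gsr; bshuffle then
+;; picks bytes from the concatenation of its two inputs.  Sketch via the
+;; documented VIS2 builtins (illustration only):
+;;
+;;   typedef unsigned char v8qi __attribute__ ((vector_size (8)));
+;;
+;;   v8qi shuffle (long sel, v8qi a, v8qi b)
+;;   {
+;;     __builtin_vis_bmask (sel, 0);
+;;     return __builtin_vis_bshufflev8qi (a, b);
+;;   }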
+
+;; VIS 2.0 adds edge variants which do not set the condition codes.
+(define_insn "edge8n<P:mode>_vis"
+  [(set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(match_operand:P 1 "register_or_zero_operand" "rJ")
+                   (match_operand:P 2 "register_or_zero_operand" "rJ")]
+         UNSPEC_EDGE8N))]
+  "TARGET_VIS2"
+  "edge8n\t%r1, %r2, %0"
+  [(set_attr "type" "edgen")])
+
+(define_insn "edge8ln<P:mode>_vis"
+  [(set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(match_operand:P 1 "register_or_zero_operand" "rJ")
+                   (match_operand:P 2 "register_or_zero_operand" "rJ")]
+         UNSPEC_EDGE8LN))]
+  "TARGET_VIS2"
+  "edge8ln\t%r1, %r2, %0"
+  [(set_attr "type" "edgen")])
+
+(define_insn "edge16n<P:mode>_vis"
+  [(set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(match_operand:P 1 "register_or_zero_operand" "rJ")
+                   (match_operand:P 2 "register_or_zero_operand" "rJ")]
+         UNSPEC_EDGE16N))]
+  "TARGET_VIS2"
+  "edge16n\t%r1, %r2, %0"
+  [(set_attr "type" "edgen")])
+
+(define_insn "edge16ln<P:mode>_vis"
+  [(set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(match_operand:P 1 "register_or_zero_operand" "rJ")
+                   (match_operand:P 2 "register_or_zero_operand" "rJ")]
+         UNSPEC_EDGE16LN))]
+  "TARGET_VIS2"
+  "edge16ln\t%r1, %r2, %0"
+  [(set_attr "type" "edgen")])
+
+(define_insn "edge32n<P:mode>_vis"
+  [(set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(match_operand:P 1 "register_or_zero_operand" "rJ")
+                   (match_operand:P 2 "register_or_zero_operand" "rJ")]
+         UNSPEC_EDGE32N))]
+  "TARGET_VIS2"
+  "edge32n\t%r1, %r2, %0"
+  [(set_attr "type" "edgen")])
+
+(define_insn "edge32ln<P:mode>_vis"
+  [(set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(match_operand:P 1 "register_or_zero_operand" "rJ")
+                   (match_operand:P 2 "register_or_zero_operand" "rJ")]
+         UNSPEC_EDGE32LN))]
+  "TARGET_VIS2"
+  "edge32ln\t%r1, %r2, %0"
+  [(set_attr "type" "edgen")])
+
+;; Conditional moves are possible via fcmpX -> cmaskX -> bshuffle
+(define_insn "cmask8<P:mode>_vis"
+  [(set (reg:DI GSR_REG)
+        (unspec:DI [(match_operand:P 0 "register_or_zero_operand" "rJ")
+                    (reg:DI GSR_REG)]
+         UNSPEC_CMASK8))]
+  "TARGET_VIS3"
+  "cmask8\t%r0"
+  [(set_attr "type" "fga")
+   (set_attr "subtype" "cmask")])
+
+(define_insn "cmask16<P:mode>_vis"
+  [(set (reg:DI GSR_REG)
+        (unspec:DI [(match_operand:P 0 "register_or_zero_operand" "rJ")
+                    (reg:DI GSR_REG)]
+         UNSPEC_CMASK16))]
+  "TARGET_VIS3"
+  "cmask16\t%r0"
+  [(set_attr "type" "fga")
+   (set_attr "subtype" "cmask")])
+
+(define_insn "cmask32<P:mode>_vis"
+  [(set (reg:DI GSR_REG)
+        (unspec:DI [(match_operand:P 0 "register_or_zero_operand" "rJ")
+                    (reg:DI GSR_REG)]
+         UNSPEC_CMASK32))]
+  "TARGET_VIS3"
+  "cmask32\t%r0"
+  [(set_attr "type" "fga")
+   (set_attr "subtype" "cmask")])
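+
+;; The full chain from C, via documented VIS3/VIS2 builtins (illustration
+;; only; a per-element select between the two inputs):
+;;
+;;   typedef short v4hi __attribute__ ((vector_size (8)));
+;;
+;;   v4hi vselect (v4hi a, v4hi b)
+;;   {
+;;     long m = __builtin_vis_fcmpgt16 (a, b);    /* element mask      */
+;;     __builtin_vis_cmask16 (m);                 /* mask into %gsr    */
+;;     return __builtin_vis_bshufflev4hi (a, b);  /* per-element pick  */
+;;   }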
+
+(define_insn "fchksm16_vis"
+  [(set (match_operand:V4HI 0 "register_operand" "=e")
+        (unspec:V4HI [(match_operand:V4HI 1 "register_operand" "e")
+                      (match_operand:V4HI 2 "register_operand" "e")]
+         UNSPEC_FCHKSM16))]
+  "TARGET_VIS3"
+  "fchksm16\t%1, %2, %0"
+  [(set_attr "type" "fga")
+   (set_attr "subtype" "fpu")])
+
+(define_code_iterator vis3_shift [ashift ss_ashift lshiftrt ashiftrt])
+(define_code_attr vis3_shift_insn
+  [(ashift "fsll") (ss_ashift "fslas") (lshiftrt "fsrl") (ashiftrt "fsra")])
+(define_code_attr vis3_shift_patname
+  [(ashift "ashl") (ss_ashift "ssashl") (lshiftrt "lshr") (ashiftrt "ashr")])
+
+(define_insn "v<vis3_shift_patname><GCM:mode>3"
+  [(set (match_operand:GCM 0 "register_operand" "=<vconstr>")
+        (vis3_shift:GCM (match_operand:GCM 1 "register_operand" "<vconstr>")
+                        (match_operand:GCM 2 "register_operand" "<vconstr>")))]
+  "TARGET_VIS3"
+  "<vis3_shift_insn><vbits>\t%1, %2, %0"
+  [(set_attr "type" "fga")
+   (set_attr "subtype" "fpu")])
+
+(define_insn "pdistn<P:mode>_vis"
+  [(set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(match_operand:V8QI 1 "register_operand" "e")
+                   (match_operand:V8QI 2 "register_operand" "e")]
+         UNSPEC_PDISTN))]
+  "TARGET_VIS3"
+  "pdistn\t%1, %2, %0"
+  [(set_attr "type" "pdistn")
+   (set_attr "fptype" "double")])
+
+(define_insn "fmean16_vis"
+  [(set (match_operand:V4HI 0 "register_operand" "=e")
+        (truncate:V4HI
+          (lshiftrt:V4SI
+            (plus:V4SI
+              (plus:V4SI
+                (zero_extend:V4SI
+                  (match_operand:V4HI 1 "register_operand" "e"))
+                (zero_extend:V4SI
+                  (match_operand:V4HI 2 "register_operand" "e")))
+              (const_vector:V4SI [(const_int 1) (const_int 1)
+                                  (const_int 1) (const_int 1)]))
+            (const_int 1))))]
+  "TARGET_VIS3"
+  "fmean16\t%1, %2, %0"
+  [(set_attr "type" "fga")
+   (set_attr "subtype" "fpu")])
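+
+;; Per the RTL above, fmean16 is the rounded average of each 16-bit
+;; element pair, (a + b + 1) >> 1 computed in 32-bit precision: elements
+;; 5 and 8 give (5 + 8 + 1) >> 1 = 7.  Sketch via the documented builtin
+;; (illustration only):
+;;
+;;   typedef short v4hi __attribute__ ((vector_size (8)));
+;;   v4hi avg (v4hi a, v4hi b) { return __builtin_vis_fmean16 (a, b); }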
+
+(define_insn "fp<plusminus_insn>64_vis"
+  [(set (match_operand:V1DI 0 "register_operand" "=e")
+        (plusminus:V1DI (match_operand:V1DI 1 "register_operand" "e")
+                        (match_operand:V1DI 2 "register_operand" "e")))]
+  "TARGET_VIS3"
+  "fp<plusminus_insn>64\t%1, %2, %0"
+  [(set_attr "type" "fga")
+   (set_attr "subtype" "addsub64")])
+
+(define_insn "<plusminus_insn>v8qi3"
+  [(set (match_operand:V8QI 0 "register_operand" "=e")
+        (plusminus:V8QI (match_operand:V8QI 1 "register_operand" "e")
+                        (match_operand:V8QI 2 "register_operand" "e")))]
+  "TARGET_VIS4"
+  "fp<plusminus_insn>8\t%1, %2, %0"
+  [(set_attr "type" "fga")
+   (set_attr "subtype" "other")])
+
+(define_mode_iterator VASS [V4HI V2SI V2HI V1SI])
+(define_code_iterator vis3_addsub_ss [ss_plus ss_minus])
+(define_code_attr vis3_addsub_ss_insn
+  [(ss_plus "fpadds") (ss_minus "fpsubs")])
+(define_code_attr vis3_addsub_ss_patname
+  [(ss_plus "ssadd") (ss_minus "sssub")])
+
+(define_insn "<vis3_addsub_ss_patname><VASS:mode>3"
+  [(set (match_operand:VASS 0 "register_operand" "=<vconstr>")
+        (vis3_addsub_ss:VASS (match_operand:VASS 1 "register_operand" "<vconstr>")
+                             (match_operand:VASS 2 "register_operand" "<vconstr>")))]
+  "TARGET_VIS3"
+  "<vis3_addsub_ss_insn><vbits>\t%1, %2, %0"
+  [(set_attr "type" "fga")
+   (set_attr "subtype" "other")])
+
+(define_mode_iterator VMMAX [V8QI V4HI V2SI])
+(define_code_iterator vis4_minmax [smin smax])
+(define_code_attr vis4_minmax_insn
+  [(smin "fpmin") (smax "fpmax")])
+(define_code_attr vis4_minmax_patname
+  [(smin "min") (smax "max")])
+
+(define_insn "<vis4_minmax_patname><VMMAX:mode>3"
+  [(set (match_operand:VMMAX 0 "register_operand" "=<vconstr>")
+        (vis4_minmax:VMMAX (match_operand:VMMAX 1 "register_operand" "<vconstr>")
+                           (match_operand:VMMAX 2 "register_operand" "<vconstr>")))]
+  "TARGET_VIS4"
+  "<vis4_minmax_insn><vbits>\t%1, %2, %0"
+  [(set_attr "type" "fga")
+   (set_attr "subtype" "maxmin")])
+
+(define_code_iterator vis4_uminmax [umin umax])
+(define_code_attr vis4_uminmax_insn
+  [(umin "fpminu") (umax "fpmaxu")])
+(define_code_attr vis4_uminmax_patname
+  [(umin "minu") (umax "maxu")])
+
+(define_insn "<vis4_uminmax_patname><VMMAX:mode>3"
+  [(set (match_operand:VMMAX 0 "register_operand" "=<vconstr>")
+        (vis4_uminmax:VMMAX (match_operand:VMMAX 1 "register_operand" "<vconstr>")
+                            (match_operand:VMMAX 2 "register_operand" "<vconstr>")))]
+  "TARGET_VIS4"
+  "<vis4_uminmax_insn><vbits>\t%1, %2, %0"
+  [(set_attr "type" "fga")
+   (set_attr "subtype" "maxmin")])
+
+;; The use of vis3_addsub_ss_patname in the VIS4 instruction below is
+;; intended.
+(define_insn "<vis3_addsub_ss_patname>v8qi3"
+  [(set (match_operand:V8QI 0 "register_operand" "=e")
+        (vis3_addsub_ss:V8QI (match_operand:V8QI 1 "register_operand" "e")
+                             (match_operand:V8QI 2 "register_operand" "e")))]
+  "TARGET_VIS4"
+  "<vis3_addsub_ss_insn>8\t%1, %2, %0"
+  [(set_attr "type" "fga")
+   (set_attr "subtype" "other")])
+
+(define_mode_iterator VAUS [V4HI V8QI])
+(define_code_iterator vis4_addsub_us [us_plus us_minus])
+(define_code_attr vis4_addsub_us_insn
+  [(us_plus "fpaddus") (us_minus "fpsubus")])
+(define_code_attr vis4_addsub_us_patname
+  [(us_plus "usadd") (us_minus "ussub")])
+
+(define_insn "<vis4_addsub_us_patname><VAUS:mode>3"
+  [(set (match_operand:VAUS 0 "register_operand" "=<vconstr>")
+        (vis4_addsub_us:VAUS (match_operand:VAUS 1 "register_operand" "<vconstr>")
+                             (match_operand:VAUS 2 "register_operand" "<vconstr>")))]
+  "TARGET_VIS4"
+  "<vis4_addsub_us_insn><vbits>\t%1, %2, %0"
+  [(set_attr "type" "fga")
+   (set_attr "subtype" "other")])
+
+(define_insn "fucmp<gcond:code>8<P:mode>_vis"
+  [(set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(gcond:V8QI (match_operand:V8QI 1 "register_operand" "e")
+                               (match_operand:V8QI 2 "register_operand" "e"))]
+         UNSPEC_FUCMP))]
+  "TARGET_VIS3"
+  "fucmp<gcond:code>8\t%1, %2, %0"
+  [(set_attr "type" "viscmp")])
+
+(define_insn "fpcmpu<gcond:code><GCM:gcm_name><P:mode>_vis"
+  [(set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(gcond:GCM (match_operand:GCM 1 "register_operand" "e")
+                              (match_operand:GCM 2 "register_operand" "e"))]
+         UNSPEC_FUCMP))]
+  "TARGET_VIS4"
+  "fpcmpu<gcond:code><GCM:gcm_name>\t%1, %2, %0"
+  [(set_attr "type" "viscmp")])
+
+(define_insn "*naddsf3"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+        (neg:SF (plus:SF (match_operand:SF 1 "register_operand" "f")
+                         (match_operand:SF 2 "register_operand" "f"))))]
+  "TARGET_VIS3"
+  "fnadds\t%1, %2, %0"
+  [(set_attr "type" "fp")])
+
+(define_insn "*nadddf3"
+  [(set (match_operand:DF 0 "register_operand" "=e")
+        (neg:DF (plus:DF (match_operand:DF 1 "register_operand" "e")
+                         (match_operand:DF 2 "register_operand" "e"))))]
+  "TARGET_VIS3"
+  "fnaddd\t%1, %2, %0"
+  [(set_attr "type" "fp")
+   (set_attr "fptype" "double")])
+
+(define_insn "*nmulsf3"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+        (mult:SF (neg:SF (match_operand:SF 1 "register_operand" "f"))
+                 (match_operand:SF 2 "register_operand" "f")))]
+  "TARGET_VIS3"
+  "fnmuls\t%1, %2, %0"
+  [(set_attr "type" "fpmul")])
+
+(define_insn "*nmuldf3"
+  [(set (match_operand:DF 0 "register_operand" "=e")
+        (mult:DF (neg:DF (match_operand:DF 1 "register_operand" "e"))
+                 (match_operand:DF 2 "register_operand" "e")))]
+  "TARGET_VIS3"
+  "fnmuld\t%1, %2, %0"
+  [(set_attr "type" "fpmul")
+   (set_attr "fptype" "double")])
+
+(define_insn "*nmuldf3_extend"
+  [(set (match_operand:DF 0 "register_operand" "=e")
+        (mult:DF (neg:DF (float_extend:DF
+                           (match_operand:SF 1 "register_operand" "f")))
+                 (float_extend:DF
+                   (match_operand:SF 2 "register_operand" "f"))))]
+  "TARGET_VIS3"
+  "fnsmuld\t%1, %2, %0"
+  [(set_attr "type" "fpmul")
+   (set_attr "fptype" "double")])
+
+(define_insn "fhaddsf_vis"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+        (unspec:SF [(match_operand:SF 1 "register_operand" "f")
+                    (match_operand:SF 2 "register_operand" "f")]
+         UNSPEC_FHADD))]
+  "TARGET_VIS3"
+  "fhadds\t%1, %2, %0"
+  [(set_attr "type" "fp")])
+
+(define_insn "fhadddf_vis"
+  [(set (match_operand:DF 0 "register_operand" "=f")
+        (unspec:DF [(match_operand:DF 1 "register_operand" "f")
+                    (match_operand:DF 2 "register_operand" "f")]
+         UNSPEC_FHADD))]
+  "TARGET_VIS3"
+  "fhaddd\t%1, %2, %0"
+  [(set_attr "type" "fp")
+   (set_attr "fptype" "double")])
+
+(define_insn "fhsubsf_vis"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+        (unspec:SF [(match_operand:SF 1 "register_operand" "f")
+                    (match_operand:SF 2 "register_operand" "f")]
+         UNSPEC_FHSUB))]
+  "TARGET_VIS3"
+  "fhsubs\t%1, %2, %0"
+  [(set_attr "type" "fp")])
+
+(define_insn "fhsubdf_vis"
+  [(set (match_operand:DF 0 "register_operand" "=f")
+        (unspec:DF [(match_operand:DF 1 "register_operand" "f")
+                    (match_operand:DF 2 "register_operand" "f")]
+         UNSPEC_FHSUB))]
+  "TARGET_VIS3"
+  "fhsubd\t%1, %2, %0"
+  [(set_attr "type" "fp")
+   (set_attr "fptype" "double")])
+
+(define_insn "fnhaddsf_vis"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+        (neg:SF (unspec:SF [(match_operand:SF 1 "register_operand" "f")
+                            (match_operand:SF 2 "register_operand" "f")]
+                 UNSPEC_FHADD)))]
+  "TARGET_VIS3"
+  "fnhadds\t%1, %2, %0"
+  [(set_attr "type" "fp")])
+
+(define_insn "fnhadddf_vis"
+  [(set (match_operand:DF 0 "register_operand" "=f")
+        (neg:DF (unspec:DF [(match_operand:DF 1 "register_operand" "f")
+                            (match_operand:DF 2 "register_operand" "f")]
+                 UNSPEC_FHADD)))]
+  "TARGET_VIS3"
+  "fnhaddd\t%1, %2, %0"
+  [(set_attr "type" "fp")
+   (set_attr "fptype" "double")])
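+
+;; The negated forms above are matched directly from source such as
+;; -(a + b); the half adds need their documented builtins.  Sketch
+;; (illustration only; -mvis3 assumed):
+;;
+;;   float nadd (float a, float b) { return -(a + b); }  /* fnadds */
+;;   float havg (float a, float b)
+;;   {
+;;     return __builtin_vis_fhadds (a, b);  /* roughly (a + b) / 2 */
+;;   }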
+
+;; VIS4B instructions.
+
+(define_mode_iterator DUMODE [V2SI V4HI V8QI])
+
+(define_insn "dictunpack<DUMODE:vbits>"
+  [(set (match_operand:DUMODE 0 "register_operand" "=e")
+        (unspec:DUMODE [(match_operand:DF 1 "register_operand" "e")
+                        (match_operand:SI 2 "imm5_operand_dictunpack<DUMODE:vbits>" "t")]
+         UNSPEC_DICTUNPACK))]
+  "TARGET_VIS4B"
+  "dictunpack\t%1, %2, %0"
+  [(set_attr "type" "fga")
+   (set_attr "subtype" "other")])
+
+(define_mode_iterator FPCSMODE [V2SI V4HI V8QI])
+(define_code_iterator fpcscond [le gt eq ne])
+(define_code_iterator fpcsucond [le gt])
+
+(define_insn "fpcmp<fpcscond:code><FPCSMODE:vbits><P:mode>shl"
+  [(set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(fpcscond:FPCSMODE (match_operand:FPCSMODE 1 "register_operand" "e")
+                                      (match_operand:FPCSMODE 2 "register_operand" "e"))
+                   (match_operand:SI 3 "imm2_operand" "q")]
+         UNSPEC_FPCMPSHL))]
+  "TARGET_VIS4B"
+  "fpcmp<fpcscond:code><FPCSMODE:vbits>shl\t%1, %2, %3, %0"
+  [(set_attr "type" "viscmp")])
+
+(define_insn "fpcmpu<fpcsucond:code><FPCSMODE:vbits><P:mode>shl"
+  [(set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(fpcsucond:FPCSMODE (match_operand:FPCSMODE 1 "register_operand" "e")
+                                       (match_operand:FPCSMODE 2 "register_operand" "e"))
+                   (match_operand:SI 3 "imm2_operand" "q")]
+         UNSPEC_FPUCMPSHL))]
+  "TARGET_VIS4B"
+  "fpcmpu<fpcsucond:code><FPCSMODE:vbits>shl\t%1, %2, %3, %0"
+  [(set_attr "type" "viscmp")])
+
+(define_insn "fpcmpde<FPCSMODE:vbits><P:mode>shl"
+  [(set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(match_operand:FPCSMODE 1 "register_operand" "e")
+                   (match_operand:FPCSMODE 2 "register_operand" "e")
+                   (match_operand:SI 3 "imm2_operand" "q")]
+         UNSPEC_FPCMPDESHL))]
+  "TARGET_VIS4B"
+  "fpcmpde<FPCSMODE:vbits>shl\t%1, %2, %3, %0"
+  [(set_attr "type" "viscmp")])
+
+(define_insn "fpcmpur<FPCSMODE:vbits><P:mode>shl"
+  [(set (match_operand:P 0 "register_operand" "=r")
+        (unspec:P [(match_operand:FPCSMODE 1 "register_operand" "e")
+                   (match_operand:FPCSMODE 2 "register_operand" "e")
+                   (match_operand:SI 3 "imm2_operand" "q")]
+         UNSPEC_FPCMPURSHL))]
+  "TARGET_VIS4B"
+  "fpcmpur<FPCSMODE:vbits>shl\t%1, %2, %3, %0"
+  [(set_attr "type" "viscmp")])
+
+(include "sync.md")
diff -Nur gcc-10.3.0.orig/gcc/config/sparc/sparc-protos.h gcc-10.3.0/gcc/config/sparc/sparc-protos.h
--- gcc-10.3.0.orig/gcc/config/sparc/sparc-protos.h	2021-04-08 13:56:28.201742273 +0200
+++ gcc-10.3.0/gcc/config/sparc/sparc-protos.h	2021-04-09 07:51:37.812496739 +0200
@@ -69,7 +69,6 @@
 extern void sparc_split_mem_reg (rtx, rtx, machine_mode);
 extern int sparc_split_reg_reg_legitimate (rtx, rtx);
 extern void sparc_split_reg_reg (rtx, rtx, machine_mode);
-extern const char *output_load_pcrel_sym (rtx *);
 extern const char *output_ubranch (rtx, rtx_insn *);
 extern const char *output_cbranch (rtx, rtx, int, int, int, rtx_insn *);
 extern const char *output_return (rtx_insn *);
diff -Nur gcc-10.3.0.orig/gcc/testsuite/gcc.c-torture/compile/20191108-1.c gcc-10.3.0/gcc/testsuite/gcc.c-torture/compile/20191108-1.c
--- gcc-10.3.0.orig/gcc/testsuite/gcc.c-torture/compile/20191108-1.c	2021-04-08 13:56:28.929751064 +0200
+++ gcc-10.3.0/gcc/testsuite/gcc.c-torture/compile/20191108-1.c	1970-01-01 01:00:00.000000000 +0100
@@ -1,14 +0,0 @@
-/* PR target/92095 */
-/* Testcase by Sergei Trofimovich <slyfox@inbox.ru> */
-
-typedef union {
-  double a;
-  int b[2];
-} c;
-
-double d(int e)
-{
-  c f;
-  (&f)->b[0] = 15728640;
-  return e ? -(&f)->a : (&f)->a;
-}
diff -Nur gcc-10.3.0.orig/gcc/testsuite/gcc.target/sparc/overflow-3.c gcc-10.3.0/gcc/testsuite/gcc.target/sparc/overflow-3.c
--- gcc-10.3.0.orig/gcc/testsuite/gcc.target/sparc/overflow-3.c	2021-04-08 13:56:29.453757389 +0200
+++ gcc-10.3.0/gcc/testsuite/gcc.target/sparc/overflow-3.c	2021-04-09 07:51:37.988507907 +0200
@@ -1,6 +1,6 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target lp64 } */
-/* { dg-options "-O -fno-pie" } */
+/* { dg-options "-O" } */
 #include <stdbool.h>
 #include <stdint.h>
diff -Nur gcc-10.3.0.orig/gcc/testsuite/gcc.target/sparc/overflow-4.c gcc-10.3.0/gcc/testsuite/gcc.target/sparc/overflow-4.c
--- gcc-10.3.0.orig/gcc/testsuite/gcc.target/sparc/overflow-4.c	2021-04-08 13:56:29.453757389 +0200
+++ gcc-10.3.0/gcc/testsuite/gcc.target/sparc/overflow-4.c	2021-04-09 07:51:37.988507907 +0200
@@ -1,6 +1,6 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target lp64 } */
-/* { dg-options "-O -fno-pie -mno-vis3 -mno-vis4" } */
+/* { dg-options "-O -mno-vis3 -mno-vis4" } */
 #include <stdbool.h>
 #include <stdint.h>
diff -Nur gcc-10.3.0.orig/gcc/testsuite/gcc.target/sparc/overflow-5.c gcc-10.3.0/gcc/testsuite/gcc.target/sparc/overflow-5.c
--- gcc-10.3.0.orig/gcc/testsuite/gcc.target/sparc/overflow-5.c	2021-04-08 13:56:29.453757389 +0200
+++ gcc-10.3.0/gcc/testsuite/gcc.target/sparc/overflow-5.c	2021-04-09 07:51:37.992508161 +0200
@@ -1,6 +1,6 @@
 /* { dg-do compile } */
 /* { dg-require-effective-target lp64 } */
-/* { dg-options "-O -fno-pie -mvis3" } */
+/* { dg-options "-O -mvis3" } */
 #include <stdbool.h>
 #include <stdint.h>