solidrun-imx6-wlan.patch

  1. diff -Nur linux-3.18.8.orig/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi linux-3.18.8/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
  2. --- linux-3.18.8.orig/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi 2015-02-27 02:49:36.000000000 +0100
  3. +++ linux-3.18.8/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi 2015-03-02 03:23:14.000000000 +0100
  4. @@ -170,6 +170,28 @@
  5. MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x13059
  6. >;
  7. };
  8. +
  9. + pinctrl_cubox_i_usdhc2_100mhz: cubox-i-usdhc2-100mhz {
  10. + fsl,pins = <
  11. + MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170b9
  12. + MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100b9
  13. + MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170b9
  14. + MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170b9
  15. + MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170b9
  16. + MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x130b9
  17. + >;
  18. + };
  19. +
  20. + pinctrl_cubox_i_usdhc2_200mhz: cubox-i-usdhc2-200mhz {
  21. + fsl,pins = <
  22. + MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170f9
  23. + MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100f9
  24. + MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170f9
  25. + MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170f9
  26. + MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170f9
  27. + MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x130f9
  28. + >;
  29. + };
  30. };
  31. };
  32. @@ -194,8 +216,10 @@
  33. };
  34. &usdhc2 {
  35. - pinctrl-names = "default";
  36. + pinctrl-names = "default", "state_100mhz", "state_200mhz";
  37. pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>;
  38. + pinctrl-1 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2_100mhz>;
  39. + pinctrl-2 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2_200mhz>;
  40. vmmc-supply = <&reg_3p3v>;
  41. cd-gpios = <&gpio1 4 0>;
  42. status = "okay";
  43. diff -Nur linux-3.18.8.orig/arch/arm/boot/dts/imx6qdl-microsom.dtsi linux-3.18.8/arch/arm/boot/dts/imx6qdl-microsom.dtsi
  44. --- linux-3.18.8.orig/arch/arm/boot/dts/imx6qdl-microsom.dtsi 2015-02-27 02:49:36.000000000 +0100
  45. +++ linux-3.18.8/arch/arm/boot/dts/imx6qdl-microsom.dtsi 2015-03-02 02:58:12.000000000 +0100
  46. @@ -1,15 +1,95 @@
  47. /*
  48. * Copyright (C) 2013,2014 Russell King
  49. */
  50. +#include <dt-bindings/gpio/gpio.h>
  51. +/ {
  52. + regulators {
  53. + compatible = "simple-bus";
  54. +
  55. + reg_brcm_osc: brcm-osc-reg {
  56. + compatible = "regulator-fixed";
  57. + enable-active-high;
  58. + gpio = <&gpio5 5 0>;
  59. + pinctrl-names = "default";
  60. + pinctrl-0 = <&pinctrl_microsom_brcm_osc_reg>;
  61. + regulator-name = "brcm_osc_reg";
  62. + regulator-min-microvolt = <3300000>;
  63. + regulator-max-microvolt = <3300000>;
  64. + regulator-always-on;
  65. + regulator-boot-on;
  66. + };
  67. +
  68. + reg_brcm: brcm-reg {
  69. + compatible = "regulator-fixed";
  70. + enable-active-high;
  71. + gpio = <&gpio3 19 0>;
  72. + pinctrl-names = "default";
  73. + pinctrl-0 = <&pinctrl_microsom_brcm_reg>;
  74. + regulator-name = "brcm_reg";
  75. + regulator-min-microvolt = <3300000>;
  76. + regulator-max-microvolt = <3300000>;
  77. + startup-delay-us = <200000>;
  78. + };
  79. + };
  80. +};
  81. &iomuxc {
  82. microsom {
  83. + pinctrl_microsom_brcm_bt: microsom-brcm-bt {
  84. + fsl,pins = <
  85. + MX6QDL_PAD_CSI0_DAT14__GPIO6_IO00 0x40013070
  86. + MX6QDL_PAD_CSI0_DAT15__GPIO6_IO01 0x40013070
  87. + MX6QDL_PAD_CSI0_DAT18__GPIO6_IO04 0x40013070
  88. + >;
  89. + };
  90. +
  91. + pinctrl_microsom_brcm_osc_reg: microsom-brcm-osc-reg {
  92. + fsl,pins = <
  93. + MX6QDL_PAD_DISP0_DAT11__GPIO5_IO05 0x40013070
  94. + >;
  95. + };
  96. +
  97. + pinctrl_microsom_brcm_reg: microsom-brcm-reg {
  98. + fsl,pins = <
  99. + MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x40013070
  100. + >;
  101. + };
  102. +
  103. + pinctrl_microsom_brcm_wifi: microsom-brcm-wifi {
  104. + fsl,pins = <
  105. + MX6QDL_PAD_GPIO_8__XTALOSC_REF_CLK_32K 0x1b0b0
  106. + MX6QDL_PAD_CSI0_DATA_EN__GPIO5_IO20 0x40013070
  107. + MX6QDL_PAD_CSI0_DAT8__GPIO5_IO26 0x40013070
  108. + MX6QDL_PAD_CSI0_DAT9__GPIO5_IO27 0x40013070
  109. + >;
  110. + };
  111. +
  112. pinctrl_microsom_uart1: microsom-uart1 {
  113. fsl,pins = <
  114. MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
  115. MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
  116. >;
  117. };
  118. +
  119. + pinctrl_microsom_uart4_1: microsom-uart4 {
  120. + fsl,pins = <
  121. + MX6QDL_PAD_CSI0_DAT12__UART4_TX_DATA 0x1b0b1
  122. + MX6QDL_PAD_CSI0_DAT13__UART4_RX_DATA 0x1b0b1
  123. + MX6QDL_PAD_CSI0_DAT16__UART4_RTS_B 0x1b0b1
  124. + MX6QDL_PAD_CSI0_DAT17__UART4_CTS_B 0x1b0b1
  125. + >;
  126. + };
  127. +
  128. + pinctrl_microsom_usdhc1: microsom-usdhc1 {
  129. + fsl,pins = <
  130. + MX6QDL_PAD_SD1_CMD__SD1_CMD 0x17059
  131. + MX6QDL_PAD_SD1_CLK__SD1_CLK 0x10059
  132. + MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17059
  133. + MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17059
  134. + MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17059
  135. + MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17059
  136. + >;
  137. + };
  138. };
  139. };
  140. @@ -18,3 +98,23 @@
  141. pinctrl-0 = <&pinctrl_microsom_uart1>;
  142. status = "okay";
  143. };
  144. +
  145. +/* UART4 - Connected to optional BRCM Wifi/BT/FM */
  146. +&uart4 {
  147. + pinctrl-names = "default";
  148. + pinctrl-0 = <&pinctrl_microsom_brcm_bt &pinctrl_microsom_uart4_1>;
  149. + fsl,uart-has-rtscts;
  150. + status = "okay";
  151. +};
  152. +
  153. +/* USDHC1 - Connected to optional BRCM Wifi/BT/FM */
  154. +&usdhc1 {
  155. + card-external-vcc-supply = <&reg_brcm>;
  156. + card-reset-gpios = <&gpio5 26 GPIO_ACTIVE_LOW>, <&gpio6 0 GPIO_ACTIVE_LOW>;
  157. + keep-power-in-suspend;
  158. + non-removable;
  159. + pinctrl-names = "default";
  160. + pinctrl-0 = <&pinctrl_microsom_brcm_wifi &pinctrl_microsom_usdhc1>;
  161. + vmmc-supply = <&reg_brcm>;
  162. + status = "okay";
  163. +};
  164. diff -Nur linux-3.18.8.orig/Documentation/devicetree/bindings/mmc/mmc.txt linux-3.18.8/Documentation/devicetree/bindings/mmc/mmc.txt
  165. --- linux-3.18.8.orig/Documentation/devicetree/bindings/mmc/mmc.txt 2015-02-27 02:49:36.000000000 +0100
  166. +++ linux-3.18.8/Documentation/devicetree/bindings/mmc/mmc.txt 2015-03-02 03:25:33.000000000 +0100
  167. @@ -5,6 +5,8 @@
  168. Interpreted by the OF core:
  169. - reg: Registers location and length.
  170. - interrupts: Interrupts used by the MMC controller.
  171. +- clocks: Clocks needed for the host controller, if any.
  172. +- clock-names: Goes with clocks above.
  173. Card detection:
  174. If no property below is supplied, host native card detect is used.
  175. @@ -43,6 +45,15 @@
  176. - dsr: Value the card's (optional) Driver Stage Register (DSR) should be
  177. programmed with. Valid range: [0 .. 0xffff].
  178. +Card power and reset control:
  179. +The following properties can be specified for cases where the MMC
  180. +peripheral needs additional reset, regulator and clock lines. It is for
  181. +example common for WiFi/BT adapters to have these separate from the main
  182. +MMC bus:
  183. + - card-reset-gpios: Specify GPIOs for card reset (reset active low)
  184. + - card-external-vcc-supply: Regulator to drive (independent) card VCC
  185. + - clock with name "card_ext_clock": External clock provided to the card
  186. +
  187. *NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line
  188. polarity properties, we have to fix the meaning of the "normal" and "inverted"
  189. line levels. We choose to follow the SDHCI standard, which specifies both those
  190. diff -Nur linux-3.18.8.orig/drivers/mmc/core/core.c linux-3.18.8/drivers/mmc/core/core.c
  191. --- linux-3.18.8.orig/drivers/mmc/core/core.c 2015-02-27 02:49:36.000000000 +0100
  192. +++ linux-3.18.8/drivers/mmc/core/core.c 2015-03-02 03:25:33.000000000 +0100
  193. @@ -13,11 +13,13 @@
  194. #include <linux/module.h>
  195. #include <linux/init.h>
  196. #include <linux/interrupt.h>
  197. +#include <linux/clk.h>
  198. #include <linux/completion.h>
  199. #include <linux/device.h>
  200. #include <linux/delay.h>
  201. #include <linux/pagemap.h>
  202. #include <linux/err.h>
  203. +#include <linux/gpio/consumer.h>
  204. #include <linux/leds.h>
  205. #include <linux/scatterlist.h>
  206. #include <linux/log2.h>
  207. @@ -1507,6 +1509,43 @@
  208. mmc_host_clk_release(host);
  209. }
  210. +static void mmc_card_power_up(struct mmc_host *host)
  211. +{
  212. + int i;
  213. + struct gpio_desc **gds = host->card_reset_gpios;
  214. +
  215. + for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) {
  216. + if (gds[i]) {
  217. + dev_dbg(host->parent, "Asserting reset line %d", i);
  218. + gpiod_set_value(gds[i], 1);
  219. + }
  220. + }
  221. +
  222. + if (host->card_regulator) {
  223. + dev_dbg(host->parent, "Enabling external regulator");
  224. + if (regulator_enable(host->card_regulator))
  225. + dev_err(host->parent, "Failed to enable external regulator");
  226. + }
  227. +
  228. + if (host->card_clk) {
  229. + dev_dbg(host->parent, "Enabling external clock");
  230. + clk_prepare_enable(host->card_clk);
  231. + }
  232. +
  233. + /* 20ms delay to let clocks and power settle */
  234. + mmc_delay(20);
  235. +
  236. + for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) {
  237. + if (gds[i]) {
  238. + dev_dbg(host->parent, "Deasserting reset line %d", i);
  239. + gpiod_set_value(gds[i], 0);
  240. + }
  241. + }
  242. +
  243. + /* 20ms delay after reset release */
  244. + mmc_delay(20);
  245. +}
  246. +
  247. /*
  248. * Apply power to the MMC stack. This is a two-stage process.
  249. * First, we enable power to the card without the clock running.
  250. @@ -1523,6 +1562,9 @@
  251. if (host->ios.power_mode == MMC_POWER_ON)
  252. return;
  253. + /* Power up the card/module first, if needed */
  254. + mmc_card_power_up(host);
  255. +
  256. mmc_host_clk_hold(host);
  257. host->ios.vdd = fls(ocr) - 1;
  258. diff -Nur linux-3.18.8.orig/drivers/mmc/core/host.c linux-3.18.8/drivers/mmc/core/host.c
  259. --- linux-3.18.8.orig/drivers/mmc/core/host.c 2015-02-27 02:49:36.000000000 +0100
  260. +++ linux-3.18.8/drivers/mmc/core/host.c 2015-03-02 03:26:23.000000000 +0100
  261. @@ -12,14 +12,18 @@
  262. * MMC host class device management
  263. */
  264. +#include <linux/kernel.h>
  265. +#include <linux/clk.h>
  266. #include <linux/device.h>
  267. #include <linux/err.h>
  268. +#include <linux/gpio/consumer.h>
  269. #include <linux/idr.h>
  270. #include <linux/of.h>
  271. #include <linux/of_gpio.h>
  272. #include <linux/pagemap.h>
  273. #include <linux/export.h>
  274. #include <linux/leds.h>
  275. +#include <linux/regulator/consumer.h>
  276. #include <linux/slab.h>
  277. #include <linux/suspend.h>
  278. @@ -466,6 +470,66 @@
  279. EXPORT_SYMBOL(mmc_of_parse);
  280. +static int mmc_of_parse_child(struct mmc_host *host)
  281. +{
  282. + struct device_node *np;
  283. + struct clk *clk;
  284. + int i;
  285. +
  286. + if (!host->parent || !host->parent->of_node)
  287. + return 0;
  288. +
  289. + np = host->parent->of_node;
  290. +
  291. + host->card_regulator = regulator_get(host->parent, "card-external-vcc");
  292. + if (IS_ERR(host->card_regulator)) {
  293. + if (PTR_ERR(host->card_regulator) == -EPROBE_DEFER)
  294. + return PTR_ERR(host->card_regulator);
  295. + host->card_regulator = NULL;
  296. + }
  297. +
  298. + /* Parse card power/reset/clock control */
  299. + if (of_find_property(np, "card-reset-gpios", NULL)) {
  300. + struct gpio_desc *gpd;
  301. + int level = 0;
  302. +
  303. + /*
  304. + * If the regulator is enabled, then we can hold the
  305. + * card in reset with an active high resets. Otherwise,
  306. + * hold the resets low.
  307. + */
  308. + if (host->card_regulator && regulator_is_enabled(host->card_regulator))
  309. + level = 1;
  310. +
  311. + for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) {
  312. + gpd = devm_gpiod_get_index(host->parent, "card-reset", i);
  313. + if (IS_ERR(gpd)) {
  314. + if (PTR_ERR(gpd) == -EPROBE_DEFER)
  315. + return PTR_ERR(gpd);
  316. + break;
  317. + }
  318. + gpiod_direction_output(gpd, gpiod_is_active_low(gpd) | level);
  319. + host->card_reset_gpios[i] = gpd;
  320. + }
  321. +
  322. + gpd = devm_gpiod_get_index(host->parent, "card-reset", ARRAY_SIZE(host->card_reset_gpios));
  323. + if (!IS_ERR(gpd)) {
  324. + dev_warn(host->parent, "More reset gpios than we can handle");
  325. + gpiod_put(gpd);
  326. + }
  327. + }
  328. +
  329. + clk = of_clk_get_by_name(np, "card_ext_clock");
  330. + if (IS_ERR(clk)) {
  331. + if (PTR_ERR(clk) == -EPROBE_DEFER)
  332. + return PTR_ERR(clk);
  333. + clk = NULL;
  334. + }
  335. + host->card_clk = clk;
  336. +
  337. + return 0;
  338. +}
  339. +
  340. /**
  341. * mmc_alloc_host - initialise the per-host structure.
  342. * @extra: sizeof private data structure
  343. @@ -545,6 +609,10 @@
  344. {
  345. int err;
  346. + err = mmc_of_parse_child(host);
  347. + if (err)
  348. + return err;
  349. +
  350. WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
  351. !host->ops->enable_sdio_irq);
  352. diff -Nur linux-3.18.8.orig/drivers/mmc/host/dw_mmc.c linux-3.18.8/drivers/mmc/host/dw_mmc.c
  353. --- linux-3.18.8.orig/drivers/mmc/host/dw_mmc.c 2015-02-27 02:49:36.000000000 +0100
  354. +++ linux-3.18.8/drivers/mmc/host/dw_mmc.c 2015-03-02 03:25:56.000000000 +0100
  355. @@ -2211,6 +2211,8 @@
  356. if (!mmc)
  357. return -ENOMEM;
  358. + mmc_of_parse(mmc);
  359. +
  360. slot = mmc_priv(mmc);
  361. slot->id = id;
  362. slot->mmc = mmc;
  363. diff -Nur linux-3.18.8.orig/drivers/mmc/host/dw_mmc.c.orig linux-3.18.8/drivers/mmc/host/dw_mmc.c.orig
  364. --- linux-3.18.8.orig/drivers/mmc/host/dw_mmc.c.orig 1970-01-01 01:00:00.000000000 +0100
  365. +++ linux-3.18.8/drivers/mmc/host/dw_mmc.c.orig 2015-02-27 02:49:36.000000000 +0100
  366. @@ -0,0 +1,2855 @@
  367. +/*
  368. + * Synopsys DesignWare Multimedia Card Interface driver
  369. + * (Based on NXP driver for lpc 31xx)
  370. + *
  371. + * Copyright (C) 2009 NXP Semiconductors
  372. + * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
  373. + *
  374. + * This program is free software; you can redistribute it and/or modify
  375. + * it under the terms of the GNU General Public License as published by
  376. + * the Free Software Foundation; either version 2 of the License, or
  377. + * (at your option) any later version.
  378. + */
  379. +
  380. +#include <linux/blkdev.h>
  381. +#include <linux/clk.h>
  382. +#include <linux/debugfs.h>
  383. +#include <linux/device.h>
  384. +#include <linux/dma-mapping.h>
  385. +#include <linux/err.h>
  386. +#include <linux/init.h>
  387. +#include <linux/interrupt.h>
  388. +#include <linux/ioport.h>
  389. +#include <linux/module.h>
  390. +#include <linux/platform_device.h>
  391. +#include <linux/seq_file.h>
  392. +#include <linux/slab.h>
  393. +#include <linux/stat.h>
  394. +#include <linux/delay.h>
  395. +#include <linux/irq.h>
  396. +#include <linux/mmc/host.h>
  397. +#include <linux/mmc/mmc.h>
  398. +#include <linux/mmc/sd.h>
  399. +#include <linux/mmc/sdio.h>
  400. +#include <linux/mmc/dw_mmc.h>
  401. +#include <linux/bitops.h>
  402. +#include <linux/regulator/consumer.h>
  403. +#include <linux/workqueue.h>
  404. +#include <linux/of.h>
  405. +#include <linux/of_gpio.h>
  406. +#include <linux/mmc/slot-gpio.h>
  407. +
  408. +#include "dw_mmc.h"
  409. +
  410. +/* Common flag combinations */
  411. +#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
  412. + SDMMC_INT_HTO | SDMMC_INT_SBE | \
  413. + SDMMC_INT_EBE)
  414. +#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
  415. + SDMMC_INT_RESP_ERR)
  416. +#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
  417. + DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
  418. +#define DW_MCI_SEND_STATUS 1
  419. +#define DW_MCI_RECV_STATUS 2
  420. +#define DW_MCI_DMA_THRESHOLD 16
  421. +
  422. +#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
  423. +#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
  424. +
  425. +#ifdef CONFIG_MMC_DW_IDMAC
  426. +#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
  427. + SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
  428. + SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
  429. + SDMMC_IDMAC_INT_TI)
  430. +
  431. +struct idmac_desc {
  432. + u32 des0; /* Control Descriptor */
  433. +#define IDMAC_DES0_DIC BIT(1)
  434. +#define IDMAC_DES0_LD BIT(2)
  435. +#define IDMAC_DES0_FD BIT(3)
  436. +#define IDMAC_DES0_CH BIT(4)
  437. +#define IDMAC_DES0_ER BIT(5)
  438. +#define IDMAC_DES0_CES BIT(30)
  439. +#define IDMAC_DES0_OWN BIT(31)
  440. +
  441. + u32 des1; /* Buffer sizes */
  442. +#define IDMAC_SET_BUFFER1_SIZE(d, s) \
  443. + ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
  444. +
  445. + u32 des2; /* buffer 1 physical address */
  446. +
  447. + u32 des3; /* buffer 2 physical address */
  448. +};
  449. +#endif /* CONFIG_MMC_DW_IDMAC */
  450. +
  451. +static bool dw_mci_reset(struct dw_mci *host);
  452. +
  453. +#if defined(CONFIG_DEBUG_FS)
  454. +static int dw_mci_req_show(struct seq_file *s, void *v)
  455. +{
  456. + struct dw_mci_slot *slot = s->private;
  457. + struct mmc_request *mrq;
  458. + struct mmc_command *cmd;
  459. + struct mmc_command *stop;
  460. + struct mmc_data *data;
  461. +
  462. + /* Make sure we get a consistent snapshot */
  463. + spin_lock_bh(&slot->host->lock);
  464. + mrq = slot->mrq;
  465. +
  466. + if (mrq) {
  467. + cmd = mrq->cmd;
  468. + data = mrq->data;
  469. + stop = mrq->stop;
  470. +
  471. + if (cmd)
  472. + seq_printf(s,
  473. + "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
  474. + cmd->opcode, cmd->arg, cmd->flags,
  475. + cmd->resp[0], cmd->resp[1], cmd->resp[2],
  476. + cmd->resp[2], cmd->error);
  477. + if (data)
  478. + seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
  479. + data->bytes_xfered, data->blocks,
  480. + data->blksz, data->flags, data->error);
  481. + if (stop)
  482. + seq_printf(s,
  483. + "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
  484. + stop->opcode, stop->arg, stop->flags,
  485. + stop->resp[0], stop->resp[1], stop->resp[2],
  486. + stop->resp[2], stop->error);
  487. + }
  488. +
  489. + spin_unlock_bh(&slot->host->lock);
  490. +
  491. + return 0;
  492. +}
  493. +
  494. +static int dw_mci_req_open(struct inode *inode, struct file *file)
  495. +{
  496. + return single_open(file, dw_mci_req_show, inode->i_private);
  497. +}
  498. +
  499. +static const struct file_operations dw_mci_req_fops = {
  500. + .owner = THIS_MODULE,
  501. + .open = dw_mci_req_open,
  502. + .read = seq_read,
  503. + .llseek = seq_lseek,
  504. + .release = single_release,
  505. +};
  506. +
  507. +static int dw_mci_regs_show(struct seq_file *s, void *v)
  508. +{
  509. + seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
  510. + seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
  511. + seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
  512. + seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
  513. + seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
  514. + seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
  515. +
  516. + return 0;
  517. +}
  518. +
  519. +static int dw_mci_regs_open(struct inode *inode, struct file *file)
  520. +{
  521. + return single_open(file, dw_mci_regs_show, inode->i_private);
  522. +}
  523. +
  524. +static const struct file_operations dw_mci_regs_fops = {
  525. + .owner = THIS_MODULE,
  526. + .open = dw_mci_regs_open,
  527. + .read = seq_read,
  528. + .llseek = seq_lseek,
  529. + .release = single_release,
  530. +};
  531. +
  532. +static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
  533. +{
  534. + struct mmc_host *mmc = slot->mmc;
  535. + struct dw_mci *host = slot->host;
  536. + struct dentry *root;
  537. + struct dentry *node;
  538. +
  539. + root = mmc->debugfs_root;
  540. + if (!root)
  541. + return;
  542. +
  543. + node = debugfs_create_file("regs", S_IRUSR, root, host,
  544. + &dw_mci_regs_fops);
  545. + if (!node)
  546. + goto err;
  547. +
  548. + node = debugfs_create_file("req", S_IRUSR, root, slot,
  549. + &dw_mci_req_fops);
  550. + if (!node)
  551. + goto err;
  552. +
  553. + node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
  554. + if (!node)
  555. + goto err;
  556. +
  557. + node = debugfs_create_x32("pending_events", S_IRUSR, root,
  558. + (u32 *)&host->pending_events);
  559. + if (!node)
  560. + goto err;
  561. +
  562. + node = debugfs_create_x32("completed_events", S_IRUSR, root,
  563. + (u32 *)&host->completed_events);
  564. + if (!node)
  565. + goto err;
  566. +
  567. + return;
  568. +
  569. +err:
  570. + dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
  571. +}
  572. +#endif /* defined(CONFIG_DEBUG_FS) */
  573. +
  574. +static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);
  575. +
  576. +static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
  577. +{
  578. + struct mmc_data *data;
  579. + struct dw_mci_slot *slot = mmc_priv(mmc);
  580. + struct dw_mci *host = slot->host;
  581. + const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
  582. + u32 cmdr;
  583. + cmd->error = -EINPROGRESS;
  584. +
  585. + cmdr = cmd->opcode;
  586. +
  587. + if (cmd->opcode == MMC_STOP_TRANSMISSION ||
  588. + cmd->opcode == MMC_GO_IDLE_STATE ||
  589. + cmd->opcode == MMC_GO_INACTIVE_STATE ||
  590. + (cmd->opcode == SD_IO_RW_DIRECT &&
  591. + ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
  592. + cmdr |= SDMMC_CMD_STOP;
  593. + else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
  594. + cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
  595. +
  596. + if (cmd->opcode == SD_SWITCH_VOLTAGE) {
  597. + u32 clk_en_a;
  598. +
  599. + /* Special bit makes CMD11 not die */
  600. + cmdr |= SDMMC_CMD_VOLT_SWITCH;
  601. +
  602. + /* Change state to continue to handle CMD11 weirdness */
  603. + WARN_ON(slot->host->state != STATE_SENDING_CMD);
  604. + slot->host->state = STATE_SENDING_CMD11;
  605. +
  606. + /*
  607. + * We need to disable low power mode (automatic clock stop)
  608. + * while doing voltage switch so we don't confuse the card,
  609. + * since stopping the clock is a specific part of the UHS
  610. + * voltage change dance.
  611. + *
  612. + * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
  613. + * unconditionally turned back on in dw_mci_setup_bus() if it's
  614. + * ever called with a non-zero clock. That shouldn't happen
  615. + * until the voltage change is all done.
  616. + */
  617. + clk_en_a = mci_readl(host, CLKENA);
  618. + clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
  619. + mci_writel(host, CLKENA, clk_en_a);
  620. + mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
  621. + SDMMC_CMD_PRV_DAT_WAIT, 0);
  622. + }
  623. +
  624. + if (cmd->flags & MMC_RSP_PRESENT) {
  625. + /* We expect a response, so set this bit */
  626. + cmdr |= SDMMC_CMD_RESP_EXP;
  627. + if (cmd->flags & MMC_RSP_136)
  628. + cmdr |= SDMMC_CMD_RESP_LONG;
  629. + }
  630. +
  631. + if (cmd->flags & MMC_RSP_CRC)
  632. + cmdr |= SDMMC_CMD_RESP_CRC;
  633. +
  634. + data = cmd->data;
  635. + if (data) {
  636. + cmdr |= SDMMC_CMD_DAT_EXP;
  637. + if (data->flags & MMC_DATA_STREAM)
  638. + cmdr |= SDMMC_CMD_STRM_MODE;
  639. + if (data->flags & MMC_DATA_WRITE)
  640. + cmdr |= SDMMC_CMD_DAT_WR;
  641. + }
  642. +
  643. + if (drv_data && drv_data->prepare_command)
  644. + drv_data->prepare_command(slot->host, &cmdr);
  645. +
  646. + return cmdr;
  647. +}
  648. +
  649. +static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
  650. +{
  651. + struct mmc_command *stop;
  652. + u32 cmdr;
  653. +
  654. + if (!cmd->data)
  655. + return 0;
  656. +
  657. + stop = &host->stop_abort;
  658. + cmdr = cmd->opcode;
  659. + memset(stop, 0, sizeof(struct mmc_command));
  660. +
  661. + if (cmdr == MMC_READ_SINGLE_BLOCK ||
  662. + cmdr == MMC_READ_MULTIPLE_BLOCK ||
  663. + cmdr == MMC_WRITE_BLOCK ||
  664. + cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
  665. + stop->opcode = MMC_STOP_TRANSMISSION;
  666. + stop->arg = 0;
  667. + stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
  668. + } else if (cmdr == SD_IO_RW_EXTENDED) {
  669. + stop->opcode = SD_IO_RW_DIRECT;
  670. + stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
  671. + ((cmd->arg >> 28) & 0x7);
  672. + stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
  673. + } else {
  674. + return 0;
  675. + }
  676. +
  677. + cmdr = stop->opcode | SDMMC_CMD_STOP |
  678. + SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
  679. +
  680. + return cmdr;
  681. +}
  682. +
  683. +static void dw_mci_start_command(struct dw_mci *host,
  684. + struct mmc_command *cmd, u32 cmd_flags)
  685. +{
  686. + host->cmd = cmd;
  687. + dev_vdbg(host->dev,
  688. + "start command: ARGR=0x%08x CMDR=0x%08x\n",
  689. + cmd->arg, cmd_flags);
  690. +
  691. + mci_writel(host, CMDARG, cmd->arg);
  692. + wmb();
  693. +
  694. + mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
  695. +}
  696. +
  697. +static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
  698. +{
  699. + struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
  700. + dw_mci_start_command(host, stop, host->stop_cmdr);
  701. +}
  702. +
  703. +/* DMA interface functions */
  704. +static void dw_mci_stop_dma(struct dw_mci *host)
  705. +{
  706. + if (host->using_dma) {
  707. + host->dma_ops->stop(host);
  708. + host->dma_ops->cleanup(host);
  709. + }
  710. +
  711. + /* Data transfer was stopped by the interrupt handler */
  712. + set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
  713. +}
  714. +
  715. +static int dw_mci_get_dma_dir(struct mmc_data *data)
  716. +{
  717. + if (data->flags & MMC_DATA_WRITE)
  718. + return DMA_TO_DEVICE;
  719. + else
  720. + return DMA_FROM_DEVICE;
  721. +}
  722. +
  723. +#ifdef CONFIG_MMC_DW_IDMAC
  724. +static void dw_mci_dma_cleanup(struct dw_mci *host)
  725. +{
  726. + struct mmc_data *data = host->data;
  727. +
  728. + if (data)
  729. + if (!data->host_cookie)
  730. + dma_unmap_sg(host->dev,
  731. + data->sg,
  732. + data->sg_len,
  733. + dw_mci_get_dma_dir(data));
  734. +}
  735. +
  736. +static void dw_mci_idmac_reset(struct dw_mci *host)
  737. +{
  738. + u32 bmod = mci_readl(host, BMOD);
  739. + /* Software reset of DMA */
  740. + bmod |= SDMMC_IDMAC_SWRESET;
  741. + mci_writel(host, BMOD, bmod);
  742. +}
  743. +
  744. +static void dw_mci_idmac_stop_dma(struct dw_mci *host)
  745. +{
  746. + u32 temp;
  747. +
  748. + /* Disable and reset the IDMAC interface */
  749. + temp = mci_readl(host, CTRL);
  750. + temp &= ~SDMMC_CTRL_USE_IDMAC;
  751. + temp |= SDMMC_CTRL_DMA_RESET;
  752. + mci_writel(host, CTRL, temp);
  753. +
  754. + /* Stop the IDMAC running */
  755. + temp = mci_readl(host, BMOD);
  756. + temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
  757. + temp |= SDMMC_IDMAC_SWRESET;
  758. + mci_writel(host, BMOD, temp);
  759. +}
  760. +
  761. +static void dw_mci_idmac_complete_dma(struct dw_mci *host)
  762. +{
  763. + struct mmc_data *data = host->data;
  764. +
  765. + dev_vdbg(host->dev, "DMA complete\n");
  766. +
  767. + host->dma_ops->cleanup(host);
  768. +
  769. + /*
  770. + * If the card was removed, data will be NULL. No point in trying to
  771. + * send the stop command or waiting for NBUSY in this case.
  772. + */
  773. + if (data) {
  774. + set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
  775. + tasklet_schedule(&host->tasklet);
  776. + }
  777. +}
  778. +
  779. +static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
  780. + unsigned int sg_len)
  781. +{
  782. + int i;
  783. + struct idmac_desc *desc = host->sg_cpu;
  784. +
  785. + for (i = 0; i < sg_len; i++, desc++) {
  786. + unsigned int length = sg_dma_len(&data->sg[i]);
  787. + u32 mem_addr = sg_dma_address(&data->sg[i]);
  788. +
  789. + /* Set the OWN bit and disable interrupts for this descriptor */
  790. + desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
  791. +
  792. + /* Buffer length */
  793. + IDMAC_SET_BUFFER1_SIZE(desc, length);
  794. +
  795. + /* Physical address to DMA to/from */
  796. + desc->des2 = mem_addr;
  797. + }
  798. +
  799. + /* Set first descriptor */
  800. + desc = host->sg_cpu;
  801. + desc->des0 |= IDMAC_DES0_FD;
  802. +
  803. + /* Set last descriptor */
  804. + desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
  805. + desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
  806. + desc->des0 |= IDMAC_DES0_LD;
  807. +
  808. + wmb();
  809. +}
  810. +
  811. +static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
  812. +{
  813. + u32 temp;
  814. +
  815. + dw_mci_translate_sglist(host, host->data, sg_len);
  816. +
  817. + /* Select IDMAC interface */
  818. + temp = mci_readl(host, CTRL);
  819. + temp |= SDMMC_CTRL_USE_IDMAC;
  820. + mci_writel(host, CTRL, temp);
  821. +
  822. + wmb();
  823. +
  824. + /* Enable the IDMAC */
  825. + temp = mci_readl(host, BMOD);
  826. + temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
  827. + mci_writel(host, BMOD, temp);
  828. +
  829. + /* Start it running */
  830. + mci_writel(host, PLDMND, 1);
  831. +}
  832. +
  833. +static int dw_mci_idmac_init(struct dw_mci *host)
  834. +{
  835. + struct idmac_desc *p;
  836. + int i;
  837. +
  838. + /* Number of descriptors in the ring buffer */
  839. + host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
  840. +
  841. + /* Forward link the descriptor list */
  842. + for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
  843. + p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
  844. +
  845. + /* Set the last descriptor as the end-of-ring descriptor */
  846. + p->des3 = host->sg_dma;
  847. + p->des0 = IDMAC_DES0_ER;
  848. +
  849. + dw_mci_idmac_reset(host);
  850. +
  851. + /* Mask out interrupts - get Tx & Rx complete only */
  852. + mci_writel(host, IDSTS, IDMAC_INT_CLR);
  853. + mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
  854. + SDMMC_IDMAC_INT_TI);
  855. +
  856. + /* Set the descriptor base address */
  857. + mci_writel(host, DBADDR, host->sg_dma);
  858. + return 0;
  859. +}
  860. +
  861. +static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
  862. + .init = dw_mci_idmac_init,
  863. + .start = dw_mci_idmac_start_dma,
  864. + .stop = dw_mci_idmac_stop_dma,
  865. + .complete = dw_mci_idmac_complete_dma,
  866. + .cleanup = dw_mci_dma_cleanup,
  867. +};
  868. +#endif /* CONFIG_MMC_DW_IDMAC */
  869. +
  870. +static int dw_mci_pre_dma_transfer(struct dw_mci *host,
  871. + struct mmc_data *data,
  872. + bool next)
  873. +{
  874. + struct scatterlist *sg;
  875. + unsigned int i, sg_len;
  876. +
  877. + if (!next && data->host_cookie)
  878. + return data->host_cookie;
  879. +
  880. + /*
  881. + * We don't do DMA on "complex" transfers, i.e. with
  882. + * non-word-aligned buffers or lengths. Also, we don't bother
  883. + * with all the DMA setup overhead for short transfers.
  884. + */
  885. + if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
  886. + return -EINVAL;
  887. +
  888. + if (data->blksz & 3)
  889. + return -EINVAL;
  890. +
  891. + for_each_sg(data->sg, sg, data->sg_len, i) {
  892. + if (sg->offset & 3 || sg->length & 3)
  893. + return -EINVAL;
  894. + }
  895. +
  896. + sg_len = dma_map_sg(host->dev,
  897. + data->sg,
  898. + data->sg_len,
  899. + dw_mci_get_dma_dir(data));
  900. + if (sg_len == 0)
  901. + return -EINVAL;
  902. +
  903. + if (next)
  904. + data->host_cookie = sg_len;
  905. +
  906. + return sg_len;
  907. +}
  908. +
  909. +static void dw_mci_pre_req(struct mmc_host *mmc,
  910. + struct mmc_request *mrq,
  911. + bool is_first_req)
  912. +{
  913. + struct dw_mci_slot *slot = mmc_priv(mmc);
  914. + struct mmc_data *data = mrq->data;
  915. +
  916. + if (!slot->host->use_dma || !data)
  917. + return;
  918. +
  919. + if (data->host_cookie) {
  920. + data->host_cookie = 0;
  921. + return;
  922. + }
  923. +
  924. + if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
  925. + data->host_cookie = 0;
  926. +}
  927. +
  928. +static void dw_mci_post_req(struct mmc_host *mmc,
  929. + struct mmc_request *mrq,
  930. + int err)
  931. +{
  932. + struct dw_mci_slot *slot = mmc_priv(mmc);
  933. + struct mmc_data *data = mrq->data;
  934. +
  935. + if (!slot->host->use_dma || !data)
  936. + return;
  937. +
  938. + if (data->host_cookie)
  939. + dma_unmap_sg(slot->host->dev,
  940. + data->sg,
  941. + data->sg_len,
  942. + dw_mci_get_dma_dir(data));
  943. + data->host_cookie = 0;
  944. +}
  945. +
  946. +static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
  947. +{
  948. +#ifdef CONFIG_MMC_DW_IDMAC
  949. + unsigned int blksz = data->blksz;
  950. + const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
  951. + u32 fifo_width = 1 << host->data_shift;
  952. + u32 blksz_depth = blksz / fifo_width, fifoth_val;
  953. + u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
  954. + int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
  955. +
  956. + tx_wmark = (host->fifo_depth) / 2;
  957. + tx_wmark_invers = host->fifo_depth - tx_wmark;
  958. +
  959. + /*
  960. + * MSIZE is '1',
  961. + * if blksz is not a multiple of the FIFO width
  962. + */
  963. + if (blksz % fifo_width) {
  964. + msize = 0;
  965. + rx_wmark = 1;
  966. + goto done;
  967. + }
  968. +
  969. + do {
  970. + if (!((blksz_depth % mszs[idx]) ||
  971. + (tx_wmark_invers % mszs[idx]))) {
  972. + msize = idx;
  973. + rx_wmark = mszs[idx] - 1;
  974. + break;
  975. + }
  976. + } while (--idx > 0);
  977. + /*
  978. + * If idx is '0', it won't be tried
  979. + * Thus, initial values are uesed
  980. + */
  981. +done:
  982. + fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
  983. + mci_writel(host, FIFOTH, fifoth_val);
  984. +#endif
  985. +}
  986. +
  987. +static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
  988. +{
  989. + unsigned int blksz = data->blksz;
  990. + u32 blksz_depth, fifo_depth;
  991. + u16 thld_size;
  992. +
  993. + WARN_ON(!(data->flags & MMC_DATA_READ));
  994. +
  995. + /*
  996. + * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
  997. + * in the FIFO region, so we really shouldn't access it).
  998. + */
  999. + if (host->verid < DW_MMC_240A)
  1000. + return;
  1001. +
  1002. + if (host->timing != MMC_TIMING_MMC_HS200 &&
  1003. + host->timing != MMC_TIMING_UHS_SDR104)
  1004. + goto disable;
  1005. +
  1006. + blksz_depth = blksz / (1 << host->data_shift);
  1007. + fifo_depth = host->fifo_depth;
  1008. +
  1009. + if (blksz_depth > fifo_depth)
  1010. + goto disable;
  1011. +
  1012. + /*
  1013. + * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
  1014. + * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
  1015. + * Currently just choose blksz.
  1016. + */
  1017. + thld_size = blksz;
  1018. + mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
  1019. + return;
  1020. +
  1021. +disable:
  1022. + mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
  1023. +}
  1024. +
  1025. +static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
  1026. +{
  1027. + int sg_len;
  1028. + u32 temp;
  1029. +
  1030. + host->using_dma = 0;
  1031. +
  1032. + /* If we don't have a channel, we can't do DMA */
  1033. + if (!host->use_dma)
  1034. + return -ENODEV;
  1035. +
  1036. + sg_len = dw_mci_pre_dma_transfer(host, data, 0);
  1037. + if (sg_len < 0) {
  1038. + host->dma_ops->stop(host);
  1039. + return sg_len;
  1040. + }
  1041. +
  1042. + host->using_dma = 1;
  1043. +
  1044. + dev_vdbg(host->dev,
  1045. + "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
  1046. + (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
  1047. + sg_len);
  1048. +
  1049. + /*
  1050. + * Decide the MSIZE and RX/TX Watermark.
  1051. + * If current block size is same with previous size,
  1052. + * no need to update fifoth.
  1053. + */
  1054. + if (host->prev_blksz != data->blksz)
  1055. + dw_mci_adjust_fifoth(host, data);
  1056. +
  1057. + /* Enable the DMA interface */
  1058. + temp = mci_readl(host, CTRL);
  1059. + temp |= SDMMC_CTRL_DMA_ENABLE;
  1060. + mci_writel(host, CTRL, temp);
  1061. +
  1062. + /* Disable RX/TX IRQs, let DMA handle it */
  1063. + temp = mci_readl(host, INTMASK);
  1064. + temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
  1065. + mci_writel(host, INTMASK, temp);
  1066. +
  1067. + host->dma_ops->start(host, sg_len);
  1068. +
  1069. + return 0;
  1070. +}
  1071. +
  1072. +static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
  1073. +{
  1074. + u32 temp;
  1075. +
  1076. + data->error = -EINPROGRESS;
  1077. +
  1078. + WARN_ON(host->data);
  1079. + host->sg = NULL;
  1080. + host->data = data;
  1081. +
  1082. + if (data->flags & MMC_DATA_READ) {
  1083. + host->dir_status = DW_MCI_RECV_STATUS;
  1084. + dw_mci_ctrl_rd_thld(host, data);
  1085. + } else {
  1086. + host->dir_status = DW_MCI_SEND_STATUS;
  1087. + }
  1088. +
  1089. + if (dw_mci_submit_data_dma(host, data)) {
  1090. + int flags = SG_MITER_ATOMIC;
  1091. + if (host->data->flags & MMC_DATA_READ)
  1092. + flags |= SG_MITER_TO_SG;
  1093. + else
  1094. + flags |= SG_MITER_FROM_SG;
  1095. +
  1096. + sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
  1097. + host->sg = data->sg;
  1098. + host->part_buf_start = 0;
  1099. + host->part_buf_count = 0;
  1100. +
  1101. + mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
  1102. + temp = mci_readl(host, INTMASK);
  1103. + temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
  1104. + mci_writel(host, INTMASK, temp);
  1105. +
  1106. + temp = mci_readl(host, CTRL);
  1107. + temp &= ~SDMMC_CTRL_DMA_ENABLE;
  1108. + mci_writel(host, CTRL, temp);
  1109. +
  1110. + /*
  1111. + * Use the initial fifoth_val for PIO mode.
  1112. + * If next issued data may be transfered by DMA mode,
  1113. + * prev_blksz should be invalidated.
  1114. + */
  1115. + mci_writel(host, FIFOTH, host->fifoth_val);
  1116. + host->prev_blksz = 0;
  1117. + } else {
  1118. + /*
  1119. + * Keep the current block size.
  1120. + * It will be used to decide whether to update
  1121. + * fifoth register next time.
  1122. + */
  1123. + host->prev_blksz = data->blksz;
  1124. + }
  1125. +}
  1126. +
  1127. +static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
  1128. +{
  1129. + struct dw_mci *host = slot->host;
  1130. + unsigned long timeout = jiffies + msecs_to_jiffies(500);
  1131. + unsigned int cmd_status = 0;
  1132. +
  1133. + mci_writel(host, CMDARG, arg);
  1134. + wmb();
  1135. + mci_writel(host, CMD, SDMMC_CMD_START | cmd);
  1136. +
  1137. + while (time_before(jiffies, timeout)) {
  1138. + cmd_status = mci_readl(host, CMD);
  1139. + if (!(cmd_status & SDMMC_CMD_START))
  1140. + return;
  1141. + }
  1142. + dev_err(&slot->mmc->class_dev,
  1143. + "Timeout sending command (cmd %#x arg %#x status %#x)\n",
  1144. + cmd, arg, cmd_status);
  1145. +}
  1146. +
  1147. +static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
  1148. +{
  1149. + struct dw_mci *host = slot->host;
  1150. + unsigned int clock = slot->clock;
  1151. + u32 div;
  1152. + u32 clk_en_a;
  1153. + u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;
  1154. +
  1155. + /* We must continue to set bit 28 in CMD until the change is complete */
  1156. + if (host->state == STATE_WAITING_CMD11_DONE)
  1157. + sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
  1158. +
  1159. + if (!clock) {
  1160. + mci_writel(host, CLKENA, 0);
  1161. + mci_send_cmd(slot, sdmmc_cmd_bits, 0);
  1162. + } else if (clock != host->current_speed || force_clkinit) {
  1163. + div = host->bus_hz / clock;
  1164. + if (host->bus_hz % clock && host->bus_hz > clock)
  1165. + /*
  1166. + * move the + 1 after the divide to prevent
  1167. + * over-clocking the card.
  1168. + */
  1169. + div += 1;
  1170. +
  1171. + div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
  1172. +
  1173. + if ((clock << div) != slot->__clk_old || force_clkinit)
  1174. + dev_info(&slot->mmc->class_dev,
  1175. + "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
  1176. + slot->id, host->bus_hz, clock,
  1177. + div ? ((host->bus_hz / div) >> 1) :
  1178. + host->bus_hz, div);
  1179. +
  1180. + /* disable clock */
  1181. + mci_writel(host, CLKENA, 0);
  1182. + mci_writel(host, CLKSRC, 0);
  1183. +
  1184. + /* inform CIU */
  1185. + mci_send_cmd(slot, sdmmc_cmd_bits, 0);
  1186. +
  1187. + /* set clock to desired speed */
  1188. + mci_writel(host, CLKDIV, div);
  1189. +
  1190. + /* inform CIU */
  1191. + mci_send_cmd(slot, sdmmc_cmd_bits, 0);
  1192. +
  1193. + /* enable clock; only low power if no SDIO */
  1194. + clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
  1195. + if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
  1196. + clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
  1197. + mci_writel(host, CLKENA, clk_en_a);
  1198. +
  1199. + /* inform CIU */
  1200. + mci_send_cmd(slot, sdmmc_cmd_bits, 0);
  1201. +
  1202. + /* keep the clock with reflecting clock dividor */
  1203. + slot->__clk_old = clock << div;
  1204. + }
  1205. +
  1206. + host->current_speed = clock;
  1207. +
  1208. + /* Set the current slot bus width */
  1209. + mci_writel(host, CTYPE, (slot->ctype << slot->id));
  1210. +}
  1211. +
  1212. +static void __dw_mci_start_request(struct dw_mci *host,
  1213. + struct dw_mci_slot *slot,
  1214. + struct mmc_command *cmd)
  1215. +{
  1216. + struct mmc_request *mrq;
  1217. + struct mmc_data *data;
  1218. + u32 cmdflags;
  1219. +
  1220. + mrq = slot->mrq;
  1221. +
  1222. + host->cur_slot = slot;
  1223. + host->mrq = mrq;
  1224. +
  1225. + host->pending_events = 0;
  1226. + host->completed_events = 0;
  1227. + host->cmd_status = 0;
  1228. + host->data_status = 0;
  1229. + host->dir_status = 0;
  1230. +
  1231. + data = cmd->data;
  1232. + if (data) {
  1233. + mci_writel(host, TMOUT, 0xFFFFFFFF);
  1234. + mci_writel(host, BYTCNT, data->blksz*data->blocks);
  1235. + mci_writel(host, BLKSIZ, data->blksz);
  1236. + }
  1237. +
  1238. + cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
  1239. +
  1240. + /* this is the first command, send the initialization clock */
  1241. + if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
  1242. + cmdflags |= SDMMC_CMD_INIT;
  1243. +
  1244. + if (data) {
  1245. + dw_mci_submit_data(host, data);
  1246. + wmb();
  1247. + }
  1248. +
  1249. + dw_mci_start_command(host, cmd, cmdflags);
  1250. +
  1251. + if (mrq->stop)
  1252. + host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
  1253. + else
  1254. + host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
  1255. +}
  1256. +
  1257. +static void dw_mci_start_request(struct dw_mci *host,
  1258. + struct dw_mci_slot *slot)
  1259. +{
  1260. + struct mmc_request *mrq = slot->mrq;
  1261. + struct mmc_command *cmd;
  1262. +
  1263. + cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
  1264. + __dw_mci_start_request(host, slot, cmd);
  1265. +}
  1266. +
  1267. +/* must be called with host->lock held */
  1268. +static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
  1269. + struct mmc_request *mrq)
  1270. +{
  1271. + dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
  1272. + host->state);
  1273. +
  1274. + slot->mrq = mrq;
  1275. +
  1276. + if (host->state == STATE_WAITING_CMD11_DONE) {
  1277. + dev_warn(&slot->mmc->class_dev,
  1278. + "Voltage change didn't complete\n");
  1279. + /*
  1280. + * this case isn't expected to happen, so we can
  1281. + * either crash here or just try to continue on
  1282. + * in the closest possible state
  1283. + */
  1284. + host->state = STATE_IDLE;
  1285. + }
  1286. +
  1287. + if (host->state == STATE_IDLE) {
  1288. + host->state = STATE_SENDING_CMD;
  1289. + dw_mci_start_request(host, slot);
  1290. + } else {
  1291. + list_add_tail(&slot->queue_node, &host->queue);
  1292. + }
  1293. +}
  1294. +
  1295. +static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
  1296. +{
  1297. + struct dw_mci_slot *slot = mmc_priv(mmc);
  1298. + struct dw_mci *host = slot->host;
  1299. +
  1300. + WARN_ON(slot->mrq);
  1301. +
  1302. + /*
  1303. + * The check for card presence and queueing of the request must be
  1304. + * atomic, otherwise the card could be removed in between and the
  1305. + * request wouldn't fail until another card was inserted.
  1306. + */
  1307. + spin_lock_bh(&host->lock);
  1308. +
  1309. + if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
  1310. + spin_unlock_bh(&host->lock);
  1311. + mrq->cmd->error = -ENOMEDIUM;
  1312. + mmc_request_done(mmc, mrq);
  1313. + return;
  1314. + }
  1315. +
  1316. + dw_mci_queue_request(host, slot, mrq);
  1317. +
  1318. + spin_unlock_bh(&host->lock);
  1319. +}
  1320. +
  1321. +static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
  1322. +{
  1323. + struct dw_mci_slot *slot = mmc_priv(mmc);
  1324. + const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
  1325. + u32 regs;
  1326. + int ret;
  1327. +
  1328. + switch (ios->bus_width) {
  1329. + case MMC_BUS_WIDTH_4:
  1330. + slot->ctype = SDMMC_CTYPE_4BIT;
  1331. + break;
  1332. + case MMC_BUS_WIDTH_8:
  1333. + slot->ctype = SDMMC_CTYPE_8BIT;
  1334. + break;
  1335. + default:
  1336. + /* set default 1 bit mode */
  1337. + slot->ctype = SDMMC_CTYPE_1BIT;
  1338. + }
  1339. +
  1340. + regs = mci_readl(slot->host, UHS_REG);
  1341. +
  1342. + /* DDR mode set */
  1343. + if (ios->timing == MMC_TIMING_MMC_DDR52)
  1344. + regs |= ((0x1 << slot->id) << 16);
  1345. + else
  1346. + regs &= ~((0x1 << slot->id) << 16);
  1347. +
  1348. + mci_writel(slot->host, UHS_REG, regs);
  1349. + slot->host->timing = ios->timing;
  1350. +
  1351. + /*
  1352. + * Use mirror of ios->clock to prevent race with mmc
  1353. + * core ios update when finding the minimum.
  1354. + */
  1355. + slot->clock = ios->clock;
  1356. +
  1357. + if (drv_data && drv_data->set_ios)
  1358. + drv_data->set_ios(slot->host, ios);
  1359. +
  1360. + /* Slot specific timing and width adjustment */
  1361. + dw_mci_setup_bus(slot, false);
  1362. +
  1363. + if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
  1364. + slot->host->state = STATE_IDLE;
  1365. +
  1366. + switch (ios->power_mode) {
  1367. + case MMC_POWER_UP:
  1368. + if (!IS_ERR(mmc->supply.vmmc)) {
  1369. + ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
  1370. + ios->vdd);
  1371. + if (ret) {
  1372. + dev_err(slot->host->dev,
  1373. + "failed to enable vmmc regulator\n");
  1374. + /*return, if failed turn on vmmc*/
  1375. + return;
  1376. + }
  1377. + }
  1378. + if (!IS_ERR(mmc->supply.vqmmc) && !slot->host->vqmmc_enabled) {
  1379. + ret = regulator_enable(mmc->supply.vqmmc);
  1380. + if (ret < 0)
  1381. + dev_err(slot->host->dev,
  1382. + "failed to enable vqmmc regulator\n");
  1383. + else
  1384. + slot->host->vqmmc_enabled = true;
  1385. + }
  1386. + set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
  1387. + regs = mci_readl(slot->host, PWREN);
  1388. + regs |= (1 << slot->id);
  1389. + mci_writel(slot->host, PWREN, regs);
  1390. + break;
  1391. + case MMC_POWER_OFF:
  1392. + if (!IS_ERR(mmc->supply.vmmc))
  1393. + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
  1394. +
  1395. + if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled) {
  1396. + regulator_disable(mmc->supply.vqmmc);
  1397. + slot->host->vqmmc_enabled = false;
  1398. + }
  1399. +
  1400. + regs = mci_readl(slot->host, PWREN);
  1401. + regs &= ~(1 << slot->id);
  1402. + mci_writel(slot->host, PWREN, regs);
  1403. + break;
  1404. + default:
  1405. + break;
  1406. + }
  1407. +}
  1408. +
  1409. +static int dw_mci_card_busy(struct mmc_host *mmc)
  1410. +{
  1411. + struct dw_mci_slot *slot = mmc_priv(mmc);
  1412. + u32 status;
  1413. +
  1414. + /*
  1415. + * Check the busy bit which is low when DAT[3:0]
  1416. + * (the data lines) are 0000
  1417. + */
  1418. + status = mci_readl(slot->host, STATUS);
  1419. +
  1420. + return !!(status & SDMMC_STATUS_BUSY);
  1421. +}
  1422. +
  1423. +static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
  1424. +{
  1425. + struct dw_mci_slot *slot = mmc_priv(mmc);
  1426. + struct dw_mci *host = slot->host;
  1427. + u32 uhs;
  1428. + u32 v18 = SDMMC_UHS_18V << slot->id;
  1429. + int min_uv, max_uv;
  1430. + int ret;
  1431. +
  1432. + /*
  1433. + * Program the voltage. Note that some instances of dw_mmc may use
  1434. + * the UHS_REG for this. For other instances (like exynos) the UHS_REG
  1435. + * does no harm but you need to set the regulator directly. Try both.
  1436. + */
  1437. + uhs = mci_readl(host, UHS_REG);
  1438. + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
  1439. + min_uv = 2700000;
  1440. + max_uv = 3600000;
  1441. + uhs &= ~v18;
  1442. + } else {
  1443. + min_uv = 1700000;
  1444. + max_uv = 1950000;
  1445. + uhs |= v18;
  1446. + }
  1447. + if (!IS_ERR(mmc->supply.vqmmc)) {
  1448. + ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);
  1449. +
  1450. + if (ret) {
  1451. + dev_err(&mmc->class_dev,
  1452. + "Regulator set error %d: %d - %d\n",
  1453. + ret, min_uv, max_uv);
  1454. + return ret;
  1455. + }
  1456. + }
  1457. + mci_writel(host, UHS_REG, uhs);
  1458. +
  1459. + return 0;
  1460. +}
  1461. +
  1462. +static int dw_mci_get_ro(struct mmc_host *mmc)
  1463. +{
  1464. + int read_only;
  1465. + struct dw_mci_slot *slot = mmc_priv(mmc);
  1466. + int gpio_ro = mmc_gpio_get_ro(mmc);
  1467. +
  1468. + /* Use platform get_ro function, else try on board write protect */
  1469. + if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) ||
  1470. + (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT))
  1471. + read_only = 0;
  1472. + else if (!IS_ERR_VALUE(gpio_ro))
  1473. + read_only = gpio_ro;
  1474. + else
  1475. + read_only =
  1476. + mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
  1477. +
  1478. + dev_dbg(&mmc->class_dev, "card is %s\n",
  1479. + read_only ? "read-only" : "read-write");
  1480. +
  1481. + return read_only;
  1482. +}
  1483. +
  1484. +static int dw_mci_get_cd(struct mmc_host *mmc)
  1485. +{
  1486. + int present;
  1487. + struct dw_mci_slot *slot = mmc_priv(mmc);
  1488. + struct dw_mci_board *brd = slot->host->pdata;
  1489. + struct dw_mci *host = slot->host;
  1490. + int gpio_cd = mmc_gpio_get_cd(mmc);
  1491. +
  1492. + /* Use platform get_cd function, else try onboard card detect */
  1493. + if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
  1494. + present = 1;
  1495. + else if (!IS_ERR_VALUE(gpio_cd))
  1496. + present = gpio_cd;
  1497. + else
  1498. + present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
  1499. + == 0 ? 1 : 0;
  1500. +
  1501. + spin_lock_bh(&host->lock);
  1502. + if (present) {
  1503. + set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
  1504. + dev_dbg(&mmc->class_dev, "card is present\n");
  1505. + } else {
  1506. + clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
  1507. + dev_dbg(&mmc->class_dev, "card is not present\n");
  1508. + }
  1509. + spin_unlock_bh(&host->lock);
  1510. +
  1511. + return present;
  1512. +}
  1513. +
  1514. +/*
  1515. + * Disable lower power mode.
  1516. + *
  1517. + * Low power mode will stop the card clock when idle. According to the
  1518. + * description of the CLKENA register we should disable low power mode
  1519. + * for SDIO cards if we need SDIO interrupts to work.
  1520. + *
  1521. + * This function is fast if low power mode is already disabled.
  1522. + */
  1523. +static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
  1524. +{
  1525. + struct dw_mci *host = slot->host;
  1526. + u32 clk_en_a;
  1527. + const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
  1528. +
  1529. + clk_en_a = mci_readl(host, CLKENA);
  1530. +
  1531. + if (clk_en_a & clken_low_pwr) {
  1532. + mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
  1533. + mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
  1534. + SDMMC_CMD_PRV_DAT_WAIT, 0);
  1535. + }
  1536. +}
  1537. +
  1538. +static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
  1539. +{
  1540. + struct dw_mci_slot *slot = mmc_priv(mmc);
  1541. + struct dw_mci *host = slot->host;
  1542. + u32 int_mask;
  1543. +
  1544. + /* Enable/disable Slot Specific SDIO interrupt */
  1545. + int_mask = mci_readl(host, INTMASK);
  1546. + if (enb) {
  1547. + /*
  1548. + * Turn off low power mode if it was enabled. This is a bit of
  1549. + * a heavy operation and we disable / enable IRQs a lot, so
  1550. + * we'll leave low power mode disabled and it will get
  1551. + * re-enabled again in dw_mci_setup_bus().
  1552. + */
  1553. + dw_mci_disable_low_power(slot);
  1554. +
  1555. + mci_writel(host, INTMASK,
  1556. + (int_mask | SDMMC_INT_SDIO(slot->id)));
  1557. + } else {
  1558. + mci_writel(host, INTMASK,
  1559. + (int_mask & ~SDMMC_INT_SDIO(slot->id)));
  1560. + }
  1561. +}
  1562. +
  1563. +static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
  1564. +{
  1565. + struct dw_mci_slot *slot = mmc_priv(mmc);
  1566. + struct dw_mci *host = slot->host;
  1567. + const struct dw_mci_drv_data *drv_data = host->drv_data;
  1568. + struct dw_mci_tuning_data tuning_data;
  1569. + int err = -ENOSYS;
  1570. +
  1571. + if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
  1572. + if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
  1573. + tuning_data.blk_pattern = tuning_blk_pattern_8bit;
  1574. + tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
  1575. + } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
  1576. + tuning_data.blk_pattern = tuning_blk_pattern_4bit;
  1577. + tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
  1578. + } else {
  1579. + return -EINVAL;
  1580. + }
  1581. + } else if (opcode == MMC_SEND_TUNING_BLOCK) {
  1582. + tuning_data.blk_pattern = tuning_blk_pattern_4bit;
  1583. + tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
  1584. + } else {
  1585. + dev_err(host->dev,
  1586. + "Undefined command(%d) for tuning\n", opcode);
  1587. + return -EINVAL;
  1588. + }
  1589. +
  1590. + if (drv_data && drv_data->execute_tuning)
  1591. + err = drv_data->execute_tuning(slot, opcode, &tuning_data);
  1592. + return err;
  1593. +}
  1594. +
  1595. +static const struct mmc_host_ops dw_mci_ops = {
  1596. + .request = dw_mci_request,
  1597. + .pre_req = dw_mci_pre_req,
  1598. + .post_req = dw_mci_post_req,
  1599. + .set_ios = dw_mci_set_ios,
  1600. + .get_ro = dw_mci_get_ro,
  1601. + .get_cd = dw_mci_get_cd,
  1602. + .enable_sdio_irq = dw_mci_enable_sdio_irq,
  1603. + .execute_tuning = dw_mci_execute_tuning,
  1604. + .card_busy = dw_mci_card_busy,
  1605. + .start_signal_voltage_switch = dw_mci_switch_voltage,
  1606. +
  1607. +};
  1608. +
  1609. +static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
  1610. + __releases(&host->lock)
  1611. + __acquires(&host->lock)
  1612. +{
  1613. + struct dw_mci_slot *slot;
  1614. + struct mmc_host *prev_mmc = host->cur_slot->mmc;
  1615. +
  1616. + WARN_ON(host->cmd || host->data);
  1617. +
  1618. + host->cur_slot->mrq = NULL;
  1619. + host->mrq = NULL;
  1620. + if (!list_empty(&host->queue)) {
  1621. + slot = list_entry(host->queue.next,
  1622. + struct dw_mci_slot, queue_node);
  1623. + list_del(&slot->queue_node);
  1624. + dev_vdbg(host->dev, "list not empty: %s is next\n",
  1625. + mmc_hostname(slot->mmc));
  1626. + host->state = STATE_SENDING_CMD;
  1627. + dw_mci_start_request(host, slot);
  1628. + } else {
  1629. + dev_vdbg(host->dev, "list empty\n");
  1630. +
  1631. + if (host->state == STATE_SENDING_CMD11)
  1632. + host->state = STATE_WAITING_CMD11_DONE;
  1633. + else
  1634. + host->state = STATE_IDLE;
  1635. + }
  1636. +
  1637. + spin_unlock(&host->lock);
  1638. + mmc_request_done(prev_mmc, mrq);
  1639. + spin_lock(&host->lock);
  1640. +}
  1641. +
  1642. +static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
  1643. +{
  1644. + u32 status = host->cmd_status;
  1645. +
  1646. + host->cmd_status = 0;
  1647. +
  1648. + /* Read the response from the card (up to 16 bytes) */
  1649. + if (cmd->flags & MMC_RSP_PRESENT) {
  1650. + if (cmd->flags & MMC_RSP_136) {
  1651. + cmd->resp[3] = mci_readl(host, RESP0);
  1652. + cmd->resp[2] = mci_readl(host, RESP1);
  1653. + cmd->resp[1] = mci_readl(host, RESP2);
  1654. + cmd->resp[0] = mci_readl(host, RESP3);
  1655. + } else {
  1656. + cmd->resp[0] = mci_readl(host, RESP0);
  1657. + cmd->resp[1] = 0;
  1658. + cmd->resp[2] = 0;
  1659. + cmd->resp[3] = 0;
  1660. + }
  1661. + }
  1662. +
  1663. + if (status & SDMMC_INT_RTO)
  1664. + cmd->error = -ETIMEDOUT;
  1665. + else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
  1666. + cmd->error = -EILSEQ;
  1667. + else if (status & SDMMC_INT_RESP_ERR)
  1668. + cmd->error = -EIO;
  1669. + else
  1670. + cmd->error = 0;
  1671. +
  1672. + if (cmd->error) {
  1673. + /* newer ip versions need a delay between retries */
  1674. + if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
  1675. + mdelay(20);
  1676. + }
  1677. +
  1678. + return cmd->error;
  1679. +}
  1680. +
  1681. +static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
  1682. +{
  1683. + u32 status = host->data_status;
  1684. +
  1685. + if (status & DW_MCI_DATA_ERROR_FLAGS) {
  1686. + if (status & SDMMC_INT_DRTO) {
  1687. + data->error = -ETIMEDOUT;
  1688. + } else if (status & SDMMC_INT_DCRC) {
  1689. + data->error = -EILSEQ;
  1690. + } else if (status & SDMMC_INT_EBE) {
  1691. + if (host->dir_status ==
  1692. + DW_MCI_SEND_STATUS) {
  1693. + /*
  1694. + * No data CRC status was returned.
  1695. + * The number of bytes transferred
  1696. + * will be exaggerated in PIO mode.
  1697. + */
  1698. + data->bytes_xfered = 0;
  1699. + data->error = -ETIMEDOUT;
  1700. + } else if (host->dir_status ==
  1701. + DW_MCI_RECV_STATUS) {
  1702. + data->error = -EIO;
  1703. + }
  1704. + } else {
  1705. + /* SDMMC_INT_SBE is included */
  1706. + data->error = -EIO;
  1707. + }
  1708. +
  1709. + dev_dbg(host->dev, "data error, status 0x%08x\n", status);
  1710. +
  1711. + /*
  1712. + * After an error, there may be data lingering
  1713. + * in the FIFO
  1714. + */
  1715. + dw_mci_reset(host);
  1716. + } else {
  1717. + data->bytes_xfered = data->blocks * data->blksz;
  1718. + data->error = 0;
  1719. + }
  1720. +
  1721. + return data->error;
  1722. +}
  1723. +
  1724. +static void dw_mci_tasklet_func(unsigned long priv)
  1725. +{
  1726. + struct dw_mci *host = (struct dw_mci *)priv;
  1727. + struct mmc_data *data;
  1728. + struct mmc_command *cmd;
  1729. + struct mmc_request *mrq;
  1730. + enum dw_mci_state state;
  1731. + enum dw_mci_state prev_state;
  1732. + unsigned int err;
  1733. +
  1734. + spin_lock(&host->lock);
  1735. +
  1736. + state = host->state;
  1737. + data = host->data;
  1738. + mrq = host->mrq;
  1739. +
  1740. + do {
  1741. + prev_state = state;
  1742. +
  1743. + switch (state) {
  1744. + case STATE_IDLE:
  1745. + case STATE_WAITING_CMD11_DONE:
  1746. + break;
  1747. +
  1748. + case STATE_SENDING_CMD11:
  1749. + case STATE_SENDING_CMD:
  1750. + if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
  1751. + &host->pending_events))
  1752. + break;
  1753. +
  1754. + cmd = host->cmd;
  1755. + host->cmd = NULL;
  1756. + set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
  1757. + err = dw_mci_command_complete(host, cmd);
  1758. + if (cmd == mrq->sbc && !err) {
  1759. + prev_state = state = STATE_SENDING_CMD;
  1760. + __dw_mci_start_request(host, host->cur_slot,
  1761. + mrq->cmd);
  1762. + goto unlock;
  1763. + }
  1764. +
  1765. + if (cmd->data && err) {
  1766. + dw_mci_stop_dma(host);
  1767. + send_stop_abort(host, data);
  1768. + state = STATE_SENDING_STOP;
  1769. + break;
  1770. + }
  1771. +
  1772. + if (!cmd->data || err) {
  1773. + dw_mci_request_end(host, mrq);
  1774. + goto unlock;
  1775. + }
  1776. +
  1777. + prev_state = state = STATE_SENDING_DATA;
  1778. + /* fall through */
  1779. +
  1780. + case STATE_SENDING_DATA:
  1781. + /*
  1782. + * We could get a data error and never a transfer
  1783. + * complete so we'd better check for it here.
  1784. + *
  1785. + * Note that we don't really care if we also got a
  1786. + * transfer complete; stopping the DMA and sending an
  1787. + * abort won't hurt.
  1788. + */
  1789. + if (test_and_clear_bit(EVENT_DATA_ERROR,
  1790. + &host->pending_events)) {
  1791. + dw_mci_stop_dma(host);
  1792. + send_stop_abort(host, data);
  1793. + state = STATE_DATA_ERROR;
  1794. + break;
  1795. + }
  1796. +
  1797. + if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
  1798. + &host->pending_events))
  1799. + break;
  1800. +
  1801. + set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
  1802. +
  1803. + /*
  1804. + * Handle an EVENT_DATA_ERROR that might have shown up
  1805. + * before the transfer completed. This might not have
  1806. + * been caught by the check above because the interrupt
  1807. + * could have gone off between the previous check and
  1808. + * the check for transfer complete.
  1809. + *
  1810. + * Technically this ought not be needed assuming we
  1811. + * get a DATA_COMPLETE eventually (we'll notice the
  1812. + * error and end the request), but it shouldn't hurt.
  1813. + *
  1814. + * This has the advantage of sending the stop command.
  1815. + */
  1816. + if (test_and_clear_bit(EVENT_DATA_ERROR,
  1817. + &host->pending_events)) {
  1818. + dw_mci_stop_dma(host);
  1819. + send_stop_abort(host, data);
  1820. + state = STATE_DATA_ERROR;
  1821. + break;
  1822. + }
  1823. + prev_state = state = STATE_DATA_BUSY;
  1824. +
  1825. + /* fall through */
  1826. +
  1827. + case STATE_DATA_BUSY:
  1828. + if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
  1829. + &host->pending_events))
  1830. + break;
  1831. +
  1832. + host->data = NULL;
  1833. + set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
  1834. + err = dw_mci_data_complete(host, data);
  1835. +
  1836. + if (!err) {
  1837. + if (!data->stop || mrq->sbc) {
  1838. + if (mrq->sbc && data->stop)
  1839. + data->stop->error = 0;
  1840. + dw_mci_request_end(host, mrq);
  1841. + goto unlock;
  1842. + }
  1843. +
  1844. + /* stop command for open-ended transfer*/
  1845. + if (data->stop)
  1846. + send_stop_abort(host, data);
  1847. + } else {
  1848. + /*
  1849. + * If we don't have a command complete now we'll
  1850. + * never get one since we just reset everything;
  1851. + * better end the request.
  1852. + *
  1853. + * If we do have a command complete we'll fall
  1854. + * through to the SENDING_STOP command and
  1855. + * everything will be peachy keen.
  1856. + */
  1857. + if (!test_bit(EVENT_CMD_COMPLETE,
  1858. + &host->pending_events)) {
  1859. + host->cmd = NULL;
  1860. + dw_mci_request_end(host, mrq);
  1861. + goto unlock;
  1862. + }
  1863. + }
  1864. +
  1865. + /*
  1866. + * If err has non-zero,
  1867. + * stop-abort command has been already issued.
  1868. + */
  1869. + prev_state = state = STATE_SENDING_STOP;
  1870. +
  1871. + /* fall through */
  1872. +
  1873. + case STATE_SENDING_STOP:
  1874. + if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
  1875. + &host->pending_events))
  1876. + break;
  1877. +
  1878. + /* CMD error in data command */
  1879. + if (mrq->cmd->error && mrq->data)
  1880. + dw_mci_reset(host);
  1881. +
  1882. + host->cmd = NULL;
  1883. + host->data = NULL;
  1884. +
  1885. + if (mrq->stop)
  1886. + dw_mci_command_complete(host, mrq->stop);
  1887. + else
  1888. + host->cmd_status = 0;
  1889. +
  1890. + dw_mci_request_end(host, mrq);
  1891. + goto unlock;
  1892. +
  1893. + case STATE_DATA_ERROR:
  1894. + if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
  1895. + &host->pending_events))
  1896. + break;
  1897. +
  1898. + state = STATE_DATA_BUSY;
  1899. + break;
  1900. + }
  1901. + } while (state != prev_state);
  1902. +
  1903. + host->state = state;
  1904. +unlock:
  1905. + spin_unlock(&host->lock);
  1906. +
  1907. +}
  1908. +
  1909. +/* push final bytes to part_buf, only use during push */
  1910. +static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
  1911. +{
  1912. + memcpy((void *)&host->part_buf, buf, cnt);
  1913. + host->part_buf_count = cnt;
  1914. +}
  1915. +
  1916. +/* append bytes to part_buf, only use during push */
  1917. +static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
  1918. +{
  1919. + cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
  1920. + memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
  1921. + host->part_buf_count += cnt;
  1922. + return cnt;
  1923. +}
  1924. +
  1925. +/* pull first bytes from part_buf, only use during pull */
  1926. +static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
  1927. +{
  1928. + cnt = min(cnt, (int)host->part_buf_count);
  1929. + if (cnt) {
  1930. + memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
  1931. + cnt);
  1932. + host->part_buf_count -= cnt;
  1933. + host->part_buf_start += cnt;
  1934. + }
  1935. + return cnt;
  1936. +}
  1937. +
  1938. +/* pull final bytes from the part_buf, assuming it's just been filled */
  1939. +static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
  1940. +{
  1941. + memcpy(buf, &host->part_buf, cnt);
  1942. + host->part_buf_start = cnt;
  1943. + host->part_buf_count = (1 << host->data_shift) - cnt;
  1944. +}
  1945. +
  1946. +static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
  1947. +{
  1948. + struct mmc_data *data = host->data;
  1949. + int init_cnt = cnt;
  1950. +
  1951. + /* try and push anything in the part_buf */
  1952. + if (unlikely(host->part_buf_count)) {
  1953. + int len = dw_mci_push_part_bytes(host, buf, cnt);
  1954. + buf += len;
  1955. + cnt -= len;
  1956. + if (host->part_buf_count == 2) {
  1957. + mci_writew(host, DATA(host->data_offset),
  1958. + host->part_buf16);
  1959. + host->part_buf_count = 0;
  1960. + }
  1961. + }
  1962. +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
  1963. + if (unlikely((unsigned long)buf & 0x1)) {
  1964. + while (cnt >= 2) {
  1965. + u16 aligned_buf[64];
  1966. + int len = min(cnt & -2, (int)sizeof(aligned_buf));
  1967. + int items = len >> 1;
  1968. + int i;
  1969. + /* memcpy from input buffer into aligned buffer */
  1970. + memcpy(aligned_buf, buf, len);
  1971. + buf += len;
  1972. + cnt -= len;
  1973. + /* push data from aligned buffer into fifo */
  1974. + for (i = 0; i < items; ++i)
  1975. + mci_writew(host, DATA(host->data_offset),
  1976. + aligned_buf[i]);
  1977. + }
  1978. + } else
  1979. +#endif
  1980. + {
  1981. + u16 *pdata = buf;
  1982. + for (; cnt >= 2; cnt -= 2)
  1983. + mci_writew(host, DATA(host->data_offset), *pdata++);
  1984. + buf = pdata;
  1985. + }
  1986. + /* put anything remaining in the part_buf */
  1987. + if (cnt) {
  1988. + dw_mci_set_part_bytes(host, buf, cnt);
  1989. + /* Push data if we have reached the expected data length */
  1990. + if ((data->bytes_xfered + init_cnt) ==
  1991. + (data->blksz * data->blocks))
  1992. + mci_writew(host, DATA(host->data_offset),
  1993. + host->part_buf16);
  1994. + }
  1995. +}
  1996. +
  1997. +static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
  1998. +{
  1999. +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
  2000. + if (unlikely((unsigned long)buf & 0x1)) {
  2001. + while (cnt >= 2) {
  2002. + /* pull data from fifo into aligned buffer */
  2003. + u16 aligned_buf[64];
  2004. + int len = min(cnt & -2, (int)sizeof(aligned_buf));
  2005. + int items = len >> 1;
  2006. + int i;
  2007. + for (i = 0; i < items; ++i)
  2008. + aligned_buf[i] = mci_readw(host,
  2009. + DATA(host->data_offset));
  2010. + /* memcpy from aligned buffer into output buffer */
  2011. + memcpy(buf, aligned_buf, len);
  2012. + buf += len;
  2013. + cnt -= len;
  2014. + }
  2015. + } else
  2016. +#endif
  2017. + {
  2018. + u16 *pdata = buf;
  2019. + for (; cnt >= 2; cnt -= 2)
  2020. + *pdata++ = mci_readw(host, DATA(host->data_offset));
  2021. + buf = pdata;
  2022. + }
  2023. + if (cnt) {
  2024. + host->part_buf16 = mci_readw(host, DATA(host->data_offset));
  2025. + dw_mci_pull_final_bytes(host, buf, cnt);
  2026. + }
  2027. +}
  2028. +
  2029. +static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
  2030. +{
  2031. + struct mmc_data *data = host->data;
  2032. + int init_cnt = cnt;
  2033. +
  2034. + /* try and push anything in the part_buf */
  2035. + if (unlikely(host->part_buf_count)) {
  2036. + int len = dw_mci_push_part_bytes(host, buf, cnt);
  2037. + buf += len;
  2038. + cnt -= len;
  2039. + if (host->part_buf_count == 4) {
  2040. + mci_writel(host, DATA(host->data_offset),
  2041. + host->part_buf32);
  2042. + host->part_buf_count = 0;
  2043. + }
  2044. + }
  2045. +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
  2046. + if (unlikely((unsigned long)buf & 0x3)) {
  2047. + while (cnt >= 4) {
  2048. + u32 aligned_buf[32];
  2049. + int len = min(cnt & -4, (int)sizeof(aligned_buf));
  2050. + int items = len >> 2;
  2051. + int i;
  2052. + /* memcpy from input buffer into aligned buffer */
  2053. + memcpy(aligned_buf, buf, len);
  2054. + buf += len;
  2055. + cnt -= len;
  2056. + /* push data from aligned buffer into fifo */
  2057. + for (i = 0; i < items; ++i)
  2058. + mci_writel(host, DATA(host->data_offset),
  2059. + aligned_buf[i]);
  2060. + }
  2061. + } else
  2062. +#endif
  2063. + {
  2064. + u32 *pdata = buf;
  2065. + for (; cnt >= 4; cnt -= 4)
  2066. + mci_writel(host, DATA(host->data_offset), *pdata++);
  2067. + buf = pdata;
  2068. + }
  2069. + /* put anything remaining in the part_buf */
  2070. + if (cnt) {
  2071. + dw_mci_set_part_bytes(host, buf, cnt);
  2072. + /* Push data if we have reached the expected data length */
  2073. + if ((data->bytes_xfered + init_cnt) ==
  2074. + (data->blksz * data->blocks))
  2075. + mci_writel(host, DATA(host->data_offset),
  2076. + host->part_buf32);
  2077. + }
  2078. +}
  2079. +
  2080. +static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
  2081. +{
  2082. +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
  2083. + if (unlikely((unsigned long)buf & 0x3)) {
  2084. + while (cnt >= 4) {
  2085. + /* pull data from fifo into aligned buffer */
  2086. + u32 aligned_buf[32];
  2087. + int len = min(cnt & -4, (int)sizeof(aligned_buf));
  2088. + int items = len >> 2;
  2089. + int i;
  2090. + for (i = 0; i < items; ++i)
  2091. + aligned_buf[i] = mci_readl(host,
  2092. + DATA(host->data_offset));
  2093. + /* memcpy from aligned buffer into output buffer */
  2094. + memcpy(buf, aligned_buf, len);
  2095. + buf += len;
  2096. + cnt -= len;
  2097. + }
  2098. + } else
  2099. +#endif
  2100. + {
  2101. + u32 *pdata = buf;
  2102. + for (; cnt >= 4; cnt -= 4)
  2103. + *pdata++ = mci_readl(host, DATA(host->data_offset));
  2104. + buf = pdata;
  2105. + }
  2106. + if (cnt) {
  2107. + host->part_buf32 = mci_readl(host, DATA(host->data_offset));
  2108. + dw_mci_pull_final_bytes(host, buf, cnt);
  2109. + }
  2110. +}
  2111. +
  2112. +static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
  2113. +{
  2114. + struct mmc_data *data = host->data;
  2115. + int init_cnt = cnt;
  2116. +
  2117. + /* try and push anything in the part_buf */
  2118. + if (unlikely(host->part_buf_count)) {
  2119. + int len = dw_mci_push_part_bytes(host, buf, cnt);
  2120. + buf += len;
  2121. + cnt -= len;
  2122. +
  2123. + if (host->part_buf_count == 8) {
  2124. + mci_writeq(host, DATA(host->data_offset),
  2125. + host->part_buf);
  2126. + host->part_buf_count = 0;
  2127. + }
  2128. + }
  2129. +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
  2130. + if (unlikely((unsigned long)buf & 0x7)) {
  2131. + while (cnt >= 8) {
  2132. + u64 aligned_buf[16];
  2133. + int len = min(cnt & -8, (int)sizeof(aligned_buf));
  2134. + int items = len >> 3;
  2135. + int i;
  2136. + /* memcpy from input buffer into aligned buffer */
  2137. + memcpy(aligned_buf, buf, len);
  2138. + buf += len;
  2139. + cnt -= len;
  2140. + /* push data from aligned buffer into fifo */
  2141. + for (i = 0; i < items; ++i)
  2142. + mci_writeq(host, DATA(host->data_offset),
  2143. + aligned_buf[i]);
  2144. + }
  2145. + } else
  2146. +#endif
  2147. + {
  2148. + u64 *pdata = buf;
  2149. + for (; cnt >= 8; cnt -= 8)
  2150. + mci_writeq(host, DATA(host->data_offset), *pdata++);
  2151. + buf = pdata;
  2152. + }
  2153. + /* put anything remaining in the part_buf */
  2154. + if (cnt) {
  2155. + dw_mci_set_part_bytes(host, buf, cnt);
  2156. + /* Push data if we have reached the expected data length */
  2157. + if ((data->bytes_xfered + init_cnt) ==
  2158. + (data->blksz * data->blocks))
  2159. + mci_writeq(host, DATA(host->data_offset),
  2160. + host->part_buf);
  2161. + }
  2162. +}
  2163. +
  2164. +static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
  2165. +{
  2166. +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
  2167. + if (unlikely((unsigned long)buf & 0x7)) {
  2168. + while (cnt >= 8) {
  2169. + /* pull data from fifo into aligned buffer */
  2170. + u64 aligned_buf[16];
  2171. + int len = min(cnt & -8, (int)sizeof(aligned_buf));
  2172. + int items = len >> 3;
  2173. + int i;
  2174. + for (i = 0; i < items; ++i)
  2175. + aligned_buf[i] = mci_readq(host,
  2176. + DATA(host->data_offset));
  2177. + /* memcpy from aligned buffer into output buffer */
  2178. + memcpy(buf, aligned_buf, len);
  2179. + buf += len;
  2180. + cnt -= len;
  2181. + }
  2182. + } else
  2183. +#endif
  2184. + {
  2185. + u64 *pdata = buf;
  2186. + for (; cnt >= 8; cnt -= 8)
  2187. + *pdata++ = mci_readq(host, DATA(host->data_offset));
  2188. + buf = pdata;
  2189. + }
  2190. + if (cnt) {
  2191. + host->part_buf = mci_readq(host, DATA(host->data_offset));
  2192. + dw_mci_pull_final_bytes(host, buf, cnt);
  2193. + }
  2194. +}
  2195. +
  2196. +static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
  2197. +{
  2198. + int len;
  2199. +
  2200. + /* get remaining partial bytes */
  2201. + len = dw_mci_pull_part_bytes(host, buf, cnt);
  2202. + if (unlikely(len == cnt))
  2203. + return;
  2204. + buf += len;
  2205. + cnt -= len;
  2206. +
  2207. + /* get the rest of the data */
  2208. + host->pull_data(host, buf, cnt);
  2209. +}
  2210. +
  2211. +static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
  2212. +{
  2213. + struct sg_mapping_iter *sg_miter = &host->sg_miter;
  2214. + void *buf;
  2215. + unsigned int offset;
  2216. + struct mmc_data *data = host->data;
  2217. + int shift = host->data_shift;
  2218. + u32 status;
  2219. + unsigned int len;
  2220. + unsigned int remain, fcnt;
  2221. +
  2222. + do {
  2223. + if (!sg_miter_next(sg_miter))
  2224. + goto done;
  2225. +
  2226. + host->sg = sg_miter->piter.sg;
  2227. + buf = sg_miter->addr;
  2228. + remain = sg_miter->length;
  2229. + offset = 0;
  2230. +
  2231. + do {
  2232. + fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
  2233. + << shift) + host->part_buf_count;
  2234. + len = min(remain, fcnt);
  2235. + if (!len)
  2236. + break;
  2237. + dw_mci_pull_data(host, (void *)(buf + offset), len);
  2238. + data->bytes_xfered += len;
  2239. + offset += len;
  2240. + remain -= len;
  2241. + } while (remain);
  2242. +
  2243. + sg_miter->consumed = offset;
  2244. + status = mci_readl(host, MINTSTS);
  2245. + mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
  2246. + /* if the RXDR is ready read again */
  2247. + } while ((status & SDMMC_INT_RXDR) ||
  2248. + (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
  2249. +
  2250. + if (!remain) {
  2251. + if (!sg_miter_next(sg_miter))
  2252. + goto done;
  2253. + sg_miter->consumed = 0;
  2254. + }
  2255. + sg_miter_stop(sg_miter);
  2256. + return;
  2257. +
  2258. +done:
  2259. + sg_miter_stop(sg_miter);
  2260. + host->sg = NULL;
  2261. + smp_wmb();
  2262. + set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
  2263. +}
  2264. +
  2265. +static void dw_mci_write_data_pio(struct dw_mci *host)
  2266. +{
  2267. + struct sg_mapping_iter *sg_miter = &host->sg_miter;
  2268. + void *buf;
  2269. + unsigned int offset;
  2270. + struct mmc_data *data = host->data;
  2271. + int shift = host->data_shift;
  2272. + u32 status;
  2273. + unsigned int len;
  2274. + unsigned int fifo_depth = host->fifo_depth;
  2275. + unsigned int remain, fcnt;
  2276. +
  2277. + do {
  2278. + if (!sg_miter_next(sg_miter))
  2279. + goto done;
  2280. +
  2281. + host->sg = sg_miter->piter.sg;
  2282. + buf = sg_miter->addr;
  2283. + remain = sg_miter->length;
  2284. + offset = 0;
  2285. +
  2286. + do {
  2287. + fcnt = ((fifo_depth -
  2288. + SDMMC_GET_FCNT(mci_readl(host, STATUS)))
  2289. + << shift) - host->part_buf_count;
  2290. + len = min(remain, fcnt);
  2291. + if (!len)
  2292. + break;
  2293. + host->push_data(host, (void *)(buf + offset), len);
  2294. + data->bytes_xfered += len;
  2295. + offset += len;
  2296. + remain -= len;
  2297. + } while (remain);
  2298. +
  2299. + sg_miter->consumed = offset;
  2300. + status = mci_readl(host, MINTSTS);
  2301. + mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
  2302. + } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
  2303. +
  2304. + if (!remain) {
  2305. + if (!sg_miter_next(sg_miter))
  2306. + goto done;
  2307. + sg_miter->consumed = 0;
  2308. + }
  2309. + sg_miter_stop(sg_miter);
  2310. + return;
  2311. +
  2312. +done:
  2313. + sg_miter_stop(sg_miter);
  2314. + host->sg = NULL;
  2315. + smp_wmb();
  2316. + set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
  2317. +}
  2318. +
  2319. +static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
  2320. +{
  2321. + if (!host->cmd_status)
  2322. + host->cmd_status = status;
  2323. +
  2324. + smp_wmb();
  2325. +
  2326. + set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
  2327. + tasklet_schedule(&host->tasklet);
  2328. +}
  2329. +
  2330. +static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
  2331. +{
  2332. + struct dw_mci *host = dev_id;
  2333. + u32 pending;
  2334. + int i;
  2335. +
  2336. + pending = mci_readl(host, MINTSTS); /* read-only mask reg */
  2337. +
  2338. + /*
  2339. + * DTO fix - version 2.10a and below, and only if internal DMA
  2340. + * is configured.
  2341. + */
  2342. + if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
  2343. + if (!pending &&
  2344. + ((mci_readl(host, STATUS) >> 17) & 0x1fff))
  2345. + pending |= SDMMC_INT_DATA_OVER;
  2346. + }
  2347. +
  2348. + if (pending) {
  2349. + /* Check volt switch first, since it can look like an error */
  2350. + if ((host->state == STATE_SENDING_CMD11) &&
  2351. + (pending & SDMMC_INT_VOLT_SWITCH)) {
  2352. + mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
  2353. + pending &= ~SDMMC_INT_VOLT_SWITCH;
  2354. + dw_mci_cmd_interrupt(host, pending);
  2355. + }
  2356. +
  2357. + if (pending & DW_MCI_CMD_ERROR_FLAGS) {
  2358. + mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
  2359. + host->cmd_status = pending;
  2360. + smp_wmb();
  2361. + set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
  2362. + }
  2363. +
  2364. + if (pending & DW_MCI_DATA_ERROR_FLAGS) {
  2365. + /* if there is an error report DATA_ERROR */
  2366. + mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
  2367. + host->data_status = pending;
  2368. + smp_wmb();
  2369. + set_bit(EVENT_DATA_ERROR, &host->pending_events);
  2370. + tasklet_schedule(&host->tasklet);
  2371. + }
  2372. +
  2373. + if (pending & SDMMC_INT_DATA_OVER) {
  2374. + mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
  2375. + if (!host->data_status)
  2376. + host->data_status = pending;
  2377. + smp_wmb();
  2378. + if (host->dir_status == DW_MCI_RECV_STATUS) {
  2379. + if (host->sg != NULL)
  2380. + dw_mci_read_data_pio(host, true);
  2381. + }
  2382. + set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
  2383. + tasklet_schedule(&host->tasklet);
  2384. + }
  2385. +
  2386. + if (pending & SDMMC_INT_RXDR) {
  2387. + mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
  2388. + if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
  2389. + dw_mci_read_data_pio(host, false);
  2390. + }
  2391. +
  2392. + if (pending & SDMMC_INT_TXDR) {
  2393. + mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
  2394. + if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
  2395. + dw_mci_write_data_pio(host);
  2396. + }
  2397. +
  2398. + if (pending & SDMMC_INT_CMD_DONE) {
  2399. + mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
  2400. + dw_mci_cmd_interrupt(host, pending);
  2401. + }
  2402. +
  2403. + if (pending & SDMMC_INT_CD) {
  2404. + mci_writel(host, RINTSTS, SDMMC_INT_CD);
  2405. + queue_work(host->card_workqueue, &host->card_work);
  2406. + }
  2407. +
  2408. + /* Handle SDIO Interrupts */
  2409. + for (i = 0; i < host->num_slots; i++) {
  2410. + struct dw_mci_slot *slot = host->slot[i];
  2411. + if (pending & SDMMC_INT_SDIO(i)) {
  2412. + mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
  2413. + mmc_signal_sdio_irq(slot->mmc);
  2414. + }
  2415. + }
  2416. +
  2417. + }
  2418. +
  2419. +#ifdef CONFIG_MMC_DW_IDMAC
  2420. + /* Handle DMA interrupts */
  2421. + pending = mci_readl(host, IDSTS);
  2422. + if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
  2423. + mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
  2424. + mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
  2425. + host->dma_ops->complete(host);
  2426. + }
  2427. +#endif
  2428. +
  2429. + return IRQ_HANDLED;
  2430. +}
  2431. +
  2432. +static void dw_mci_work_routine_card(struct work_struct *work)
  2433. +{
  2434. + struct dw_mci *host = container_of(work, struct dw_mci, card_work);
  2435. + int i;
  2436. +
  2437. + for (i = 0; i < host->num_slots; i++) {
  2438. + struct dw_mci_slot *slot = host->slot[i];
  2439. + struct mmc_host *mmc = slot->mmc;
  2440. + struct mmc_request *mrq;
  2441. + int present;
  2442. +
  2443. + present = dw_mci_get_cd(mmc);
  2444. + while (present != slot->last_detect_state) {
  2445. + dev_dbg(&slot->mmc->class_dev, "card %s\n",
  2446. + present ? "inserted" : "removed");
  2447. +
  2448. + spin_lock_bh(&host->lock);
  2449. +
  2450. + /* Card change detected */
  2451. + slot->last_detect_state = present;
  2452. +
  2453. + /* Clean up queue if present */
  2454. + mrq = slot->mrq;
  2455. + if (mrq) {
  2456. + if (mrq == host->mrq) {
  2457. + host->data = NULL;
  2458. + host->cmd = NULL;
  2459. +
  2460. + switch (host->state) {
  2461. + case STATE_IDLE:
  2462. + case STATE_WAITING_CMD11_DONE:
  2463. + break;
  2464. + case STATE_SENDING_CMD11:
  2465. + case STATE_SENDING_CMD:
  2466. + mrq->cmd->error = -ENOMEDIUM;
  2467. + if (!mrq->data)
  2468. + break;
  2469. + /* fall through */
  2470. + case STATE_SENDING_DATA:
  2471. + mrq->data->error = -ENOMEDIUM;
  2472. + dw_mci_stop_dma(host);
  2473. + break;
  2474. + case STATE_DATA_BUSY:
  2475. + case STATE_DATA_ERROR:
  2476. + if (mrq->data->error == -EINPROGRESS)
  2477. + mrq->data->error = -ENOMEDIUM;
  2478. + /* fall through */
  2479. + case STATE_SENDING_STOP:
  2480. + if (mrq->stop)
  2481. + mrq->stop->error = -ENOMEDIUM;
  2482. + break;
  2483. + }
  2484. +
  2485. + dw_mci_request_end(host, mrq);
  2486. + } else {
  2487. + list_del(&slot->queue_node);
  2488. + mrq->cmd->error = -ENOMEDIUM;
  2489. + if (mrq->data)
  2490. + mrq->data->error = -ENOMEDIUM;
  2491. + if (mrq->stop)
  2492. + mrq->stop->error = -ENOMEDIUM;
  2493. +
  2494. + spin_unlock(&host->lock);
  2495. + mmc_request_done(slot->mmc, mrq);
  2496. + spin_lock(&host->lock);
  2497. + }
  2498. + }
  2499. +
  2500. + /* Power down slot */
  2501. + if (present == 0)
  2502. + dw_mci_reset(host);
  2503. +
  2504. + spin_unlock_bh(&host->lock);
  2505. +
  2506. + present = dw_mci_get_cd(mmc);
  2507. + }
  2508. +
  2509. + mmc_detect_change(slot->mmc,
  2510. + msecs_to_jiffies(host->pdata->detect_delay_ms));
  2511. + }
  2512. +}
  2513. +
  2514. +#ifdef CONFIG_OF
  2515. +/* given a slot id, find out the device node representing that slot */
  2516. +static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
  2517. +{
  2518. + struct device_node *np;
  2519. + const __be32 *addr;
  2520. + int len;
  2521. +
  2522. + if (!dev || !dev->of_node)
  2523. + return NULL;
  2524. +
  2525. + for_each_child_of_node(dev->of_node, np) {
  2526. + addr = of_get_property(np, "reg", &len);
  2527. + if (!addr || (len < sizeof(int)))
  2528. + continue;
  2529. + if (be32_to_cpup(addr) == slot)
  2530. + return np;
  2531. + }
  2532. + return NULL;
  2533. +}
  2534. +
  2535. +static struct dw_mci_of_slot_quirks {
  2536. + char *quirk;
  2537. + int id;
  2538. +} of_slot_quirks[] = {
  2539. + {
  2540. + .quirk = "disable-wp",
  2541. + .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
  2542. + },
  2543. +};
  2544. +
  2545. +static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
  2546. +{
  2547. + struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
  2548. + int quirks = 0;
  2549. + int idx;
  2550. +
  2551. + /* get quirks */
  2552. + for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
  2553. + if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) {
  2554. + dev_warn(dev, "Slot quirk %s is deprecated\n",
  2555. + of_slot_quirks[idx].quirk);
  2556. + quirks |= of_slot_quirks[idx].id;
  2557. + }
  2558. +
  2559. + return quirks;
  2560. +}
  2561. +#else /* CONFIG_OF */
  2562. +static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
  2563. +{
  2564. + return 0;
  2565. +}
  2566. +#endif /* CONFIG_OF */
  2567. +
  2568. +static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
  2569. +{
  2570. + struct mmc_host *mmc;
  2571. + struct dw_mci_slot *slot;
  2572. + const struct dw_mci_drv_data *drv_data = host->drv_data;
  2573. + int ctrl_id, ret;
  2574. + u32 freq[2];
  2575. +
  2576. + mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
  2577. + if (!mmc)
  2578. + return -ENOMEM;
  2579. +
  2580. + slot = mmc_priv(mmc);
  2581. + slot->id = id;
  2582. + slot->mmc = mmc;
  2583. + slot->host = host;
  2584. + host->slot[id] = slot;
  2585. +
  2586. + slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
  2587. +
  2588. + mmc->ops = &dw_mci_ops;
  2589. + if (of_property_read_u32_array(host->dev->of_node,
  2590. + "clock-freq-min-max", freq, 2)) {
  2591. + mmc->f_min = DW_MCI_FREQ_MIN;
  2592. + mmc->f_max = DW_MCI_FREQ_MAX;
  2593. + } else {
  2594. + mmc->f_min = freq[0];
  2595. + mmc->f_max = freq[1];
  2596. + }
  2597. +
  2598. + /*if there are external regulators, get them*/
  2599. + ret = mmc_regulator_get_supply(mmc);
  2600. + if (ret == -EPROBE_DEFER)
  2601. + goto err_host_allocated;
  2602. +
  2603. + if (!mmc->ocr_avail)
  2604. + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
  2605. +
  2606. + if (host->pdata->caps)
  2607. + mmc->caps = host->pdata->caps;
  2608. +
  2609. + if (host->pdata->pm_caps)
  2610. + mmc->pm_caps = host->pdata->pm_caps;
  2611. +
  2612. + if (host->dev->of_node) {
  2613. + ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
  2614. + if (ctrl_id < 0)
  2615. + ctrl_id = 0;
  2616. + } else {
  2617. + ctrl_id = to_platform_device(host->dev)->id;
  2618. + }
  2619. + if (drv_data && drv_data->caps)
  2620. + mmc->caps |= drv_data->caps[ctrl_id];
  2621. +
  2622. + if (host->pdata->caps2)
  2623. + mmc->caps2 = host->pdata->caps2;
  2624. +
  2625. + ret = mmc_of_parse(mmc);
  2626. + if (ret)
  2627. + goto err_host_allocated;
  2628. +
  2629. + if (host->pdata->blk_settings) {
  2630. + mmc->max_segs = host->pdata->blk_settings->max_segs;
  2631. + mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
  2632. + mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
  2633. + mmc->max_req_size = host->pdata->blk_settings->max_req_size;
  2634. + mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
  2635. + } else {
  2636. + /* Useful defaults if platform data is unset. */
  2637. +#ifdef CONFIG_MMC_DW_IDMAC
  2638. + mmc->max_segs = host->ring_size;
  2639. + mmc->max_blk_size = 65536;
  2640. + mmc->max_blk_count = host->ring_size;
  2641. + mmc->max_seg_size = 0x1000;
  2642. + mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
  2643. +#else
  2644. + mmc->max_segs = 64;
  2645. + mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
  2646. + mmc->max_blk_count = 512;
  2647. + mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
  2648. + mmc->max_seg_size = mmc->max_req_size;
  2649. +#endif /* CONFIG_MMC_DW_IDMAC */
  2650. + }
  2651. +
  2652. + if (dw_mci_get_cd(mmc))
  2653. + set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
  2654. + else
  2655. + clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
  2656. +
  2657. + ret = mmc_add_host(mmc);
  2658. + if (ret)
  2659. + goto err_host_allocated;
  2660. +
  2661. +#if defined(CONFIG_DEBUG_FS)
  2662. + dw_mci_init_debugfs(slot);
  2663. +#endif
  2664. +
  2665. + /* Card initially undetected */
  2666. + slot->last_detect_state = 0;
  2667. +
  2668. + return 0;
  2669. +
  2670. +err_host_allocated:
  2671. + mmc_free_host(mmc);
  2672. + return ret;
  2673. +}
  2674. +
  2675. +static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
  2676. +{
  2677. + /* Debugfs stuff is cleaned up by mmc core */
  2678. + mmc_remove_host(slot->mmc);
  2679. + slot->host->slot[id] = NULL;
  2680. + mmc_free_host(slot->mmc);
  2681. +}
  2682. +
  2683. +static void dw_mci_init_dma(struct dw_mci *host)
  2684. +{
  2685. + /* Alloc memory for sg translation */
  2686. + host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
  2687. + &host->sg_dma, GFP_KERNEL);
  2688. + if (!host->sg_cpu) {
  2689. + dev_err(host->dev, "%s: could not alloc DMA memory\n",
  2690. + __func__);
  2691. + goto no_dma;
  2692. + }
  2693. +
  2694. + /* Determine which DMA interface to use */
  2695. +#ifdef CONFIG_MMC_DW_IDMAC
  2696. + host->dma_ops = &dw_mci_idmac_ops;
  2697. + dev_info(host->dev, "Using internal DMA controller.\n");
  2698. +#endif
  2699. +
  2700. + if (!host->dma_ops)
  2701. + goto no_dma;
  2702. +
  2703. + if (host->dma_ops->init && host->dma_ops->start &&
  2704. + host->dma_ops->stop && host->dma_ops->cleanup) {
  2705. + if (host->dma_ops->init(host)) {
  2706. + dev_err(host->dev, "%s: Unable to initialize "
  2707. + "DMA Controller.\n", __func__);
  2708. + goto no_dma;
  2709. + }
  2710. + } else {
  2711. + dev_err(host->dev, "DMA initialization not found.\n");
  2712. + goto no_dma;
  2713. + }
  2714. +
  2715. + host->use_dma = 1;
  2716. + return;
  2717. +
  2718. +no_dma:
  2719. + dev_info(host->dev, "Using PIO mode.\n");
  2720. + host->use_dma = 0;
  2721. + return;
  2722. +}
  2723. +
  2724. +static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
  2725. +{
  2726. + unsigned long timeout = jiffies + msecs_to_jiffies(500);
  2727. + u32 ctrl;
  2728. +
  2729. + ctrl = mci_readl(host, CTRL);
  2730. + ctrl |= reset;
  2731. + mci_writel(host, CTRL, ctrl);
  2732. +
  2733. + /* wait till resets clear */
  2734. + do {
  2735. + ctrl = mci_readl(host, CTRL);
  2736. + if (!(ctrl & reset))
  2737. + return true;
  2738. + } while (time_before(jiffies, timeout));
  2739. +
  2740. + dev_err(host->dev,
  2741. + "Timeout resetting block (ctrl reset %#x)\n",
  2742. + ctrl & reset);
  2743. +
  2744. + return false;
  2745. +}
  2746. +
  2747. +static bool dw_mci_reset(struct dw_mci *host)
  2748. +{
  2749. + u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
  2750. + bool ret = false;
  2751. +
  2752. + /*
  2753. + * Reseting generates a block interrupt, hence setting
  2754. + * the scatter-gather pointer to NULL.
  2755. + */
  2756. + if (host->sg) {
  2757. + sg_miter_stop(&host->sg_miter);
  2758. + host->sg = NULL;
  2759. + }
  2760. +
  2761. + if (host->use_dma)
  2762. + flags |= SDMMC_CTRL_DMA_RESET;
  2763. +
  2764. + if (dw_mci_ctrl_reset(host, flags)) {
  2765. + /*
  2766. + * In all cases we clear the RAWINTS register to clear any
  2767. + * interrupts.
  2768. + */
  2769. + mci_writel(host, RINTSTS, 0xFFFFFFFF);
  2770. +
  2771. + /* if using dma we wait for dma_req to clear */
  2772. + if (host->use_dma) {
  2773. + unsigned long timeout = jiffies + msecs_to_jiffies(500);
  2774. + u32 status;
  2775. + do {
  2776. + status = mci_readl(host, STATUS);
  2777. + if (!(status & SDMMC_STATUS_DMA_REQ))
  2778. + break;
  2779. + cpu_relax();
  2780. + } while (time_before(jiffies, timeout));
  2781. +
  2782. + if (status & SDMMC_STATUS_DMA_REQ) {
  2783. + dev_err(host->dev,
  2784. + "%s: Timeout waiting for dma_req to "
  2785. + "clear during reset\n", __func__);
  2786. + goto ciu_out;
  2787. + }
  2788. +
  2789. + /* when using DMA next we reset the fifo again */
  2790. + if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
  2791. + goto ciu_out;
  2792. + }
  2793. + } else {
  2794. + /* if the controller reset bit did clear, then set clock regs */
  2795. + if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
  2796. + dev_err(host->dev, "%s: fifo/dma reset bits didn't "
  2797. + "clear but ciu was reset, doing clock update\n",
  2798. + __func__);
  2799. + goto ciu_out;
  2800. + }
  2801. + }
  2802. +
  2803. +#if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
  2804. + /* It is also recommended that we reset and reprogram idmac */
  2805. + dw_mci_idmac_reset(host);
  2806. +#endif
  2807. +
  2808. + ret = true;
  2809. +
  2810. +ciu_out:
  2811. + /* After a CTRL reset we need to have CIU set clock registers */
  2812. + mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);
  2813. +
  2814. + return ret;
  2815. +}
  2816. +
  2817. +#ifdef CONFIG_OF
  2818. +static struct dw_mci_of_quirks {
  2819. + char *quirk;
  2820. + int id;
  2821. +} of_quirks[] = {
  2822. + {
  2823. + .quirk = "broken-cd",
  2824. + .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
  2825. + }, {
  2826. + .quirk = "disable-wp",
  2827. + .id = DW_MCI_QUIRK_NO_WRITE_PROTECT,
  2828. + },
  2829. +};
  2830. +
  2831. +static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
  2832. +{
  2833. + struct dw_mci_board *pdata;
  2834. + struct device *dev = host->dev;
  2835. + struct device_node *np = dev->of_node;
  2836. + const struct dw_mci_drv_data *drv_data = host->drv_data;
  2837. + int idx, ret;
  2838. + u32 clock_frequency;
  2839. +
  2840. + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
  2841. + if (!pdata) {
  2842. + dev_err(dev, "could not allocate memory for pdata\n");
  2843. + return ERR_PTR(-ENOMEM);
  2844. + }
  2845. +
  2846. + /* find out number of slots supported */
  2847. + if (of_property_read_u32(dev->of_node, "num-slots",
  2848. + &pdata->num_slots)) {
  2849. + dev_info(dev, "num-slots property not found, "
  2850. + "assuming 1 slot is available\n");
  2851. + pdata->num_slots = 1;
  2852. + }
  2853. +
  2854. + /* get quirks */
  2855. + for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
  2856. + if (of_get_property(np, of_quirks[idx].quirk, NULL))
  2857. + pdata->quirks |= of_quirks[idx].id;
  2858. +
  2859. + if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
  2860. + dev_info(dev, "fifo-depth property not found, using "
  2861. + "value of FIFOTH register as default\n");
  2862. +
  2863. + of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
  2864. +
  2865. + if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
  2866. + pdata->bus_hz = clock_frequency;
  2867. +
  2868. + if (drv_data && drv_data->parse_dt) {
  2869. + ret = drv_data->parse_dt(host);
  2870. + if (ret)
  2871. + return ERR_PTR(ret);
  2872. + }
  2873. +
  2874. + if (of_find_property(np, "supports-highspeed", NULL))
  2875. + pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
  2876. +
  2877. + return pdata;
  2878. +}
  2879. +
  2880. +#else /* CONFIG_OF */
  2881. +static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
  2882. +{
  2883. + return ERR_PTR(-EINVAL);
  2884. +}
  2885. +#endif /* CONFIG_OF */
  2886. +
  2887. +int dw_mci_probe(struct dw_mci *host)
  2888. +{
  2889. + const struct dw_mci_drv_data *drv_data = host->drv_data;
  2890. + int width, i, ret = 0;
  2891. + u32 fifo_size;
  2892. + int init_slots = 0;
  2893. +
  2894. + if (!host->pdata) {
  2895. + host->pdata = dw_mci_parse_dt(host);
  2896. + if (IS_ERR(host->pdata)) {
  2897. + dev_err(host->dev, "platform data not available\n");
  2898. + return -EINVAL;
  2899. + }
  2900. + }
  2901. +
  2902. + if (host->pdata->num_slots > 1) {
  2903. + dev_err(host->dev,
  2904. + "Platform data must supply num_slots.\n");
  2905. + return -ENODEV;
  2906. + }
  2907. +
  2908. + host->biu_clk = devm_clk_get(host->dev, "biu");
  2909. + if (IS_ERR(host->biu_clk)) {
  2910. + dev_dbg(host->dev, "biu clock not available\n");
  2911. + } else {
  2912. + ret = clk_prepare_enable(host->biu_clk);
  2913. + if (ret) {
  2914. + dev_err(host->dev, "failed to enable biu clock\n");
  2915. + return ret;
  2916. + }
  2917. + }
  2918. +
  2919. + host->ciu_clk = devm_clk_get(host->dev, "ciu");
  2920. + if (IS_ERR(host->ciu_clk)) {
  2921. + dev_dbg(host->dev, "ciu clock not available\n");
  2922. + host->bus_hz = host->pdata->bus_hz;
  2923. + } else {
  2924. + ret = clk_prepare_enable(host->ciu_clk);
  2925. + if (ret) {
  2926. + dev_err(host->dev, "failed to enable ciu clock\n");
  2927. + goto err_clk_biu;
  2928. + }
  2929. +
  2930. + if (host->pdata->bus_hz) {
  2931. + ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
  2932. + if (ret)
  2933. + dev_warn(host->dev,
  2934. + "Unable to set bus rate to %uHz\n",
  2935. + host->pdata->bus_hz);
  2936. + }
  2937. + host->bus_hz = clk_get_rate(host->ciu_clk);
  2938. + }
  2939. +
  2940. + if (!host->bus_hz) {
  2941. + dev_err(host->dev,
  2942. + "Platform data must supply bus speed\n");
  2943. + ret = -ENODEV;
  2944. + goto err_clk_ciu;
  2945. + }
  2946. +
  2947. + if (drv_data && drv_data->init) {
  2948. + ret = drv_data->init(host);
  2949. + if (ret) {
  2950. + dev_err(host->dev,
  2951. + "implementation specific init failed\n");
  2952. + goto err_clk_ciu;
  2953. + }
  2954. + }
  2955. +
  2956. + if (drv_data && drv_data->setup_clock) {
  2957. + ret = drv_data->setup_clock(host);
  2958. + if (ret) {
  2959. + dev_err(host->dev,
  2960. + "implementation specific clock setup failed\n");
  2961. + goto err_clk_ciu;
  2962. + }
  2963. + }
  2964. +
  2965. + host->quirks = host->pdata->quirks;
  2966. +
  2967. + spin_lock_init(&host->lock);
  2968. + INIT_LIST_HEAD(&host->queue);
  2969. +
  2970. + /*
  2971. + * Get the host data width - this assumes that HCON has been set with
  2972. + * the correct values.
  2973. + */
  2974. + i = (mci_readl(host, HCON) >> 7) & 0x7;
  2975. + if (!i) {
  2976. + host->push_data = dw_mci_push_data16;
  2977. + host->pull_data = dw_mci_pull_data16;
  2978. + width = 16;
  2979. + host->data_shift = 1;
  2980. + } else if (i == 2) {
  2981. + host->push_data = dw_mci_push_data64;
  2982. + host->pull_data = dw_mci_pull_data64;
  2983. + width = 64;
  2984. + host->data_shift = 3;
  2985. + } else {
  2986. + /* Check for a reserved value, and warn if it is */
  2987. + WARN((i != 1),
  2988. + "HCON reports a reserved host data width!\n"
  2989. + "Defaulting to 32-bit access.\n");
  2990. + host->push_data = dw_mci_push_data32;
  2991. + host->pull_data = dw_mci_pull_data32;
  2992. + width = 32;
  2993. + host->data_shift = 2;
  2994. + }
  2995. +
  2996. + /* Reset all blocks */
  2997. + if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
  2998. + return -ENODEV;
  2999. +
  3000. + host->dma_ops = host->pdata->dma_ops;
  3001. + dw_mci_init_dma(host);
  3002. +
  3003. + /* Clear the interrupts for the host controller */
  3004. + mci_writel(host, RINTSTS, 0xFFFFFFFF);
  3005. + mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
  3006. +
  3007. + /* Put in max timeout */
  3008. + mci_writel(host, TMOUT, 0xFFFFFFFF);
  3009. +
  3010. + /*
  3011. + * FIFO threshold settings RxMark = fifo_size / 2 - 1,
  3012. + * Tx Mark = fifo_size / 2 DMA Size = 8
  3013. + */
  3014. + if (!host->pdata->fifo_depth) {
  3015. + /*
  3016. + * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
  3017. + * have been overwritten by the bootloader, just like we're
  3018. + * about to do, so if you know the value for your hardware, you
  3019. + * should put it in the platform data.
  3020. + */
  3021. + fifo_size = mci_readl(host, FIFOTH);
  3022. + fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
  3023. + } else {
  3024. + fifo_size = host->pdata->fifo_depth;
  3025. + }
  3026. + host->fifo_depth = fifo_size;
  3027. + host->fifoth_val =
  3028. + SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
  3029. + mci_writel(host, FIFOTH, host->fifoth_val);
  3030. +
  3031. + /* disable clock to CIU */
  3032. + mci_writel(host, CLKENA, 0);
  3033. + mci_writel(host, CLKSRC, 0);
  3034. +
  3035. + /*
  3036. + * In 2.40a spec, Data offset is changed.
  3037. + * Need to check the version-id and set data-offset for DATA register.
  3038. + */
  3039. + host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
  3040. + dev_info(host->dev, "Version ID is %04x\n", host->verid);
  3041. +
  3042. + if (host->verid < DW_MMC_240A)
  3043. + host->data_offset = DATA_OFFSET;
  3044. + else
  3045. + host->data_offset = DATA_240A_OFFSET;
  3046. +
  3047. + tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
  3048. + host->card_workqueue = alloc_workqueue("dw-mci-card",
  3049. + WQ_MEM_RECLAIM, 1);
  3050. + if (!host->card_workqueue) {
  3051. + ret = -ENOMEM;
  3052. + goto err_dmaunmap;
  3053. + }
  3054. + INIT_WORK(&host->card_work, dw_mci_work_routine_card);
  3055. + ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
  3056. + host->irq_flags, "dw-mci", host);
  3057. + if (ret)
  3058. + goto err_workqueue;
  3059. +
  3060. + if (host->pdata->num_slots)
  3061. + host->num_slots = host->pdata->num_slots;
  3062. + else
  3063. + host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
  3064. +
  3065. + /*
  3066. + * Enable interrupts for command done, data over, data empty, card det,
  3067. + * receive ready and error such as transmit, receive timeout, crc error
  3068. + */
  3069. + mci_writel(host, RINTSTS, 0xFFFFFFFF);
  3070. + mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
  3071. + SDMMC_INT_TXDR | SDMMC_INT_RXDR |
  3072. + DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
  3073. + mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
  3074. +
  3075. + dev_info(host->dev, "DW MMC controller at irq %d, "
  3076. + "%d bit host data width, "
  3077. + "%u deep fifo\n",
  3078. + host->irq, width, fifo_size);
  3079. +
  3080. + /* We need at least one slot to succeed */
  3081. + for (i = 0; i < host->num_slots; i++) {
  3082. + ret = dw_mci_init_slot(host, i);
  3083. + if (ret)
  3084. + dev_dbg(host->dev, "slot %d init failed\n", i);
  3085. + else
  3086. + init_slots++;
  3087. + }
  3088. +
  3089. + if (init_slots) {
  3090. + dev_info(host->dev, "%d slots initialized\n", init_slots);
  3091. + } else {
  3092. + dev_dbg(host->dev, "attempted to initialize %d slots, "
  3093. + "but failed on all\n", host->num_slots);
  3094. + goto err_workqueue;
  3095. + }
  3096. +
  3097. + if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
  3098. + dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
  3099. +
  3100. + return 0;
  3101. +
  3102. +err_workqueue:
  3103. + destroy_workqueue(host->card_workqueue);
  3104. +
  3105. +err_dmaunmap:
  3106. + if (host->use_dma && host->dma_ops->exit)
  3107. + host->dma_ops->exit(host);
  3108. +
  3109. +err_clk_ciu:
  3110. + if (!IS_ERR(host->ciu_clk))
  3111. + clk_disable_unprepare(host->ciu_clk);
  3112. +
  3113. +err_clk_biu:
  3114. + if (!IS_ERR(host->biu_clk))
  3115. + clk_disable_unprepare(host->biu_clk);
  3116. +
  3117. + return ret;
  3118. +}
  3119. +EXPORT_SYMBOL(dw_mci_probe);
  3120. +
  3121. +void dw_mci_remove(struct dw_mci *host)
  3122. +{
  3123. + int i;
  3124. +
  3125. + mci_writel(host, RINTSTS, 0xFFFFFFFF);
  3126. + mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
  3127. +
  3128. + for (i = 0; i < host->num_slots; i++) {
  3129. + dev_dbg(host->dev, "remove slot %d\n", i);
  3130. + if (host->slot[i])
  3131. + dw_mci_cleanup_slot(host->slot[i], i);
  3132. + }
  3133. +
  3134. + /* disable clock to CIU */
  3135. + mci_writel(host, CLKENA, 0);
  3136. + mci_writel(host, CLKSRC, 0);
  3137. +
  3138. + destroy_workqueue(host->card_workqueue);
  3139. +
  3140. + if (host->use_dma && host->dma_ops->exit)
  3141. + host->dma_ops->exit(host);
  3142. +
  3143. + if (!IS_ERR(host->ciu_clk))
  3144. + clk_disable_unprepare(host->ciu_clk);
  3145. +
  3146. + if (!IS_ERR(host->biu_clk))
  3147. + clk_disable_unprepare(host->biu_clk);
  3148. +}
  3149. +EXPORT_SYMBOL(dw_mci_remove);
  3150. +
  3151. +
  3152. +
  3153. +#ifdef CONFIG_PM_SLEEP
  3154. +/*
  3155. + * TODO: we should probably disable the clock to the card in the suspend path.
  3156. + */
  3157. +int dw_mci_suspend(struct dw_mci *host)
  3158. +{
  3159. + return 0;
  3160. +}
  3161. +EXPORT_SYMBOL(dw_mci_suspend);
  3162. +
  3163. +int dw_mci_resume(struct dw_mci *host)
  3164. +{
  3165. + int i, ret;
  3166. +
  3167. + if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
  3168. + ret = -ENODEV;
  3169. + return ret;
  3170. + }
  3171. +
  3172. + if (host->use_dma && host->dma_ops->init)
  3173. + host->dma_ops->init(host);
  3174. +
  3175. + /*
  3176. + * Restore the initial value at FIFOTH register
  3177. + * And Invalidate the prev_blksz with zero
  3178. + */
  3179. + mci_writel(host, FIFOTH, host->fifoth_val);
  3180. + host->prev_blksz = 0;
  3181. +
  3182. + /* Put in max timeout */
  3183. + mci_writel(host, TMOUT, 0xFFFFFFFF);
  3184. +
  3185. + mci_writel(host, RINTSTS, 0xFFFFFFFF);
  3186. + mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
  3187. + SDMMC_INT_TXDR | SDMMC_INT_RXDR |
  3188. + DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
  3189. + mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
  3190. +
  3191. + for (i = 0; i < host->num_slots; i++) {
  3192. + struct dw_mci_slot *slot = host->slot[i];
  3193. + if (!slot)
  3194. + continue;
  3195. + if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
  3196. + dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
  3197. + dw_mci_setup_bus(slot, true);
  3198. + }
  3199. + }
  3200. + return 0;
  3201. +}
  3202. +EXPORT_SYMBOL(dw_mci_resume);
  3203. +#endif /* CONFIG_PM_SLEEP */
  3204. +
  3205. +static int __init dw_mci_init(void)
  3206. +{
  3207. + pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
  3208. + return 0;
  3209. +}
  3210. +
  3211. +static void __exit dw_mci_exit(void)
  3212. +{
  3213. +}
  3214. +
  3215. +module_init(dw_mci_init);
  3216. +module_exit(dw_mci_exit);
  3217. +
  3218. +MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
  3219. +MODULE_AUTHOR("NXP Semiconductor VietNam");
  3220. +MODULE_AUTHOR("Imagination Technologies Ltd");
  3221. +MODULE_LICENSE("GPL v2");
  3222. diff -Nur linux-3.18.8.orig/include/linux/mmc/host.h linux-3.18.8/include/linux/mmc/host.h
  3223. --- linux-3.18.8.orig/include/linux/mmc/host.h 2015-02-27 02:49:36.000000000 +0100
  3224. +++ linux-3.18.8/include/linux/mmc/host.h 2015-03-02 03:25:33.000000000 +0100
  3225. @@ -305,6 +305,11 @@
  3226. unsigned long clkgate_delay;
  3227. #endif
  3228. + /* card specific properties to deal with power and reset */
  3229. + struct regulator *card_regulator; /* External VCC needed by the card */
  3230. + struct gpio_desc *card_reset_gpios[2]; /* External resets, active low */
  3231. + struct clk *card_clk; /* External clock needed by the card */
  3232. +
  3233. /* host specific block data */
  3234. unsigned int max_seg_size; /* see blk_queue_max_segment_size */
  3235. unsigned short max_segs; /* see blk_queue_max_segments */