
xhci.c

  1. /*
  2. * Copyright (C) 2014 Michael Brown <mbrown@fensystems.co.uk>.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License as
  6. * published by the Free Software Foundation; either version 2 of the
  7. * License, or (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful, but
  10. * WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. * General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  17. * 02110-1301, USA.
  18. *
  19. * You can also choose to distribute this program under the terms of
  20. * the Unmodified Binary Distribution Licence (as given in the file
  21. * COPYING.UBDL), provided that you have satisfied its requirements.
  22. */
  23. FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
  24. #include <stdlib.h>
  25. #include <stdio.h>
  26. #include <unistd.h>
  27. #include <string.h>
  28. #include <strings.h>
  29. #include <errno.h>
  30. #include <byteswap.h>
  31. #include <ipxe/malloc.h>
  32. #include <ipxe/umalloc.h>
  33. #include <ipxe/pci.h>
  34. #include <ipxe/usb.h>
  35. #include <ipxe/profile.h>
  36. #include "xhci.h"
  37. /** @file
  38. *
  39. * USB eXtensible Host Controller Interface (xHCI) driver
  40. *
  41. */
  42. /** Message transfer profiler */
  43. static struct profiler xhci_message_profiler __profiler =
  44. { .name = "xhci.message" };
  45. /** Stream transfer profiler */
  46. static struct profiler xhci_stream_profiler __profiler =
  47. { .name = "xhci.stream" };
  48. /** Event ring profiler */
  49. static struct profiler xhci_event_profiler __profiler =
  50. { .name = "xhci.event" };
  51. /** Transfer event profiler */
  52. static struct profiler xhci_transfer_profiler __profiler =
  53. { .name = "xhci.transfer" };
  54. /* Disambiguate the various error causes */
  55. #define EIO_DATA \
  56. __einfo_error ( EINFO_EIO_DATA )
  57. #define EINFO_EIO_DATA \
  58. __einfo_uniqify ( EINFO_EIO, ( 2 - 0 ), \
  59. "Data buffer error" )
  60. #define EIO_BABBLE \
  61. __einfo_error ( EINFO_EIO_BABBLE )
  62. #define EINFO_EIO_BABBLE \
  63. __einfo_uniqify ( EINFO_EIO, ( 3 - 0 ), \
  64. "Babble detected" )
  65. #define EIO_USB \
  66. __einfo_error ( EINFO_EIO_USB )
  67. #define EINFO_EIO_USB \
  68. __einfo_uniqify ( EINFO_EIO, ( 4 - 0 ), \
  69. "USB transaction error" )
  70. #define EIO_TRB \
  71. __einfo_error ( EINFO_EIO_TRB )
  72. #define EINFO_EIO_TRB \
  73. __einfo_uniqify ( EINFO_EIO, ( 5 - 0 ), \
  74. "TRB error" )
  75. #define EIO_STALL \
  76. __einfo_error ( EINFO_EIO_STALL )
  77. #define EINFO_EIO_STALL \
  78. __einfo_uniqify ( EINFO_EIO, ( 6 - 0 ), \
  79. "Stall error" )
  80. #define EIO_RESOURCE \
  81. __einfo_error ( EINFO_EIO_RESOURCE )
  82. #define EINFO_EIO_RESOURCE \
  83. __einfo_uniqify ( EINFO_EIO, ( 7 - 0 ), \
  84. "Resource error" )
  85. #define EIO_BANDWIDTH \
  86. __einfo_error ( EINFO_EIO_BANDWIDTH )
  87. #define EINFO_EIO_BANDWIDTH \
  88. __einfo_uniqify ( EINFO_EIO, ( 8 - 0 ), \
  89. "Bandwidth error" )
  90. #define EIO_NO_SLOTS \
  91. __einfo_error ( EINFO_EIO_NO_SLOTS )
  92. #define EINFO_EIO_NO_SLOTS \
  93. __einfo_uniqify ( EINFO_EIO, ( 9 - 0 ), \
  94. "No slots available" )
  95. #define EIO_STREAM_TYPE \
  96. __einfo_error ( EINFO_EIO_STREAM_TYPE )
  97. #define EINFO_EIO_STREAM_TYPE \
  98. __einfo_uniqify ( EINFO_EIO, ( 10 - 0 ), \
  99. "Invalid stream type" )
  100. #define EIO_SLOT \
  101. __einfo_error ( EINFO_EIO_SLOT )
  102. #define EINFO_EIO_SLOT \
  103. __einfo_uniqify ( EINFO_EIO, ( 11 - 0 ), \
  104. "Slot not enabled" )
  105. #define EIO_ENDPOINT \
  106. __einfo_error ( EINFO_EIO_ENDPOINT )
  107. #define EINFO_EIO_ENDPOINT \
  108. __einfo_uniqify ( EINFO_EIO, ( 12 - 0 ), \
  109. "Endpoint not enabled" )
  110. #define EIO_SHORT \
  111. __einfo_error ( EINFO_EIO_SHORT )
  112. #define EINFO_EIO_SHORT \
  113. __einfo_uniqify ( EINFO_EIO, ( 13 - 0 ), \
  114. "Short packet" )
  115. #define EIO_UNDERRUN \
  116. __einfo_error ( EINFO_EIO_UNDERRUN )
  117. #define EINFO_EIO_UNDERRUN \
  118. __einfo_uniqify ( EINFO_EIO, ( 14 - 0 ), \
  119. "Ring underrun" )
  120. #define EIO_OVERRUN \
  121. __einfo_error ( EINFO_EIO_OVERRUN )
  122. #define EINFO_EIO_OVERRUN \
  123. __einfo_uniqify ( EINFO_EIO, ( 15 - 0 ), \
  124. "Ring overrun" )
  125. #define EIO_VF_RING_FULL \
  126. __einfo_error ( EINFO_EIO_VF_RING_FULL )
  127. #define EINFO_EIO_VF_RING_FULL \
  128. __einfo_uniqify ( EINFO_EIO, ( 16 - 0 ), \
  129. "Virtual function event ring full" )
  130. #define EIO_PARAMETER \
  131. __einfo_error ( EINFO_EIO_PARAMETER )
  132. #define EINFO_EIO_PARAMETER \
  133. __einfo_uniqify ( EINFO_EIO, ( 17 - 0 ), \
  134. "Parameter error" )
  135. #define EIO_BANDWIDTH_OVERRUN \
  136. __einfo_error ( EINFO_EIO_BANDWIDTH_OVERRUN )
  137. #define EINFO_EIO_BANDWIDTH_OVERRUN \
  138. __einfo_uniqify ( EINFO_EIO, ( 18 - 0 ), \
  139. "Bandwidth overrun" )
  140. #define EIO_CONTEXT \
  141. __einfo_error ( EINFO_EIO_CONTEXT )
  142. #define EINFO_EIO_CONTEXT \
  143. __einfo_uniqify ( EINFO_EIO, ( 19 - 0 ), \
  144. "Context state error" )
  145. #define EIO_NO_PING \
  146. __einfo_error ( EINFO_EIO_NO_PING )
  147. #define EINFO_EIO_NO_PING \
  148. __einfo_uniqify ( EINFO_EIO, ( 20 - 0 ), \
  149. "No ping response" )
  150. #define EIO_RING_FULL \
  151. __einfo_error ( EINFO_EIO_RING_FULL )
  152. #define EINFO_EIO_RING_FULL \
  153. __einfo_uniqify ( EINFO_EIO, ( 21 - 0 ), \
  154. "Event ring full" )
  155. #define EIO_INCOMPATIBLE \
  156. __einfo_error ( EINFO_EIO_INCOMPATIBLE )
  157. #define EINFO_EIO_INCOMPATIBLE \
  158. __einfo_uniqify ( EINFO_EIO, ( 22 - 0 ), \
  159. "Incompatible device" )
  160. #define EIO_MISSED \
  161. __einfo_error ( EINFO_EIO_MISSED )
  162. #define EINFO_EIO_MISSED \
  163. __einfo_uniqify ( EINFO_EIO, ( 23 - 0 ), \
  164. "Missed service error" )
  165. #define EIO_CMD_STOPPED \
  166. __einfo_error ( EINFO_EIO_CMD_STOPPED )
  167. #define EINFO_EIO_CMD_STOPPED \
  168. __einfo_uniqify ( EINFO_EIO, ( 24 - 0 ), \
  169. "Command ring stopped" )
  170. #define EIO_CMD_ABORTED \
  171. __einfo_error ( EINFO_EIO_CMD_ABORTED )
  172. #define EINFO_EIO_CMD_ABORTED \
  173. __einfo_uniqify ( EINFO_EIO, ( 25 - 0 ), \
  174. "Command aborted" )
  175. #define EIO_STOP \
  176. __einfo_error ( EINFO_EIO_STOP )
  177. #define EINFO_EIO_STOP \
  178. __einfo_uniqify ( EINFO_EIO, ( 26 - 0 ), \
  179. "Stopped" )
  180. #define EIO_STOP_LEN \
  181. __einfo_error ( EINFO_EIO_STOP_LEN )
  182. #define EINFO_EIO_STOP_LEN \
  183. __einfo_uniqify ( EINFO_EIO, ( 27 - 0 ), \
  184. "Stopped - length invalid" )
  185. #define EIO_STOP_SHORT \
  186. __einfo_error ( EINFO_EIO_STOP_SHORT )
  187. #define EINFO_EIO_STOP_SHORT \
  188. __einfo_uniqify ( EINFO_EIO, ( 28 - 0 ), \
  189. "Stopped - short packet" )
  190. #define EIO_LATENCY \
  191. __einfo_error ( EINFO_EIO_LATENCY )
  192. #define EINFO_EIO_LATENCY \
  193. __einfo_uniqify ( EINFO_EIO, ( 29 - 0 ), \
  194. "Maximum exit latency too large" )
  195. #define EIO_ISOCH \
  196. __einfo_error ( EINFO_EIO_ISOCH )
  197. #define EINFO_EIO_ISOCH \
  198. __einfo_uniqify ( EINFO_EIO, ( 31 - 0 ), \
  199. "Isochronous buffer overrun" )
  200. #define EPROTO_LOST \
  201. __einfo_error ( EINFO_EPROTO_LOST )
  202. #define EINFO_EPROTO_LOST \
  203. __einfo_uniqify ( EINFO_EPROTO, ( 32 - 32 ), \
  204. "Event lost" )
  205. #define EPROTO_UNDEFINED \
  206. __einfo_error ( EINFO_EPROTO_UNDEFINED )
  207. #define EINFO_EPROTO_UNDEFINED \
  208. __einfo_uniqify ( EINFO_EPROTO, ( 33 - 32 ), \
  209. "Undefined error" )
  210. #define EPROTO_STREAM_ID \
  211. __einfo_error ( EINFO_EPROTO_STREAM_ID )
  212. #define EINFO_EPROTO_STREAM_ID \
  213. __einfo_uniqify ( EINFO_EPROTO, ( 34 - 32 ), \
  214. "Invalid stream ID" )
  215. #define EPROTO_SECONDARY \
  216. __einfo_error ( EINFO_EPROTO_SECONDARY )
  217. #define EINFO_EPROTO_SECONDARY \
  218. __einfo_uniqify ( EINFO_EPROTO, ( 35 - 32 ), \
  219. "Secondary bandwidth error" )
  220. #define EPROTO_SPLIT \
  221. __einfo_error ( EINFO_EPROTO_SPLIT )
  222. #define EINFO_EPROTO_SPLIT \
  223. __einfo_uniqify ( EINFO_EPROTO, ( 36 - 32 ), \
  224. "Split transaction error" )
  225. #define ECODE(code) \
  226. ( ( (code) < 32 ) ? \
  227. EUNIQ ( EINFO_EIO, ( (code) & 31 ), EIO_DATA, EIO_BABBLE, \
  228. EIO_USB, EIO_TRB, EIO_STALL, EIO_RESOURCE, \
  229. EIO_BANDWIDTH, EIO_NO_SLOTS, EIO_STREAM_TYPE, \
  230. EIO_SLOT, EIO_ENDPOINT, EIO_SHORT, EIO_UNDERRUN, \
  231. EIO_OVERRUN, EIO_VF_RING_FULL, EIO_PARAMETER, \
  232. EIO_BANDWIDTH_OVERRUN, EIO_CONTEXT, EIO_NO_PING, \
  233. EIO_RING_FULL, EIO_INCOMPATIBLE, EIO_MISSED, \
  234. EIO_CMD_STOPPED, EIO_CMD_ABORTED, EIO_STOP, \
  235. EIO_STOP_LEN, EIO_STOP_SHORT, EIO_LATENCY, \
  236. EIO_ISOCH ) : \
  237. ( (code) < 64 ) ? \
  238. EUNIQ ( EINFO_EPROTO, ( (code) & 31 ), EPROTO_LOST, \
  239. EPROTO_UNDEFINED, EPROTO_STREAM_ID, \
  240. EPROTO_SECONDARY, EPROTO_SPLIT ) : \
  241. EFAULT )
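/* Worked example (illustrative only, not part of the driver): a
 * Transfer Event reporting completion code 6 ("Stall Error") would
 * typically be converted with something like
 *
 *     rc = -ECODE ( code );
 *
 * which selects the disambiguated EIO_STALL error, while code 34
 * ("Invalid stream ID") falls into the EPROTO range and selects
 * EPROTO_STREAM_ID.  Codes of 64 and above collapse to plain EFAULT.
 */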
  242. /******************************************************************************
  243. *
  244. * Register access
  245. *
  246. ******************************************************************************
  247. */
  248. /**
  249. * Initialise device
  250. *
  251. * @v xhci xHCI device
  252. * @v regs MMIO registers
  253. */
  254. static void xhci_init ( struct xhci_device *xhci, void *regs ) {
  255. uint32_t hcsparams1;
  256. uint32_t hcsparams2;
  257. uint32_t hccparams1;
  258. uint32_t pagesize;
  259. size_t caplength;
  260. size_t rtsoff;
  261. size_t dboff;
  262. /* Locate capability, operational, runtime, and doorbell registers */
  263. xhci->cap = regs;
  264. caplength = readb ( xhci->cap + XHCI_CAP_CAPLENGTH );
  265. rtsoff = readl ( xhci->cap + XHCI_CAP_RTSOFF );
  266. dboff = readl ( xhci->cap + XHCI_CAP_DBOFF );
  267. xhci->op = ( xhci->cap + caplength );
  268. xhci->run = ( xhci->cap + rtsoff );
  269. xhci->db = ( xhci->cap + dboff );
  270. DBGC2 ( xhci, "XHCI %p cap %08lx op %08lx run %08lx db %08lx\n",
  271. xhci, virt_to_phys ( xhci->cap ), virt_to_phys ( xhci->op ),
  272. virt_to_phys ( xhci->run ), virt_to_phys ( xhci->db ) );
  273. /* Read structural parameters 1 */
  274. hcsparams1 = readl ( xhci->cap + XHCI_CAP_HCSPARAMS1 );
  275. xhci->slots = XHCI_HCSPARAMS1_SLOTS ( hcsparams1 );
  276. xhci->intrs = XHCI_HCSPARAMS1_INTRS ( hcsparams1 );
  277. xhci->ports = XHCI_HCSPARAMS1_PORTS ( hcsparams1 );
  278. DBGC ( xhci, "XHCI %p has %d slots %d intrs %d ports\n",
  279. xhci, xhci->slots, xhci->intrs, xhci->ports );
  280. /* Read structural parameters 2 */
  281. hcsparams2 = readl ( xhci->cap + XHCI_CAP_HCSPARAMS2 );
  282. xhci->scratchpads = XHCI_HCSPARAMS2_SCRATCHPADS ( hcsparams2 );
  283. DBGC2 ( xhci, "XHCI %p needs %d scratchpads\n",
  284. xhci, xhci->scratchpads );
  285. /* Read capability parameters 1 */
  286. hccparams1 = readl ( xhci->cap + XHCI_CAP_HCCPARAMS1 );
  287. xhci->addr64 = XHCI_HCCPARAMS1_ADDR64 ( hccparams1 );
  288. xhci->csz_shift = XHCI_HCCPARAMS1_CSZ_SHIFT ( hccparams1 );
  289. xhci->xecp = XHCI_HCCPARAMS1_XECP ( hccparams1 );
  290. /* Read page size */
  291. pagesize = readl ( xhci->op + XHCI_OP_PAGESIZE );
  292. xhci->pagesize = XHCI_PAGESIZE ( pagesize );
  293. assert ( xhci->pagesize != 0 );
  294. assert ( ( ( xhci->pagesize ) & ( xhci->pagesize - 1 ) ) == 0 );
  295. DBGC2 ( xhci, "XHCI %p page size %zd bytes\n",
  296. xhci, xhci->pagesize );
  297. }
  298. /**
  299. * Find extended capability
  300. *
  301. * @v xhci xHCI device
  302. * @v id Capability ID
  303. * @v offset Offset to previous extended capability instance, or zero
  304. * @ret offset Offset to extended capability, or zero if not found
  305. */
  306. static unsigned int xhci_extended_capability ( struct xhci_device *xhci,
  307. unsigned int id,
  308. unsigned int offset ) {
  309. uint32_t xecp;
  310. unsigned int next;
  311. /* Locate the extended capability */
  312. while ( 1 ) {
  313. /* Locate first or next capability as applicable */
  314. if ( offset ) {
  315. xecp = readl ( xhci->cap + offset );
  316. next = XHCI_XECP_NEXT ( xecp );
  317. } else {
  318. next = xhci->xecp;
  319. }
  320. if ( ! next )
  321. return 0;
  322. offset += next;
  323. /* Check if this is the requested capability */
  324. xecp = readl ( xhci->cap + offset );
  325. if ( XHCI_XECP_ID ( xecp ) == id )
  326. return offset;
  327. }
  328. }
  329. /**
  330. * Write potentially 64-bit register
  331. *
  332. * @v xhci xHCI device
  333. * @v value Value
  334. * @v reg Register address
  335. * @ret rc Return status code
  336. */
  337. static inline __attribute__ (( always_inline )) int
  338. xhci_writeq ( struct xhci_device *xhci, physaddr_t value, void *reg ) {
  339. /* If this is a 32-bit build, then this can never fail
  340. * (allowing the compiler to optimise out the error path).
  341. */
  342. if ( sizeof ( value ) <= sizeof ( uint32_t ) ) {
  343. writel ( value, reg );
  344. writel ( 0, ( reg + sizeof ( uint32_t ) ) );
  345. return 0;
  346. }
  347. /* If the device does not support 64-bit addresses and this
  348. * address is outside the 32-bit address space, then fail.
  349. */
  350. if ( ( value & ~0xffffffffULL ) && ! xhci->addr64 ) {
  351. DBGC ( xhci, "XHCI %p cannot access address %lx\n",
  352. xhci, value );
  353. return -ENOTSUP;
  354. }
  355. /* If this is a 64-bit build, then writeq() is available */
  356. writeq ( value, reg );
  357. return 0;
  358. }
  359. /**
  360. * Calculate buffer alignment
  361. *
  362. * @v len Length
  363. * @ret align Buffer alignment
  364. *
  365. * Determine alignment required for a buffer which must be aligned to
  366. * at least XHCI_MIN_ALIGN and which must not cross a page boundary.
  367. */
  368. static inline size_t xhci_align ( size_t len ) {
  369. size_t align;
  370. /* Align to own length (rounded up to a power of two) */
  371. align = ( 1 << fls ( len - 1 ) );
  372. /* Round up to XHCI_MIN_ALIGN if needed */
  373. if ( align < XHCI_MIN_ALIGN )
  374. align = XHCI_MIN_ALIGN;
  375. return align;
  376. }
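/* Illustrative examples (assuming XHCI_MIN_ALIGN is 64 bytes, as
 * defined in xhci.h): a 41-byte buffer has fls ( 40 ) == 6, so it is
 * aligned to ( 1 << 6 ) = 64 bytes; a 24-byte buffer would naturally
 * align to 32 bytes but is rounded up to XHCI_MIN_ALIGN; a 2048-byte
 * buffer aligns to 2048 bytes, which also guarantees that it cannot
 * cross a 4kB page boundary.
 */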
  377. /**
  378. * Calculate device context offset
  379. *
  380. * @v xhci xHCI device
  381. * @v ctx Context index
  382. */
  383. static inline size_t xhci_device_context_offset ( struct xhci_device *xhci,
  384. unsigned int ctx ) {
  385. return ( XHCI_DCI ( ctx ) << xhci->csz_shift );
  386. }
  387. /**
  388. * Calculate input context offset
  389. *
  390. * @v xhci xHCI device
  391. * @v ctx Context index
  392. */
  393. static inline size_t xhci_input_context_offset ( struct xhci_device *xhci,
  394. unsigned int ctx ) {
  395. return ( XHCI_ICI ( ctx ) << xhci->csz_shift );
  396. }
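/* Illustrative example (context sizes assumed from HCCPARAMS1.CSZ,
 * which selects 32- or 64-byte contexts, i.e. a csz_shift of 5 or 6):
 * with 64-byte contexts, the endpoint context at device context index
 * 2 (EP1 OUT) sits at offset ( 2 << 6 ) = 128 bytes into the device
 * context, while the matching input context entry lies one slot
 * further along to leave room for the leading input control context.
 */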
  397. /******************************************************************************
  398. *
  399. * Diagnostics
  400. *
  401. ******************************************************************************
  402. */
  403. /**
  404. * Dump host controller registers
  405. *
  406. * @v xhci xHCI device
  407. */
  408. static inline void xhci_dump ( struct xhci_device *xhci ) {
  409. uint32_t usbcmd;
  410. uint32_t usbsts;
  411. uint32_t pagesize;
  412. uint32_t dnctrl;
  413. uint32_t config;
  414. /* Do nothing unless debugging is enabled */
  415. if ( ! DBG_LOG )
  416. return;
  417. /* Dump USBCMD */
  418. usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
  419. DBGC ( xhci, "XHCI %p USBCMD %08x%s%s\n", xhci, usbcmd,
  420. ( ( usbcmd & XHCI_USBCMD_RUN ) ? " run" : "" ),
  421. ( ( usbcmd & XHCI_USBCMD_HCRST ) ? " hcrst" : "" ) );
  422. /* Dump USBSTS */
  423. usbsts = readl ( xhci->op + XHCI_OP_USBSTS );
  424. DBGC ( xhci, "XHCI %p USBSTS %08x%s\n", xhci, usbsts,
  425. ( ( usbsts & XHCI_USBSTS_HCH ) ? " hch" : "" ) );
  426. /* Dump PAGESIZE */
  427. pagesize = readl ( xhci->op + XHCI_OP_PAGESIZE );
  428. DBGC ( xhci, "XHCI %p PAGESIZE %08x\n", xhci, pagesize );
  429. /* Dump DNCTRL */
  430. dnctrl = readl ( xhci->op + XHCI_OP_DNCTRL );
  431. DBGC ( xhci, "XHCI %p DNCTRL %08x\n", xhci, dnctrl );
  432. /* Dump CONFIG */
  433. config = readl ( xhci->op + XHCI_OP_CONFIG );
  434. DBGC ( xhci, "XHCI %p CONFIG %08x\n", xhci, config );
  435. }
  436. /**
  437. * Dump port registers
  438. *
  439. * @v xhci xHCI device
  440. * @v port Port number
  441. */
  442. static inline void xhci_dump_port ( struct xhci_device *xhci,
  443. unsigned int port ) {
  444. uint32_t portsc;
  445. uint32_t portpmsc;
  446. uint32_t portli;
  447. uint32_t porthlpmc;
  448. /* Do nothing unless debugging is enabled */
  449. if ( ! DBG_LOG )
  450. return;
  451. /* Dump PORTSC */
  452. portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port ) );
  453. DBGC ( xhci, "XHCI %p port %d PORTSC %08x%s%s%s%s psiv=%d\n",
  454. xhci, port, portsc,
  455. ( ( portsc & XHCI_PORTSC_CCS ) ? " ccs" : "" ),
  456. ( ( portsc & XHCI_PORTSC_PED ) ? " ped" : "" ),
  457. ( ( portsc & XHCI_PORTSC_PR ) ? " pr" : "" ),
  458. ( ( portsc & XHCI_PORTSC_PP ) ? " pp" : "" ),
  459. XHCI_PORTSC_PSIV ( portsc ) );
  460. /* Dump PORTPMSC */
  461. portpmsc = readl ( xhci->op + XHCI_OP_PORTPMSC ( port ) );
  462. DBGC ( xhci, "XHCI %p port %d PORTPMSC %08x\n", xhci, port, portpmsc );
  463. /* Dump PORTLI */
  464. portli = readl ( xhci->op + XHCI_OP_PORTLI ( port ) );
  465. DBGC ( xhci, "XHCI %p port %d PORTLI %08x\n", xhci, port, portli );
  466. /* Dump PORTHLPMC */
  467. porthlpmc = readl ( xhci->op + XHCI_OP_PORTHLPMC ( port ) );
  468. DBGC ( xhci, "XHCI %p port %d PORTHLPMC %08x\n",
  469. xhci, port, porthlpmc );
  470. }
  471. /******************************************************************************
  472. *
  473. * USB legacy support
  474. *
  475. ******************************************************************************
  476. */
  477. /**
  478. * Initialise USB legacy support
  479. *
  480. * @v xhci xHCI device
  481. */
  482. static void xhci_legacy_init ( struct xhci_device *xhci ) {
  483. unsigned int legacy;
  484. uint8_t bios;
  485. /* Locate USB legacy support capability (if present) */
  486. legacy = xhci_extended_capability ( xhci, XHCI_XECP_ID_LEGACY, 0 );
  487. if ( ! legacy ) {
  488. /* Not an error; capability may not be present */
  489. DBGC ( xhci, "XHCI %p has no USB legacy support capability\n",
  490. xhci );
  491. return;
  492. }
  493. /* Check if legacy USB support is enabled */
  494. bios = readb ( xhci->cap + legacy + XHCI_USBLEGSUP_BIOS );
  495. if ( ! ( bios & XHCI_USBLEGSUP_BIOS_OWNED ) ) {
  496. /* Not an error; already owned by OS */
  497. DBGC ( xhci, "XHCI %p USB legacy support already disabled\n",
  498. xhci );
  499. return;
  500. }
  501. /* Record presence of USB legacy support capability */
  502. xhci->legacy = legacy;
  503. }
  504. /**
  505. * Claim ownership from BIOS
  506. *
  507. * @v xhci xHCI device
  508. */
  509. static void xhci_legacy_claim ( struct xhci_device *xhci ) {
  510. uint32_t ctlsts;
  511. uint8_t bios;
  512. unsigned int i;
  513. /* Do nothing unless legacy support capability is present */
  514. if ( ! xhci->legacy )
  515. return;
  516. /* Claim ownership */
  517. writeb ( XHCI_USBLEGSUP_OS_OWNED,
  518. xhci->cap + xhci->legacy + XHCI_USBLEGSUP_OS );
  519. /* Wait for BIOS to release ownership */
  520. for ( i = 0 ; i < XHCI_USBLEGSUP_MAX_WAIT_MS ; i++ ) {
  521. /* Check if BIOS has released ownership */
  522. bios = readb ( xhci->cap + xhci->legacy + XHCI_USBLEGSUP_BIOS );
  523. if ( ! ( bios & XHCI_USBLEGSUP_BIOS_OWNED ) ) {
  524. DBGC ( xhci, "XHCI %p claimed ownership from BIOS\n",
  525. xhci );
  526. ctlsts = readl ( xhci->cap + xhci->legacy +
  527. XHCI_USBLEGSUP_CTLSTS );
  528. if ( ctlsts ) {
  529. DBGC ( xhci, "XHCI %p warning: BIOS retained "
  530. "SMIs: %08x\n", xhci, ctlsts );
  531. }
  532. return;
  533. }
  534. /* Delay */
  535. mdelay ( 1 );
  536. }
  537. /* BIOS did not release ownership. Claim it forcibly by
  538. * disabling all SMIs.
  539. */
  540. DBGC ( xhci, "XHCI %p could not claim ownership from BIOS: forcibly "
  541. "disabling SMIs\n", xhci );
  542. writel ( 0, xhci->cap + xhci->legacy + XHCI_USBLEGSUP_CTLSTS );
  543. }
  544. /**
  545. * Release ownership back to BIOS
  546. *
  547. * @v xhci xHCI device
  548. */
  549. static void xhci_legacy_release ( struct xhci_device *xhci ) {
  550. /* Do nothing unless legacy support capability is present */
  551. if ( ! xhci->legacy )
  552. return;
  553. /* Release ownership */
  554. writeb ( 0, xhci->cap + xhci->legacy + XHCI_USBLEGSUP_OS );
  555. DBGC ( xhci, "XHCI %p released ownership to BIOS\n", xhci );
  556. }
  557. /******************************************************************************
  558. *
  559. * Supported protocols
  560. *
  561. ******************************************************************************
  562. */
  563. /**
  564. * Transcribe port speed (for debugging)
  565. *
  566. * @v psi Protocol speed ID
  567. * @ret speed Transcribed speed
  568. */
  569. static inline const char * xhci_speed_name ( uint32_t psi ) {
  570. static const char *exponents[4] = { "", "k", "M", "G" };
  571. static char buf[ 10 /* "xxxxxXbps" + NUL */ ];
  572. unsigned int mantissa;
  573. unsigned int exponent;
  574. /* Extract mantissa and exponent */
  575. mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
  576. exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
  577. /* Transcribe speed */
  578. snprintf ( buf, sizeof ( buf ), "%d%sbps",
  579. mantissa, exponents[exponent] );
  580. return buf;
  581. }
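/* Illustrative example: a SuperSpeed PSI dword encoding a mantissa of
 * 5 with a bit-rate exponent of 3 (gigabits) is transcribed by the
 * function above as "5Gbps"; a full-speed entry with mantissa 12 and
 * exponent 2 would come out as "12Mbps".
 */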
  582. /**
  583. * Find supported protocol extended capability for a port
  584. *
  585. * @v xhci xHCI device
  586. * @v port Port number
  587. * @ret supported Offset to extended capability, or zero if not found
  588. */
  589. static unsigned int xhci_supported_protocol ( struct xhci_device *xhci,
  590. unsigned int port ) {
  591. unsigned int supported = 0;
  592. unsigned int offset;
  593. unsigned int count;
  594. uint32_t ports;
  595. /* Iterate over all supported protocol structures */
  596. while ( ( supported = xhci_extended_capability ( xhci,
  597. XHCI_XECP_ID_SUPPORTED,
  598. supported ) ) ) {
  599. /* Determine port range */
  600. ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
  601. offset = XHCI_SUPPORTED_PORTS_OFFSET ( ports );
  602. count = XHCI_SUPPORTED_PORTS_COUNT ( ports );
  603. /* Check if port lies within this range */
  604. if ( ( port - offset ) < count )
  605. return supported;
  606. }
  607. DBGC ( xhci, "XHCI %p port %d has no supported protocol\n",
  608. xhci, port );
  609. return 0;
  610. }
  611. /**
  612. * Find port protocol
  613. *
  614. * @v xhci xHCI device
  615. * @v port Port number
  616. * @ret protocol USB protocol, or zero if not found
  617. */
  618. static unsigned int xhci_port_protocol ( struct xhci_device *xhci,
  619. unsigned int port ) {
  620. unsigned int supported = xhci_supported_protocol ( xhci, port );
  621. union {
  622. uint32_t raw;
  623. char text[5];
  624. } name;
  625. unsigned int protocol;
  626. unsigned int type;
  627. unsigned int psic;
  628. unsigned int psiv;
  629. unsigned int i;
  630. uint32_t revision;
  631. uint32_t ports;
  632. uint32_t slot;
  633. uint32_t psi;
  634. /* Fail if there is no supported protocol */
  635. if ( ! supported )
  636. return 0;
  637. /* Determine protocol version */
  638. revision = readl ( xhci->cap + supported + XHCI_SUPPORTED_REVISION );
  639. protocol = XHCI_SUPPORTED_REVISION_VER ( revision );
  640. /* Describe port protocol */
  641. if ( DBG_EXTRA ) {
  642. name.raw = cpu_to_le32 ( readl ( xhci->cap + supported +
  643. XHCI_SUPPORTED_NAME ) );
  644. name.text[4] = '\0';
  645. slot = readl ( xhci->cap + supported + XHCI_SUPPORTED_SLOT );
  646. type = XHCI_SUPPORTED_SLOT_TYPE ( slot );
  647. DBGC2 ( xhci, "XHCI %p port %d %sv%04x type %d",
  648. xhci, port, name.text, protocol, type );
  649. ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
  650. psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );
  651. if ( psic ) {
  652. DBGC2 ( xhci, " speeds" );
  653. for ( i = 0 ; i < psic ; i++ ) {
  654. psi = readl ( xhci->cap + supported +
  655. XHCI_SUPPORTED_PSI ( i ) );
  656. psiv = XHCI_SUPPORTED_PSI_VALUE ( psi );
  657. DBGC2 ( xhci, " %d:%s", psiv,
  658. xhci_speed_name ( psi ) );
  659. }
  660. }
  661. DBGC2 ( xhci, "\n" );
  662. }
  663. return protocol;
  664. }
  665. /**
  666. * Find port slot type
  667. *
  668. * @v xhci xHCI device
  669. * @v port Port number
  670. * @ret type Slot type, or negative error
  671. */
  672. static int xhci_port_slot_type ( struct xhci_device *xhci, unsigned int port ) {
  673. unsigned int supported = xhci_supported_protocol ( xhci, port );
  674. unsigned int type;
  675. uint32_t slot;
  676. /* Fail if there is no supported protocol */
  677. if ( ! supported )
  678. return -ENOTSUP;
  679. /* Get slot type */
  680. slot = readl ( xhci->cap + supported + XHCI_SUPPORTED_SLOT );
  681. type = XHCI_SUPPORTED_SLOT_TYPE ( slot );
  682. return type;
  683. }
  684. /**
  685. * Find port speed
  686. *
  687. * @v xhci xHCI device
  688. * @v port Port number
  689. * @v psiv Protocol speed ID value
  690. * @ret speed Port speed, or negative error
  691. */
  692. static int xhci_port_speed ( struct xhci_device *xhci, unsigned int port,
  693. unsigned int psiv ) {
  694. unsigned int supported = xhci_supported_protocol ( xhci, port );
  695. unsigned int psic;
  696. unsigned int mantissa;
  697. unsigned int exponent;
  698. unsigned int speed;
  699. unsigned int i;
  700. uint32_t ports;
  701. uint32_t psi;
  702. /* Fail if there is no supported protocol */
  703. if ( ! supported )
  704. return -ENOTSUP;
  705. /* Get protocol speed ID count */
  706. ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
  707. psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );
  708. /* Use the default mappings if applicable */
  709. if ( ! psic ) {
  710. switch ( psiv ) {
  711. case XHCI_SPEED_LOW : return USB_SPEED_LOW;
  712. case XHCI_SPEED_FULL : return USB_SPEED_FULL;
  713. case XHCI_SPEED_HIGH : return USB_SPEED_HIGH;
  714. case XHCI_SPEED_SUPER : return USB_SPEED_SUPER;
  715. default:
  716. DBGC ( xhci, "XHCI %p port %d non-standard PSI value "
  717. "%d\n", xhci, port, psiv );
  718. return -ENOTSUP;
  719. }
  720. }
  721. /* Iterate over PSI dwords looking for a match */
  722. for ( i = 0 ; i < psic ; i++ ) {
  723. psi = readl ( xhci->cap + supported + XHCI_SUPPORTED_PSI ( i ));
  724. if ( psiv == XHCI_SUPPORTED_PSI_VALUE ( psi ) ) {
  725. mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
  726. exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
  727. speed = USB_SPEED ( mantissa, exponent );
  728. return speed;
  729. }
  730. }
  731. DBGC ( xhci, "XHCI %p port %d spurious PSI value %d\n",
  732. xhci, port, psiv );
  733. return -ENOENT;
  734. }
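/* Illustrative example (values assumed, not taken from this file): on
 * a port whose supported-protocol capability defines no explicit PSI
 * dwords (psic == 0), the default table above applies and the
 * standard high-speed PSIV maps directly to USB_SPEED_HIGH.  When PSI
 * dwords are present, a dword with mantissa 5 and exponent 3 instead
 * produces USB_SPEED ( 5, 3 ), i.e. 5 Gbps, assuming USB_SPEED()
 * simply packs the mantissa and exponent as defined in ipxe/usb.h.
 */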
  735. /**
  736. * Find protocol speed ID value
  737. *
  738. * @v xhci xHCI device
  739. * @v port Port number
  740. * @v speed USB speed
  741. * @ret psiv Protocol speed ID value, or negative error
  742. */
  743. static int xhci_port_psiv ( struct xhci_device *xhci, unsigned int port,
  744. unsigned int speed ) {
  745. unsigned int supported = xhci_supported_protocol ( xhci, port );
  746. unsigned int psic;
  747. unsigned int mantissa;
  748. unsigned int exponent;
  749. unsigned int psiv;
  750. unsigned int i;
  751. uint32_t ports;
  752. uint32_t psi;
  753. /* Fail if there is no supported protocol */
  754. if ( ! supported )
  755. return -ENOTSUP;
  756. /* Get protocol speed ID count */
  757. ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
  758. psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );
  759. /* Use the default mappings if applicable */
  760. if ( ! psic ) {
  761. switch ( speed ) {
  762. case USB_SPEED_LOW : return XHCI_SPEED_LOW;
  763. case USB_SPEED_FULL : return XHCI_SPEED_FULL;
  764. case USB_SPEED_HIGH : return XHCI_SPEED_HIGH;
  765. case USB_SPEED_SUPER : return XHCI_SPEED_SUPER;
  766. default:
  767. DBGC ( xhci, "XHCI %p port %d non-standard speed %d\n",
  768. xhci, port, speed );
  769. return -ENOTSUP;
  770. }
  771. }
  772. /* Iterate over PSI dwords looking for a match */
  773. for ( i = 0 ; i < psic ; i++ ) {
  774. psi = readl ( xhci->cap + supported + XHCI_SUPPORTED_PSI ( i ));
  775. mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
  776. exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
  777. if ( speed == USB_SPEED ( mantissa, exponent ) ) {
  778. psiv = XHCI_SUPPORTED_PSI_VALUE ( psi );
  779. return psiv;
  780. }
  781. }
  782. DBGC ( xhci, "XHCI %p port %d unrepresentable speed %#x\n",
  783. xhci, port, speed );
  784. return -ENOENT;
  785. }
  786. /******************************************************************************
  787. *
  788. * Device context base address array
  789. *
  790. ******************************************************************************
  791. */
  792. /**
  793. * Allocate device context base address array
  794. *
  795. * @v xhci xHCI device
  796. * @ret rc Return status code
  797. */
  798. static int xhci_dcbaa_alloc ( struct xhci_device *xhci ) {
  799. size_t len;
  800. physaddr_t dcbaap;
  801. int rc;
  802. /* Allocate and initialise structure. Must be at least
  803. * 64-byte aligned and must not cross a page boundary, so
  804. * align on its own size (rounded up to a power of two and
  805. * with a minimum of 64 bytes).
  806. */
  807. len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
  808. xhci->dcbaa = malloc_dma ( len, xhci_align ( len ) );
  809. if ( ! xhci->dcbaa ) {
  810. DBGC ( xhci, "XHCI %p could not allocate DCBAA\n", xhci );
  811. rc = -ENOMEM;
  812. goto err_alloc;
  813. }
  814. memset ( xhci->dcbaa, 0, len );
  815. /* Program DCBAA pointer */
  816. dcbaap = virt_to_phys ( xhci->dcbaa );
  817. if ( ( rc = xhci_writeq ( xhci, dcbaap,
  818. xhci->op + XHCI_OP_DCBAAP ) ) != 0 )
  819. goto err_writeq;
  820. DBGC2 ( xhci, "XHCI %p DCBAA at [%08lx,%08lx)\n",
  821. xhci, dcbaap, ( dcbaap + len ) );
  822. return 0;
  823. err_writeq:
  824. free_dma ( xhci->dcbaa, len );
  825. err_alloc:
  826. return rc;
  827. }
  828. /**
  829. * Free device context base address array
  830. *
  831. * @v xhci xHCI device
  832. */
  833. static void xhci_dcbaa_free ( struct xhci_device *xhci ) {
  834. size_t len;
  835. unsigned int i;
  836. /* Sanity check */
  837. for ( i = 0 ; i <= xhci->slots ; i++ )
  838. assert ( xhci->dcbaa[i] == 0 );
  839. /* Clear DCBAA pointer */
  840. xhci_writeq ( xhci, 0, xhci->op + XHCI_OP_DCBAAP );
  841. /* Free DCBAA */
  842. len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
  843. free_dma ( xhci->dcbaa, len );
  844. }
  845. /******************************************************************************
  846. *
  847. * Scratchpad buffers
  848. *
  849. ******************************************************************************
  850. */
  851. /**
  852. * Allocate scratchpad buffers
  853. *
  854. * @v xhci xHCI device
  855. * @ret rc Return status code
  856. */
  857. static int xhci_scratchpad_alloc ( struct xhci_device *xhci ) {
  858. size_t array_len;
  859. size_t len;
  860. physaddr_t phys;
  861. unsigned int i;
  862. int rc;
  863. /* Do nothing if no scratchpad buffers are used */
  864. if ( ! xhci->scratchpads )
  865. return 0;
  866. /* Allocate scratchpads */
  867. len = ( xhci->scratchpads * xhci->pagesize );
  868. xhci->scratchpad = umalloc ( len );
  869. if ( ! xhci->scratchpad ) {
  870. DBGC ( xhci, "XHCI %p could not allocate scratchpad buffers\n",
  871. xhci );
  872. rc = -ENOMEM;
  873. goto err_alloc;
  874. }
  875. memset_user ( xhci->scratchpad, 0, 0, len );
  876. /* Allocate scratchpad array */
  877. array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
  878. xhci->scratchpad_array =
  879. malloc_dma ( array_len, xhci_align ( array_len ) );
  880. if ( ! xhci->scratchpad_array ) {
  881. DBGC ( xhci, "XHCI %p could not allocate scratchpad buffer "
  882. "array\n", xhci );
  883. rc = -ENOMEM;
  884. goto err_alloc_array;
  885. }
  886. /* Populate scratchpad array */
  887. for ( i = 0 ; i < xhci->scratchpads ; i++ ) {
  888. phys = user_to_phys ( xhci->scratchpad, ( i * xhci->pagesize ));
  889. xhci->scratchpad_array[i] = phys;
  890. }
  891. /* Set scratchpad array pointer */
  892. assert ( xhci->dcbaa != NULL );
  893. xhci->dcbaa[0] = cpu_to_le64 ( virt_to_phys ( xhci->scratchpad_array ));
  894. DBGC2 ( xhci, "XHCI %p scratchpad [%08lx,%08lx) array [%08lx,%08lx)\n",
  895. xhci, user_to_phys ( xhci->scratchpad, 0 ),
  896. user_to_phys ( xhci->scratchpad, len ),
  897. virt_to_phys ( xhci->scratchpad_array ),
  898. ( virt_to_phys ( xhci->scratchpad_array ) + array_len ) );
  899. return 0;
  900. free_dma ( xhci->scratchpad_array, array_len );
  901. err_alloc_array:
  902. ufree ( xhci->scratchpad );
  903. err_alloc:
  904. return rc;
  905. }
  906. /**
  907. * Free scratchpad buffers
  908. *
  909. * @v xhci xHCI device
  910. */
  911. static void xhci_scratchpad_free ( struct xhci_device *xhci ) {
  912. size_t array_len;
  913. /* Do nothing if no scratchpad buffers are used */
  914. if ( ! xhci->scratchpads )
  915. return;
  916. /* Clear scratchpad array pointer */
  917. assert ( xhci->dcbaa != NULL );
  918. xhci->dcbaa[0] = 0;
  919. /* Free scratchpad array */
  920. array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
  921. free_dma ( xhci->scratchpad_array, array_len );
  922. /* Free scratchpads */
  923. ufree ( xhci->scratchpad );
  924. }
  925. /******************************************************************************
  926. *
  927. * Run / stop / reset
  928. *
  929. ******************************************************************************
  930. */
  931. /**
  932. * Start xHCI device
  933. *
  934. * @v xhci xHCI device
  935. */
  936. static void xhci_run ( struct xhci_device *xhci ) {
  937. uint32_t config;
  938. uint32_t usbcmd;
  939. /* Configure number of device slots */
  940. config = readl ( xhci->op + XHCI_OP_CONFIG );
  941. config &= ~XHCI_CONFIG_MAX_SLOTS_EN_MASK;
  942. config |= XHCI_CONFIG_MAX_SLOTS_EN ( xhci->slots );
  943. writel ( config, xhci->op + XHCI_OP_CONFIG );
  944. /* Set run/stop bit */
  945. usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
  946. usbcmd |= XHCI_USBCMD_RUN;
  947. writel ( usbcmd, xhci->op + XHCI_OP_USBCMD );
  948. }
  949. /**
  950. * Stop xHCI device
  951. *
  952. * @v xhci xHCI device
  953. * @ret rc Return status code
  954. */
  955. static int xhci_stop ( struct xhci_device *xhci ) {
  956. uint32_t usbcmd;
  957. uint32_t usbsts;
  958. unsigned int i;
  959. /* Clear run/stop bit */
  960. usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
  961. usbcmd &= ~XHCI_USBCMD_RUN;
  962. writel ( usbcmd, xhci->op + XHCI_OP_USBCMD );
  963. /* Wait for device to stop */
  964. for ( i = 0 ; i < XHCI_STOP_MAX_WAIT_MS ; i++ ) {
  965. /* Check if device is stopped */
  966. usbsts = readl ( xhci->op + XHCI_OP_USBSTS );
  967. if ( usbsts & XHCI_USBSTS_HCH )
  968. return 0;
  969. /* Delay */
  970. mdelay ( 1 );
  971. }
  972. DBGC ( xhci, "XHCI %p timed out waiting for stop\n", xhci );
  973. return -ETIMEDOUT;
  974. }
  975. /**
  976. * Reset xHCI device
  977. *
  978. * @v xhci xHCI device
  979. * @ret rc Return status code
  980. */
  981. static int xhci_reset ( struct xhci_device *xhci ) {
  982. uint32_t usbcmd;
  983. unsigned int i;
  984. int rc;
  985. /* The xHCI specification states that resetting a running
  986. * device may result in undefined behaviour, so try stopping
  987. * it first.
  988. */
  989. if ( ( rc = xhci_stop ( xhci ) ) != 0 ) {
  990. /* Ignore errors and attempt to reset the device anyway */
  991. }
  992. /* Reset device */
  993. writel ( XHCI_USBCMD_HCRST, xhci->op + XHCI_OP_USBCMD );
  994. /* Wait for reset to complete */
  995. for ( i = 0 ; i < XHCI_RESET_MAX_WAIT_MS ; i++ ) {
  996. /* Check if reset is complete */
  997. usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
  998. if ( ! ( usbcmd & XHCI_USBCMD_HCRST ) )
  999. return 0;
  1000. /* Delay */
  1001. mdelay ( 1 );
  1002. }
  1003. DBGC ( xhci, "XHCI %p timed out waiting for reset\n", xhci );
  1004. return -ETIMEDOUT;
  1005. }
  1006. /******************************************************************************
  1007. *
  1008. * Transfer request blocks
  1009. *
  1010. ******************************************************************************
  1011. */
  1012. /**
  1013. * Allocate transfer request block ring
  1014. *
  1015. * @v xhci xHCI device
  1016. * @v ring TRB ring
  1017. * @v shift Ring size (log2)
  1018. * @v slot Device slot
  1019. * @v target Doorbell target
  1020. * @v stream Doorbell stream ID
  1021. * @ret rc Return status code
  1022. */
  1023. static int xhci_ring_alloc ( struct xhci_device *xhci,
  1024. struct xhci_trb_ring *ring,
  1025. unsigned int shift, unsigned int slot,
  1026. unsigned int target, unsigned int stream ) {
  1027. struct xhci_trb_link *link;
  1028. unsigned int count;
  1029. int rc;
  1030. /* Sanity check */
  1031. assert ( shift > 0 );
  1032. /* Initialise structure */
  1033. memset ( ring, 0, sizeof ( *ring ) );
  1034. ring->shift = shift;
  1035. count = ( 1U << shift );
  1036. ring->mask = ( count - 1 );
  1037. ring->len = ( ( count + 1 /* Link TRB */ ) * sizeof ( ring->trb[0] ) );
  1038. ring->db = ( xhci->db + ( slot * sizeof ( ring->dbval ) ) );
  1039. ring->dbval = XHCI_DBVAL ( target, stream );
  1040. /* Allocate I/O buffers */
  1041. ring->iobuf = zalloc ( count * sizeof ( ring->iobuf[0] ) );
  1042. if ( ! ring->iobuf ) {
  1043. rc = -ENOMEM;
  1044. goto err_alloc_iobuf;
  1045. }
  1046. /* Allocate TRBs */
  1047. ring->trb = malloc_dma ( ring->len, xhci_align ( ring->len ) );
  1048. if ( ! ring->trb ) {
  1049. rc = -ENOMEM;
  1050. goto err_alloc_trb;
  1051. }
  1052. memset ( ring->trb, 0, ring->len );
  1053. /* Initialise Link TRB */
  1054. link = &ring->trb[count].link;
  1055. link->next = cpu_to_le64 ( virt_to_phys ( ring->trb ) );
  1056. link->flags = XHCI_TRB_TC;
  1057. link->type = XHCI_TRB_LINK;
  1058. ring->link = link;
  1059. return 0;
  1060. free_dma ( ring->trb, ring->len );
  1061. err_alloc_trb:
  1062. free ( ring->iobuf );
  1063. err_alloc_iobuf:
  1064. return rc;
  1065. }
  1066. /**
  1067. * Reset transfer request block ring
  1068. *
  1069. * @v ring TRB ring
  1070. */
  1071. static void xhci_ring_reset ( struct xhci_trb_ring *ring ) {
  1072. unsigned int count = ( 1U << ring->shift );
  1073. /* Reset producer and consumer counters */
  1074. ring->prod = 0;
  1075. ring->cons = 0;
  1076. /* Reset TRBs (except Link TRB) */
  1077. memset ( ring->trb, 0, ( count * sizeof ( ring->trb[0] ) ) );
  1078. }
  1079. /**
  1080. * Free transfer request block ring
  1081. *
  1082. * @v ring TRB ring
  1083. */
  1084. static void xhci_ring_free ( struct xhci_trb_ring *ring ) {
  1085. unsigned int count = ( 1U << ring->shift );
  1086. unsigned int i;
  1087. /* Sanity checks */
  1088. assert ( ring->cons == ring->prod );
  1089. for ( i = 0 ; i < count ; i++ )
  1090. assert ( ring->iobuf[i] == NULL );
  1091. /* Free TRBs */
  1092. free_dma ( ring->trb, ring->len );
  1093. /* Free I/O buffers */
  1094. free ( ring->iobuf );
  1095. }
  1096. /**
  1097. * Enqueue a transfer request block
  1098. *
  1099. * @v ring TRB ring
  1100. * @v iobuf I/O buffer (if any)
  1101. * @v trb Transfer request block (with empty Cycle flag)
  1102. * @ret rc Return status code
  1103. *
  1104. * This operation does not implicitly ring the doorbell register.
  1105. */
  1106. static int xhci_enqueue ( struct xhci_trb_ring *ring, struct io_buffer *iobuf,
  1107. const union xhci_trb *trb ) {
  1108. union xhci_trb *dest;
  1109. unsigned int prod;
  1110. unsigned int mask;
  1111. unsigned int index;
  1112. unsigned int cycle;
  1113. /* Sanity check */
  1114. assert ( ! ( trb->common.flags & XHCI_TRB_C ) );
  1115. /* Fail if ring is full */
  1116. if ( ! xhci_ring_remaining ( ring ) )
  1117. return -ENOBUFS;
  1118. /* Update producer counter (and link TRB, if applicable) */
  1119. prod = ring->prod++;
  1120. mask = ring->mask;
  1121. cycle = ( ( ~( prod >> ring->shift ) ) & XHCI_TRB_C );
  1122. index = ( prod & mask );
  1123. if ( index == 0 )
  1124. ring->link->flags = ( XHCI_TRB_TC | ( cycle ^ XHCI_TRB_C ) );
  1125. /* Record I/O buffer */
  1126. ring->iobuf[index] = iobuf;
  1127. /* Enqueue TRB */
  1128. dest = &ring->trb[index];
  1129. dest->template.parameter = trb->template.parameter;
  1130. dest->template.status = trb->template.status;
  1131. wmb();
  1132. dest->template.control = ( trb->template.control |
  1133. cpu_to_le32 ( cycle ) );
  1134. return 0;
  1135. }
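/* Worked example (illustrative only): with shift == 3 the ring holds
 * 8 TRBs plus the Link TRB.  TRBs enqueued at prod == 0..7 carry a
 * set cycle bit; at prod == 8 the index wraps to 0 and the producer
 * cycle clears.  Whenever the index wraps, the Link TRB is rewritten
 * so that its cycle bit matches the pass just completed, meaning the
 * controller will follow the link (and toggle its own cycle state)
 * only after the producer has started filling the next pass.
 */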
  1136. /**
  1137. * Dequeue a transfer request block
  1138. *
  1139. * @v ring TRB ring
  1140. * @ret iobuf I/O buffer
  1141. */
  1142. static struct io_buffer * xhci_dequeue ( struct xhci_trb_ring *ring ) {
  1143. struct io_buffer *iobuf;
  1144. unsigned int cons;
  1145. unsigned int mask;
  1146. unsigned int index;
  1147. /* Sanity check */
  1148. assert ( xhci_ring_fill ( ring ) != 0 );
  1149. /* Update consumer counter */
  1150. cons = ring->cons++;
  1151. mask = ring->mask;
  1152. index = ( cons & mask );
  1153. /* Retrieve I/O buffer */
  1154. iobuf = ring->iobuf[index];
  1155. ring->iobuf[index] = NULL;
  1156. return iobuf;
  1157. }
  1158. /**
  1159. * Enqueue multiple transfer request blocks
  1160. *
  1161. * @v ring TRB ring
  1162. * @v iobuf I/O buffer
  1163. * @v trbs Transfer request blocks (with empty Cycle flag)
  1164. * @v count Number of transfer request blocks
  1165. * @ret rc Return status code
  1166. *
  1167. * This operation does not implicitly ring the doorbell register.
  1168. */
  1169. static int xhci_enqueue_multi ( struct xhci_trb_ring *ring,
  1170. struct io_buffer *iobuf,
  1171. const union xhci_trb *trbs,
  1172. unsigned int count ) {
  1173. const union xhci_trb *trb = trbs;
  1174. int rc;
  1175. /* Sanity check */
  1176. assert ( iobuf != NULL );
  1177. /* Fail if ring does not have sufficient space */
  1178. if ( xhci_ring_remaining ( ring ) < count )
  1179. return -ENOBUFS;
  1180. /* Enqueue each TRB, recording the I/O buffer with the final TRB */
  1181. while ( count-- ) {
  1182. rc = xhci_enqueue ( ring, ( count ? NULL : iobuf ), trb++ );
  1183. assert ( rc == 0 ); /* Should never be able to fail */
  1184. }
  1185. return 0;
  1186. }
  1187. /**
  1188. * Dequeue multiple transfer request blocks
  1189. *
  1190. * @v ring TRB ring
  1191. * @ret iobuf I/O buffer
  1192. */
  1193. static struct io_buffer * xhci_dequeue_multi ( struct xhci_trb_ring *ring ) {
  1194. struct io_buffer *iobuf;
  1195. /* Dequeue TRBs until we reach the final TRB for an I/O buffer */
  1196. do {
  1197. iobuf = xhci_dequeue ( ring );
  1198. } while ( iobuf == NULL );
  1199. return iobuf;
  1200. }
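/* Illustrative example (not part of the driver): a control transfer
 * built from three TRBs (Setup, Data, Status) and queued via
 * xhci_enqueue_multi() records the I/O buffer only against the final
 * Status TRB; xhci_dequeue_multi() therefore pops all three slots and
 * returns that single buffer, keeping the producer and consumer
 * counters in step across the whole multi-TRB transfer descriptor.
 */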
  1201. /**
  1202. * Ring doorbell register
  1203. *
  1204. * @v ring TRB ring
  1205. */
  1206. static inline __attribute__ (( always_inline )) void
  1207. xhci_doorbell ( struct xhci_trb_ring *ring ) {
  1208. wmb();
  1209. writel ( ring->dbval, ring->db );
  1210. }
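/* Typical usage sketch (hedged; the real submission paths appear
 * later in this file): a caller builds a TRB with the cycle flag
 * clear, enqueues it, and only then rings the doorbell, e.g.
 *
 *     memset ( &trb, 0, sizeof ( trb ) );
 *     ... fill in trb ...
 *     if ( ( rc = xhci_enqueue ( ring, iobuf, &trb ) ) != 0 )
 *             return rc;
 *     xhci_doorbell ( ring );
 *
 * The write barrier inside xhci_doorbell() ensures that the TRB
 * contents are visible to the controller before the doorbell write
 * is posted.
 */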
  1211. /******************************************************************************
  1212. *
  1213. * Command and event rings
  1214. *
  1215. ******************************************************************************
  1216. */
  1217. /**
  1218. * Allocate command ring
  1219. *
  1220. * @v xhci xHCI device
  1221. * @ret rc Return status code
  1222. */
  1223. static int xhci_command_alloc ( struct xhci_device *xhci ) {
  1224. physaddr_t crp;
  1225. int rc;
  1226. /* Allocate TRB ring */
  1227. if ( ( rc = xhci_ring_alloc ( xhci, &xhci->command, XHCI_CMD_TRBS_LOG2,
  1228. 0, 0, 0 ) ) != 0 )
  1229. goto err_ring_alloc;
  1230. /* Program command ring control register */
  1231. crp = virt_to_phys ( xhci->command.trb );
  1232. if ( ( rc = xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ),
  1233. xhci->op + XHCI_OP_CRCR ) ) != 0 )
  1234. goto err_writeq;
  1235. DBGC2 ( xhci, "XHCI %p CRCR at [%08lx,%08lx)\n",
  1236. xhci, crp, ( crp + xhci->command.len ) );
  1237. return 0;
  1238. err_writeq:
  1239. xhci_ring_free ( &xhci->command );
  1240. err_ring_alloc:
  1241. return rc;
  1242. }
  1243. /**
  1244. * Free command ring
  1245. *
  1246. * @v xhci xHCI device
  1247. */
  1248. static void xhci_command_free ( struct xhci_device *xhci ) {
  1249. /* Sanity check */
  1250. assert ( ( readl ( xhci->op + XHCI_OP_CRCR ) & XHCI_CRCR_CRR ) == 0 );
  1251. /* Clear command ring control register */
  1252. xhci_writeq ( xhci, 0, xhci->op + XHCI_OP_CRCR );
  1253. /* Free TRB ring */
  1254. xhci_ring_free ( &xhci->command );
  1255. }
  1256. /**
  1257. * Allocate event ring
  1258. *
  1259. * @v xhci xHCI device
  1260. * @ret rc Return status code
  1261. */
  1262. static int xhci_event_alloc ( struct xhci_device *xhci ) {
  1263. struct xhci_event_ring *event = &xhci->event;
  1264. unsigned int count;
  1265. size_t len;
  1266. int rc;
  1267. /* Allocate event ring */
  1268. count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
  1269. len = ( count * sizeof ( event->trb[0] ) );
  1270. event->trb = malloc_dma ( len, xhci_align ( len ) );
  1271. if ( ! event->trb ) {
  1272. rc = -ENOMEM;
  1273. goto err_alloc_trb;
  1274. }
  1275. memset ( event->trb, 0, len );
  1276. /* Allocate event ring segment table */
  1277. event->segment = malloc_dma ( sizeof ( event->segment[0] ),
  1278. xhci_align ( sizeof (event->segment[0])));
  1279. if ( ! event->segment ) {
  1280. rc = -ENOMEM;
  1281. goto err_alloc_segment;
  1282. }
  1283. memset ( event->segment, 0, sizeof ( event->segment[0] ) );
  1284. event->segment[0].base = cpu_to_le64 ( virt_to_phys ( event->trb ) );
  1285. event->segment[0].count = cpu_to_le32 ( count );
  1286. /* Program event ring registers */
  1287. writel ( 1, xhci->run + XHCI_RUN_ERSTSZ ( 0 ) );
  1288. if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->trb ),
  1289. xhci->run + XHCI_RUN_ERDP ( 0 ) ) ) != 0 )
  1290. goto err_writeq_erdp;
  1291. if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->segment ),
  1292. xhci->run + XHCI_RUN_ERSTBA ( 0 ) ) ) != 0 )
  1293. goto err_writeq_erstba;
  1294. DBGC2 ( xhci, "XHCI %p event ring [%08lx,%08lx) table [%08lx,%08lx)\n",
  1295. xhci, virt_to_phys ( event->trb ),
  1296. ( virt_to_phys ( event->trb ) + len ),
  1297. virt_to_phys ( event->segment ),
  1298. ( virt_to_phys ( event->segment ) +
  1299. sizeof (event->segment[0] ) ) );
  1300. return 0;
  1301. xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERSTBA ( 0 ) );
  1302. err_writeq_erstba:
  1303. xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
  1304. err_writeq_erdp:
1305. free_dma ( event->segment, sizeof ( event->segment[0] ) );
1306. err_alloc_segment:
1307. free_dma ( event->trb, len );
  1308. err_alloc_trb:
  1309. return rc;
  1310. }
  1311. /**
  1312. * Free event ring
  1313. *
  1314. * @v xhci xHCI device
  1315. */
  1316. static void xhci_event_free ( struct xhci_device *xhci ) {
  1317. struct xhci_event_ring *event = &xhci->event;
  1318. unsigned int count;
  1319. size_t len;
  1320. /* Clear event ring registers */
  1321. writel ( 0, xhci->run + XHCI_RUN_ERSTSZ ( 0 ) );
  1322. xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERSTBA ( 0 ) );
  1323. xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
  1324. /* Free event ring segment table */
  1325. free_dma ( event->segment, sizeof ( event->segment[0] ) );
  1326. /* Free event ring */
  1327. count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
  1328. len = ( count * sizeof ( event->trb[0] ) );
  1329. free_dma ( event->trb, len );
  1330. }
  1331. /**
  1332. * Handle transfer event
  1333. *
  1334. * @v xhci xHCI device
  1335. * @v transfer Transfer event TRB
  1336. */
  1337. static void xhci_transfer ( struct xhci_device *xhci,
  1338. struct xhci_trb_transfer *transfer ) {
  1339. struct xhci_slot *slot;
  1340. struct xhci_endpoint *endpoint;
  1341. struct io_buffer *iobuf;
  1342. int rc;
  1343. /* Profile transfer events */
  1344. profile_start ( &xhci_transfer_profiler );
  1345. /* Identify slot */
  1346. if ( ( transfer->slot > xhci->slots ) ||
  1347. ( ( slot = xhci->slot[transfer->slot] ) == NULL ) ) {
  1348. DBGC ( xhci, "XHCI %p transfer event invalid slot %d:\n",
  1349. xhci, transfer->slot );
  1350. DBGC_HDA ( xhci, 0, transfer, sizeof ( *transfer ) );
  1351. return;
  1352. }
  1353. /* Identify endpoint */
  1354. if ( ( transfer->endpoint > XHCI_CTX_END ) ||
  1355. ( ( endpoint = slot->endpoint[transfer->endpoint] ) == NULL ) ) {
  1356. DBGC ( xhci, "XHCI %p slot %d transfer event invalid epid "
  1357. "%d:\n", xhci, slot->id, transfer->endpoint );
  1358. DBGC_HDA ( xhci, 0, transfer, sizeof ( *transfer ) );
  1359. return;
  1360. }
  1361. /* Dequeue TRB(s) */
  1362. iobuf = xhci_dequeue_multi ( &endpoint->ring );
  1363. assert ( iobuf != NULL );
  1364. /* Check for errors */
  1365. if ( ! ( ( transfer->code == XHCI_CMPLT_SUCCESS ) ||
  1366. ( transfer->code == XHCI_CMPLT_SHORT ) ) ) {
  1367. /* Construct error */
  1368. rc = -ECODE ( transfer->code );
  1369. DBGC ( xhci, "XHCI %p slot %d ctx %d failed (code %d): %s\n",
  1370. xhci, slot->id, endpoint->ctx, transfer->code,
  1371. strerror ( rc ) );
  1372. DBGC_HDA ( xhci, 0, transfer, sizeof ( *transfer ) );
  1373. /* Sanity check */
  1374. assert ( ( endpoint->context->state & XHCI_ENDPOINT_STATE_MASK )
  1375. != XHCI_ENDPOINT_RUNNING );
  1376. /* Report failure to USB core */
  1377. usb_complete_err ( endpoint->ep, iobuf, rc );
  1378. return;
  1379. }
  1380. /* Record actual transfer size */
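/* The transfer event's residual field holds the untransferred byte count; trimming it leaves iob_len() equal to the number of bytes actually transferred */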
  1381. iob_unput ( iobuf, le16_to_cpu ( transfer->residual ) );
  1382. /* Sanity check (for successful completions only) */
  1383. assert ( xhci_ring_consumed ( &endpoint->ring ) ==
  1384. le64_to_cpu ( transfer->transfer ) );
  1385. /* Report completion to USB core */
  1386. usb_complete ( endpoint->ep, iobuf );
  1387. profile_stop ( &xhci_transfer_profiler );
  1388. }
  1389. /**
  1390. * Handle command completion event
  1391. *
  1392. * @v xhci xHCI device
  1393. * @v complete Command completion event
  1394. */
  1395. static void xhci_complete ( struct xhci_device *xhci,
  1396. struct xhci_trb_complete *complete ) {
  1397. int rc;
  1398. /* Ignore "command ring stopped" notifications */
  1399. if ( complete->code == XHCI_CMPLT_CMD_STOPPED ) {
  1400. DBGC2 ( xhci, "XHCI %p command ring stopped\n", xhci );
  1401. return;
  1402. }
  1403. /* Ignore unexpected completions */
  1404. if ( ! xhci->pending ) {
  1405. rc = -ECODE ( complete->code );
  1406. DBGC ( xhci, "XHCI %p unexpected completion (code %d): %s\n",
  1407. xhci, complete->code, strerror ( rc ) );
  1408. DBGC_HDA ( xhci, 0, complete, sizeof ( *complete ) );
  1409. return;
  1410. }
  1411. /* Dequeue command TRB */
  1412. xhci_dequeue ( &xhci->command );
  1413. /* Sanity check */
  1414. assert ( xhci_ring_consumed ( &xhci->command ) ==
  1415. le64_to_cpu ( complete->command ) );
  1416. /* Record completion */
  1417. memcpy ( xhci->pending, complete, sizeof ( *xhci->pending ) );
  1418. xhci->pending = NULL;
  1419. }
  1420. /**
  1421. * Handle port status event
  1422. *
  1423. * @v xhci xHCI device
  1424. * @v port Port status event
  1425. */
  1426. static void xhci_port_status ( struct xhci_device *xhci,
  1427. struct xhci_trb_port_status *port ) {
  1428. uint32_t portsc;
  1429. /* Sanity check */
  1430. assert ( ( port->port > 0 ) && ( port->port <= xhci->ports ) );
  1431. /* Clear port status change bits */
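/* Change bits are write-one-to-clear; XHCI_PORTSC_PRESERVE masks out the other writable bits (such as PED) so that the write has no further side effects */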
  1432. portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->port ) );
  1433. portsc &= ( XHCI_PORTSC_PRESERVE | XHCI_PORTSC_CHANGE );
  1434. writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->port ) );
  1435. /* Report port status change */
  1436. usb_port_changed ( usb_port ( xhci->bus->hub, port->port ) );
  1437. }
  1438. /**
  1439. * Handle host controller event
  1440. *
  1441. * @v xhci xHCI device
  1442. * @v host Host controller event
  1443. */
  1444. static void xhci_host_controller ( struct xhci_device *xhci,
  1445. struct xhci_trb_host_controller *host ) {
  1446. int rc;
  1447. /* Construct error */
  1448. rc = -ECODE ( host->code );
  1449. DBGC ( xhci, "XHCI %p host controller event (code %d): %s\n",
  1450. xhci, host->code, strerror ( rc ) );
  1451. }
  1452. /**
  1453. * Poll event ring
  1454. *
  1455. * @v xhci xHCI device
  1456. */
  1457. static void xhci_event_poll ( struct xhci_device *xhci ) {
  1458. struct xhci_event_ring *event = &xhci->event;
  1459. union xhci_trb *trb;
  1460. unsigned int shift = XHCI_EVENT_TRBS_LOG2;
  1461. unsigned int count = ( 1 << shift );
  1462. unsigned int mask = ( count - 1 );
  1463. unsigned int consumed;
  1464. unsigned int type;
  1465. /* Poll for events */
  1466. profile_start ( &xhci_event_profiler );
  1467. for ( consumed = 0 ; ; consumed++ ) {
  1468. /* Stop if we reach an empty TRB */
  1469. rmb();
  1470. trb = &event->trb[ event->cons & mask ];
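/* The low bit of ( cons >> shift ) flips on each pass around the ring, while the controller writes each pass with the opposite Cycle bit value; a TRB whose Cycle bit cancels out here is either empty or stale from the previous pass */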
  1471. if ( ! ( ( trb->common.flags ^
  1472. ( event->cons >> shift ) ) & XHCI_TRB_C ) )
  1473. break;
  1474. /* Handle TRB */
  1475. type = ( trb->common.type & XHCI_TRB_TYPE_MASK );
  1476. switch ( type ) {
  1477. case XHCI_TRB_TRANSFER :
  1478. xhci_transfer ( xhci, &trb->transfer );
  1479. break;
  1480. case XHCI_TRB_COMPLETE :
  1481. xhci_complete ( xhci, &trb->complete );
  1482. break;
  1483. case XHCI_TRB_PORT_STATUS:
  1484. xhci_port_status ( xhci, &trb->port );
  1485. break;
  1486. case XHCI_TRB_HOST_CONTROLLER:
  1487. xhci_host_controller ( xhci, &trb->host );
  1488. break;
  1489. default:
  1490. DBGC ( xhci, "XHCI %p unrecognised event %#x\n:",
  1491. xhci, event->cons );
  1492. DBGC_HDA ( xhci, virt_to_phys ( trb ),
  1493. trb, sizeof ( *trb ) );
  1494. break;
  1495. }
  1496. /* Consume this TRB */
  1497. event->cons++;
  1498. }
  1499. /* Update dequeue pointer if applicable */
  1500. if ( consumed ) {
  1501. xhci_writeq ( xhci, virt_to_phys ( trb ),
  1502. xhci->run + XHCI_RUN_ERDP ( 0 ) );
  1503. profile_stop ( &xhci_event_profiler );
  1504. }
  1505. }
  1506. /**
  1507. * Abort command
  1508. *
  1509. * @v xhci xHCI device
  1510. */
  1511. static void xhci_abort ( struct xhci_device *xhci ) {
  1512. physaddr_t crp;
  1513. /* Abort the command */
  1514. DBGC2 ( xhci, "XHCI %p aborting command\n", xhci );
  1515. xhci_writeq ( xhci, XHCI_CRCR_CA, xhci->op + XHCI_OP_CRCR );
  1516. /* Allow time for command to abort */
  1517. mdelay ( XHCI_COMMAND_ABORT_DELAY_MS );
  1518. /* Sanity check */
  1519. assert ( ( readl ( xhci->op + XHCI_OP_CRCR ) & XHCI_CRCR_CRR ) == 0 );
  1520. /* Consume (and ignore) any final command status */
  1521. xhci_event_poll ( xhci );
  1522. /* Reset the command ring control register */
  1523. xhci_ring_reset ( &xhci->command );
  1524. crp = virt_to_phys ( xhci->command.trb );
  1525. xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ), xhci->op + XHCI_OP_CRCR );
  1526. }
  1527. /**
  1528. * Issue command and wait for completion
  1529. *
  1530. * @v xhci xHCI device
  1531. * @v trb Transfer request block (with empty Cycle flag)
  1532. * @ret rc Return status code
  1533. *
  1534. * On a successful completion, the TRB will be overwritten with the
  1535. * completion.
  1536. */
  1537. static int xhci_command ( struct xhci_device *xhci, union xhci_trb *trb ) {
  1538. struct xhci_trb_complete *complete = &trb->complete;
  1539. unsigned int i;
  1540. int rc;
  1541. /* Record the pending command */
  1542. xhci->pending = trb;
  1543. /* Enqueue the command */
  1544. if ( ( rc = xhci_enqueue ( &xhci->command, NULL, trb ) ) != 0 )
  1545. goto err_enqueue;
  1546. /* Ring the command doorbell */
  1547. xhci_doorbell ( &xhci->command );
  1548. /* Wait for the command to complete */
  1549. for ( i = 0 ; i < XHCI_COMMAND_MAX_WAIT_MS ; i++ ) {
  1550. /* Poll event ring */
  1551. xhci_event_poll ( xhci );
  1552. /* Check for completion */
  1553. if ( ! xhci->pending ) {
  1554. if ( complete->code != XHCI_CMPLT_SUCCESS ) {
  1555. rc = -ECODE ( complete->code );
  1556. DBGC ( xhci, "XHCI %p command failed (code "
  1557. "%d): %s\n", xhci, complete->code,
  1558. strerror ( rc ) );
  1559. DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
  1560. return rc;
  1561. }
  1562. return 0;
  1563. }
  1564. /* Delay */
  1565. mdelay ( 1 );
  1566. }
  1567. /* Timeout */
  1568. DBGC ( xhci, "XHCI %p timed out waiting for completion\n", xhci );
  1569. rc = -ETIMEDOUT;
  1570. /* Abort command */
  1571. xhci_abort ( xhci );
  1572. err_enqueue:
  1573. xhci->pending = NULL;
  1574. return rc;
  1575. }
  1576. /**
  1577. * Issue NOP and wait for completion
  1578. *
  1579. * @v xhci xHCI device
  1580. * @ret rc Return status code
  1581. */
  1582. static inline int xhci_nop ( struct xhci_device *xhci ) {
  1583. union xhci_trb trb;
  1584. struct xhci_trb_common *nop = &trb.common;
  1585. int rc;
  1586. /* Construct command */
  1587. memset ( nop, 0, sizeof ( *nop ) );
  1588. nop->flags = XHCI_TRB_IOC;
  1589. nop->type = XHCI_TRB_NOP_CMD;
  1590. /* Issue command and wait for completion */
  1591. if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 )
  1592. return rc;
  1593. return 0;
  1594. }
  1595. /**
  1596. * Enable slot
  1597. *
  1598. * @v xhci xHCI device
  1599. * @v type Slot type
  1600. * @ret slot Device slot ID, or negative error
  1601. */
  1602. static inline int xhci_enable_slot ( struct xhci_device *xhci,
  1603. unsigned int type ) {
  1604. union xhci_trb trb;
  1605. struct xhci_trb_enable_slot *enable = &trb.enable;
  1606. struct xhci_trb_complete *enabled = &trb.complete;
  1607. unsigned int slot;
  1608. int rc;
  1609. /* Construct command */
  1610. memset ( enable, 0, sizeof ( *enable ) );
  1611. enable->slot = type;
  1612. enable->type = XHCI_TRB_ENABLE_SLOT;
  1613. /* Issue command and wait for completion */
  1614. if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
  1615. DBGC ( xhci, "XHCI %p could not enable new slot: %s\n",
  1616. xhci, strerror ( rc ) );
  1617. return rc;
  1618. }
  1619. /* Extract slot number */
  1620. slot = enabled->slot;
  1621. DBGC2 ( xhci, "XHCI %p slot %d enabled\n", xhci, slot );
  1622. return slot;
  1623. }
  1624. /**
  1625. * Disable slot
  1626. *
  1627. * @v xhci xHCI device
  1628. * @v slot Device slot
  1629. * @ret rc Return status code
  1630. */
  1631. static inline int xhci_disable_slot ( struct xhci_device *xhci,
  1632. unsigned int slot ) {
  1633. union xhci_trb trb;
  1634. struct xhci_trb_disable_slot *disable = &trb.disable;
  1635. int rc;
  1636. /* Construct command */
  1637. memset ( disable, 0, sizeof ( *disable ) );
  1638. disable->type = XHCI_TRB_DISABLE_SLOT;
  1639. disable->slot = slot;
  1640. /* Issue command and wait for completion */
  1641. if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
  1642. DBGC ( xhci, "XHCI %p could not disable slot %d: %s\n",
  1643. xhci, slot, strerror ( rc ) );
  1644. return rc;
  1645. }
  1646. DBGC2 ( xhci, "XHCI %p slot %d disabled\n", xhci, slot );
  1647. return 0;
  1648. }
  1649. /**
  1650. * Issue context-based command and wait for completion
  1651. *
  1652. * @v xhci xHCI device
  1653. * @v slot Device slot
  1654. * @v endpoint Endpoint
  1655. * @v type TRB type
1656. * @v populate Input context populator
  1657. * @ret rc Return status code
  1658. */
  1659. static int xhci_context ( struct xhci_device *xhci, struct xhci_slot *slot,
  1660. struct xhci_endpoint *endpoint, unsigned int type,
  1661. void ( * populate ) ( struct xhci_device *xhci,
  1662. struct xhci_slot *slot,
  1663. struct xhci_endpoint *endpoint,
  1664. void *input ) ) {
  1665. union xhci_trb trb;
  1666. struct xhci_trb_context *context = &trb.context;
  1667. size_t len;
  1668. void *input;
  1669. int rc;
  1670. /* Allocate an input context */
  1671. len = xhci_input_context_offset ( xhci, XHCI_CTX_END );
  1672. input = malloc_dma ( len, xhci_align ( len ) );
  1673. if ( ! input ) {
  1674. rc = -ENOMEM;
  1675. goto err_alloc;
  1676. }
  1677. memset ( input, 0, len );
  1678. /* Populate input context */
  1679. populate ( xhci, slot, endpoint, input );
  1680. /* Construct command */
  1681. memset ( context, 0, sizeof ( *context ) );
  1682. context->type = type;
  1683. context->input = cpu_to_le64 ( virt_to_phys ( input ) );
  1684. context->slot = slot->id;
  1685. /* Issue command and wait for completion */
  1686. if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 )
  1687. goto err_command;
  1688. err_command:
  1689. free_dma ( input, len );
  1690. err_alloc:
  1691. return rc;
  1692. }
  1693. /**
  1694. * Populate address device input context
  1695. *
  1696. * @v xhci xHCI device
  1697. * @v slot Device slot
  1698. * @v endpoint Endpoint
  1699. * @v input Input context
  1700. */
  1701. static void xhci_address_device_input ( struct xhci_device *xhci,
  1702. struct xhci_slot *slot,
  1703. struct xhci_endpoint *endpoint,
  1704. void *input ) {
  1705. struct xhci_control_context *control_ctx;
  1706. struct xhci_slot_context *slot_ctx;
  1707. struct xhci_endpoint_context *ep_ctx;
  1708. /* Sanity checks */
  1709. assert ( endpoint->ctx == XHCI_CTX_EP0 );
  1710. /* Populate control context */
  1711. control_ctx = input;
  1712. control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
  1713. ( 1 << XHCI_CTX_EP0 ) );
  1714. /* Populate slot context */
  1715. slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
  1716. slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( 1, 0, slot->psiv,
  1717. slot->route ) );
  1718. slot_ctx->port = slot->port;
  1719. /* Populate control endpoint context */
  1720. ep_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_EP0 ) );
  1721. ep_ctx->type = XHCI_EP_TYPE_CONTROL;
  1722. ep_ctx->burst = endpoint->ep->burst;
  1723. ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
  1724. ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) |
  1725. XHCI_EP_DCS );
  1726. ep_ctx->trb_len = cpu_to_le16 ( XHCI_EP0_TRB_LEN );
  1727. }
  1728. /**
  1729. * Address device
  1730. *
  1731. * @v xhci xHCI device
  1732. * @v slot Device slot
  1733. * @ret rc Return status code
  1734. */
  1735. static inline int xhci_address_device ( struct xhci_device *xhci,
  1736. struct xhci_slot *slot ) {
  1737. struct usb_device *usb = slot->usb;
  1738. struct xhci_slot_context *slot_ctx;
  1739. int rc;
  1740. /* Assign device address */
  1741. if ( ( rc = xhci_context ( xhci, slot, slot->endpoint[XHCI_CTX_EP0],
  1742. XHCI_TRB_ADDRESS_DEVICE,
  1743. xhci_address_device_input ) ) != 0 )
  1744. return rc;
  1745. /* Get assigned address */
  1746. slot_ctx = ( slot->context +
  1747. xhci_device_context_offset ( xhci, XHCI_CTX_SLOT ) );
  1748. usb->address = slot_ctx->address;
  1749. DBGC2 ( xhci, "XHCI %p assigned address %d to %s\n",
  1750. xhci, usb->address, usb->name );
  1751. return 0;
  1752. }
  1753. /**
  1754. * Populate configure endpoint input context
  1755. *
  1756. * @v xhci xHCI device
  1757. * @v slot Device slot
  1758. * @v endpoint Endpoint
  1759. * @v input Input context
  1760. */
  1761. static void xhci_configure_endpoint_input ( struct xhci_device *xhci,
  1762. struct xhci_slot *slot __unused,
  1763. struct xhci_endpoint *endpoint,
  1764. void *input ) {
  1765. struct xhci_control_context *control_ctx;
  1766. struct xhci_slot_context *slot_ctx;
  1767. struct xhci_endpoint_context *ep_ctx;
  1768. /* Populate control context */
  1769. control_ctx = input;
  1770. control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
  1771. ( 1 << endpoint->ctx ) );
  1772. /* Populate slot context */
  1773. slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
  1774. slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
  1775. 0, 0, 0 ) );
  1776. /* Populate endpoint context */
  1777. ep_ctx = ( input + xhci_input_context_offset ( xhci, endpoint->ctx ) );
  1778. ep_ctx->interval = endpoint->interval;
  1779. ep_ctx->type = endpoint->type;
  1780. ep_ctx->burst = endpoint->ep->burst;
  1781. ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
  1782. ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) |
  1783. XHCI_EP_DCS );
  1784. ep_ctx->trb_len = cpu_to_le16 ( endpoint->ep->mtu ); /* best guess */
  1785. }
  1786. /**
  1787. * Configure endpoint
  1788. *
  1789. * @v xhci xHCI device
  1790. * @v slot Device slot
  1791. * @v endpoint Endpoint
  1792. * @ret rc Return status code
  1793. */
  1794. static inline int xhci_configure_endpoint ( struct xhci_device *xhci,
  1795. struct xhci_slot *slot,
  1796. struct xhci_endpoint *endpoint ) {
  1797. int rc;
  1798. /* Configure endpoint */
  1799. if ( ( rc = xhci_context ( xhci, slot, endpoint,
  1800. XHCI_TRB_CONFIGURE_ENDPOINT,
  1801. xhci_configure_endpoint_input ) ) != 0 )
  1802. return rc;
  1803. DBGC2 ( xhci, "XHCI %p slot %d ctx %d configured\n",
  1804. xhci, slot->id, endpoint->ctx );
  1805. return 0;
  1806. }
  1807. /**
  1808. * Populate deconfigure endpoint input context
  1809. *
  1810. * @v xhci xHCI device
  1811. * @v slot Device slot
  1812. * @v endpoint Endpoint
  1813. * @v input Input context
  1814. */
  1815. static void
  1816. xhci_deconfigure_endpoint_input ( struct xhci_device *xhci __unused,
  1817. struct xhci_slot *slot __unused,
  1818. struct xhci_endpoint *endpoint,
  1819. void *input ) {
  1820. struct xhci_control_context *control_ctx;
  1821. struct xhci_slot_context *slot_ctx;
  1822. /* Populate control context */
  1823. control_ctx = input;
  1824. control_ctx->add = cpu_to_le32 ( 1 << XHCI_CTX_SLOT );
  1825. control_ctx->drop = cpu_to_le32 ( 1 << endpoint->ctx );
  1826. /* Populate slot context */
  1827. slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
  1828. slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
  1829. 0, 0, 0 ) );
  1830. }
  1831. /**
  1832. * Deconfigure endpoint
  1833. *
  1834. * @v xhci xHCI device
  1835. * @v slot Device slot
  1836. * @v endpoint Endpoint
  1837. * @ret rc Return status code
  1838. */
  1839. static inline int xhci_deconfigure_endpoint ( struct xhci_device *xhci,
  1840. struct xhci_slot *slot,
  1841. struct xhci_endpoint *endpoint ) {
  1842. int rc;
  1843. /* Deconfigure endpoint */
  1844. if ( ( rc = xhci_context ( xhci, slot, endpoint,
  1845. XHCI_TRB_CONFIGURE_ENDPOINT,
  1846. xhci_deconfigure_endpoint_input ) ) != 0 )
  1847. return rc;
  1848. DBGC2 ( xhci, "XHCI %p slot %d ctx %d deconfigured\n",
  1849. xhci, slot->id, endpoint->ctx );
  1850. return 0;
  1851. }
  1852. /**
  1853. * Populate evaluate context input context
  1854. *
  1855. * @v xhci xHCI device
  1856. * @v slot Device slot
  1857. * @v endpoint Endpoint
  1858. * @v input Input context
  1859. */
  1860. static void xhci_evaluate_context_input ( struct xhci_device *xhci,
  1861. struct xhci_slot *slot __unused,
  1862. struct xhci_endpoint *endpoint,
  1863. void *input ) {
  1864. struct xhci_control_context *control_ctx;
  1865. struct xhci_slot_context *slot_ctx;
  1866. struct xhci_endpoint_context *ep_ctx;
  1867. /* Populate control context */
  1868. control_ctx = input;
  1869. control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
  1870. ( 1 << endpoint->ctx ) );
  1871. /* Populate slot context */
  1872. slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
  1873. slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
  1874. 0, 0, 0 ) );
  1875. /* Populate endpoint context */
  1876. ep_ctx = ( input + xhci_input_context_offset ( xhci, endpoint->ctx ) );
  1877. ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
  1878. }
  1879. /**
  1880. * Evaluate context
  1881. *
  1882. * @v xhci xHCI device
  1883. * @v slot Device slot
  1884. * @v endpoint Endpoint
  1885. * @ret rc Return status code
  1886. */
  1887. static inline int xhci_evaluate_context ( struct xhci_device *xhci,
  1888. struct xhci_slot *slot,
  1889. struct xhci_endpoint *endpoint ) {
  1890. int rc;
  1891. /* Configure endpoint */
  1892. if ( ( rc = xhci_context ( xhci, slot, endpoint,
  1893. XHCI_TRB_EVALUATE_CONTEXT,
  1894. xhci_evaluate_context_input ) ) != 0 )
  1895. return rc;
  1896. DBGC2 ( xhci, "XHCI %p slot %d ctx %d (re-)evaluated\n",
  1897. xhci, slot->id, endpoint->ctx );
  1898. return 0;
  1899. }
  1900. /**
  1901. * Reset endpoint
  1902. *
  1903. * @v xhci xHCI device
  1904. * @v slot Device slot
  1905. * @v endpoint Endpoint
  1906. * @ret rc Return status code
  1907. */
  1908. static inline int xhci_reset_endpoint ( struct xhci_device *xhci,
  1909. struct xhci_slot *slot,
  1910. struct xhci_endpoint *endpoint ) {
  1911. union xhci_trb trb;
  1912. struct xhci_trb_reset_endpoint *reset = &trb.reset;
  1913. int rc;
  1914. /* Construct command */
  1915. memset ( reset, 0, sizeof ( *reset ) );
  1916. reset->slot = slot->id;
  1917. reset->endpoint = endpoint->ctx;
  1918. reset->type = XHCI_TRB_RESET_ENDPOINT;
  1919. /* Issue command and wait for completion */
  1920. if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
  1921. DBGC ( xhci, "XHCI %p slot %d ctx %d could not reset endpoint "
  1922. "in state %d: %s\n", xhci, slot->id, endpoint->ctx,
  1923. endpoint->context->state, strerror ( rc ) );
  1924. return rc;
  1925. }
  1926. return 0;
  1927. }
  1928. /**
  1929. * Stop endpoint
  1930. *
  1931. * @v xhci xHCI device
  1932. * @v slot Device slot
  1933. * @v endpoint Endpoint
  1934. * @ret rc Return status code
  1935. */
  1936. static inline int xhci_stop_endpoint ( struct xhci_device *xhci,
  1937. struct xhci_slot *slot,
  1938. struct xhci_endpoint *endpoint ) {
  1939. union xhci_trb trb;
  1940. struct xhci_trb_stop_endpoint *stop = &trb.stop;
  1941. int rc;
  1942. /* Construct command */
  1943. memset ( stop, 0, sizeof ( *stop ) );
  1944. stop->slot = slot->id;
  1945. stop->endpoint = endpoint->ctx;
  1946. stop->type = XHCI_TRB_STOP_ENDPOINT;
  1947. /* Issue command and wait for completion */
  1948. if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
  1949. DBGC ( xhci, "XHCI %p slot %d ctx %d could not stop endpoint "
  1950. "in state %d: %s\n", xhci, slot->id, endpoint->ctx,
  1951. endpoint->context->state, strerror ( rc ) );
  1952. return rc;
  1953. }
  1954. return 0;
  1955. }
  1956. /**
  1957. * Set transfer ring dequeue pointer
  1958. *
  1959. * @v xhci xHCI device
  1960. * @v slot Device slot
  1961. * @v endpoint Endpoint
  1962. * @ret rc Return status code
  1963. */
  1964. static inline int
  1965. xhci_set_tr_dequeue_pointer ( struct xhci_device *xhci,
  1966. struct xhci_slot *slot,
  1967. struct xhci_endpoint *endpoint ) {
  1968. union xhci_trb trb;
  1969. struct xhci_trb_set_tr_dequeue_pointer *dequeue = &trb.dequeue;
  1970. struct xhci_trb_ring *ring = &endpoint->ring;
  1971. unsigned int cons;
  1972. unsigned int mask;
  1973. unsigned int index;
  1974. unsigned int dcs;
  1975. int rc;
  1976. /* Construct command */
  1977. memset ( dequeue, 0, sizeof ( *dequeue ) );
  1978. cons = ring->cons;
  1979. mask = ring->mask;
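/* The dequeue cycle state must match the Cycle bit with which the next unconsumed TRB was (or will be) enqueued: the complement of bit 'shift' of the consumer counter, mirroring the producer-side cycle calculation */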
  1980. dcs = ( ( ~( cons >> ring->shift ) ) & XHCI_EP_DCS );
  1981. index = ( cons & mask );
  1982. dequeue->dequeue =
  1983. cpu_to_le64 ( virt_to_phys ( &ring->trb[index] ) | dcs );
  1984. dequeue->slot = slot->id;
  1985. dequeue->endpoint = endpoint->ctx;
  1986. dequeue->type = XHCI_TRB_SET_TR_DEQUEUE_POINTER;
  1987. /* Issue command and wait for completion */
  1988. if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
  1989. DBGC ( xhci, "XHCI %p slot %d ctx %d could not set TR dequeue "
  1990. "pointer in state %d: %s\n", xhci, slot->id,
  1991. endpoint->ctx, endpoint->context->state, strerror ( rc));
  1992. return rc;
  1993. }
  1994. return 0;
  1995. }
  1996. /******************************************************************************
  1997. *
  1998. * Endpoint operations
  1999. *
  2000. ******************************************************************************
  2001. */
  2002. /**
  2003. * Open endpoint
  2004. *
  2005. * @v ep USB endpoint
  2006. * @ret rc Return status code
  2007. */
  2008. static int xhci_endpoint_open ( struct usb_endpoint *ep ) {
  2009. struct usb_device *usb = ep->usb;
  2010. struct xhci_slot *slot = usb_get_hostdata ( usb );
  2011. struct xhci_device *xhci = slot->xhci;
  2012. struct xhci_endpoint *endpoint;
  2013. unsigned int ctx;
  2014. unsigned int type;
  2015. unsigned int interval;
  2016. int rc;
  2017. /* Calculate context index */
  2018. ctx = XHCI_CTX ( ep->address );
  2019. assert ( slot->endpoint[ctx] == NULL );
  2020. /* Calculate endpoint type */
  2021. type = XHCI_EP_TYPE ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK );
  2022. if ( type == XHCI_EP_TYPE ( USB_ENDPOINT_ATTR_CONTROL ) )
  2023. type = XHCI_EP_TYPE_CONTROL;
  2024. if ( ep->address & USB_DIR_IN )
  2025. type |= XHCI_EP_TYPE_IN;
  2026. /* Calculate interval */
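/* Periodic endpoints encode the interval as a power of two (2^interval microframes), hence the fls()-based log2 conversion; other endpoint types pass the value through unchanged */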
  2027. if ( type & XHCI_EP_TYPE_PERIODIC ) {
  2028. interval = ( fls ( ep->interval ) - 1 );
  2029. } else {
  2030. interval = ep->interval;
  2031. }
  2032. /* Allocate and initialise structure */
  2033. endpoint = zalloc ( sizeof ( *endpoint ) );
  2034. if ( ! endpoint ) {
  2035. rc = -ENOMEM;
  2036. goto err_alloc;
  2037. }
  2038. usb_endpoint_set_hostdata ( ep, endpoint );
  2039. slot->endpoint[ctx] = endpoint;
  2040. endpoint->xhci = xhci;
  2041. endpoint->slot = slot;
  2042. endpoint->ep = ep;
  2043. endpoint->ctx = ctx;
  2044. endpoint->type = type;
  2045. endpoint->interval = interval;
  2046. endpoint->context = ( ( ( void * ) slot->context ) +
  2047. xhci_device_context_offset ( xhci, ctx ) );
  2048. /* Allocate transfer ring */
  2049. if ( ( rc = xhci_ring_alloc ( xhci, &endpoint->ring,
  2050. XHCI_TRANSFER_TRBS_LOG2,
  2051. slot->id, ctx, 0 ) ) != 0 )
  2052. goto err_ring_alloc;
  2053. /* Configure endpoint, if applicable */
  2054. if ( ( ctx != XHCI_CTX_EP0 ) &&
  2055. ( ( rc = xhci_configure_endpoint ( xhci, slot, endpoint ) ) != 0 ))
  2056. goto err_configure_endpoint;
  2057. DBGC2 ( xhci, "XHCI %p slot %d ctx %d ring [%08lx,%08lx)\n",
  2058. xhci, slot->id, ctx, virt_to_phys ( endpoint->ring.trb ),
  2059. ( virt_to_phys ( endpoint->ring.trb ) + endpoint->ring.len ) );
  2060. return 0;
  2061. xhci_deconfigure_endpoint ( xhci, slot, endpoint );
  2062. err_configure_endpoint:
  2063. xhci_ring_free ( &endpoint->ring );
  2064. err_ring_alloc:
  2065. slot->endpoint[ctx] = NULL;
  2066. free ( endpoint );
  2067. err_alloc:
  2068. return rc;
  2069. }
  2070. /**
  2071. * Close endpoint
  2072. *
  2073. * @v ep USB endpoint
  2074. */
  2075. static void xhci_endpoint_close ( struct usb_endpoint *ep ) {
  2076. struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
  2077. struct xhci_slot *slot = endpoint->slot;
  2078. struct xhci_device *xhci = slot->xhci;
  2079. struct io_buffer *iobuf;
  2080. unsigned int ctx = endpoint->ctx;
  2081. /* Deconfigure endpoint, if applicable */
  2082. if ( ctx != XHCI_CTX_EP0 )
  2083. xhci_deconfigure_endpoint ( xhci, slot, endpoint );
  2084. /* Cancel any incomplete transfers */
  2085. while ( xhci_ring_fill ( &endpoint->ring ) ) {
  2086. iobuf = xhci_dequeue_multi ( &endpoint->ring );
  2087. usb_complete_err ( ep, iobuf, -ECANCELED );
  2088. }
  2089. /* Free endpoint */
  2090. xhci_ring_free ( &endpoint->ring );
  2091. slot->endpoint[ctx] = NULL;
  2092. free ( endpoint );
  2093. }
  2094. /**
  2095. * Reset endpoint
  2096. *
  2097. * @v ep USB endpoint
  2098. * @ret rc Return status code
  2099. */
  2100. static int xhci_endpoint_reset ( struct usb_endpoint *ep ) {
  2101. struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
  2102. struct xhci_slot *slot = endpoint->slot;
  2103. struct xhci_device *xhci = slot->xhci;
  2104. int rc;
  2105. /* Reset endpoint context */
  2106. if ( ( rc = xhci_reset_endpoint ( xhci, slot, endpoint ) ) != 0 )
  2107. return rc;
  2108. /* Set transfer ring dequeue pointer */
  2109. if ( ( rc = xhci_set_tr_dequeue_pointer ( xhci, slot, endpoint ) ) != 0)
  2110. return rc;
  2111. DBGC ( xhci, "XHCI %p slot %d ctx %d reset\n",
  2112. xhci, slot->id, endpoint->ctx );
  2113. return 0;
  2114. }
  2115. /**
  2116. * Update MTU
  2117. *
  2118. * @v ep USB endpoint
  2119. * @ret rc Return status code
  2120. */
  2121. static int xhci_endpoint_mtu ( struct usb_endpoint *ep ) {
  2122. struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
  2123. struct xhci_slot *slot = endpoint->slot;
  2124. struct xhci_device *xhci = slot->xhci;
  2125. int rc;
2126. /* Evaluate context */
  2127. if ( ( rc = xhci_evaluate_context ( xhci, slot, endpoint ) ) != 0 )
  2128. return rc;
  2129. return 0;
  2130. }
  2131. /**
  2132. * Enqueue message transfer
  2133. *
  2134. * @v ep USB endpoint
  2135. * @v packet Setup packet
  2136. * @v iobuf I/O buffer
  2137. * @ret rc Return status code
  2138. */
  2139. static int xhci_endpoint_message ( struct usb_endpoint *ep,
  2140. struct usb_setup_packet *packet,
  2141. struct io_buffer *iobuf ) {
  2142. struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
  2143. unsigned int input = ( le16_to_cpu ( packet->request ) & USB_DIR_IN );
  2144. size_t len = iob_len ( iobuf );
  2145. union xhci_trb trbs[ 1 /* setup */ + 1 /* possible data */ +
  2146. 1 /* status */ ];
  2147. union xhci_trb *trb = trbs;
  2148. struct xhci_trb_setup *setup;
  2149. struct xhci_trb_data *data;
  2150. struct xhci_trb_status *status;
  2151. int rc;
  2152. /* Profile message transfers */
  2153. profile_start ( &xhci_message_profiler );
  2154. /* Construct setup stage TRB */
  2155. memset ( trbs, 0, sizeof ( trbs ) );
  2156. setup = &(trb++)->setup;
  2157. memcpy ( &setup->packet, packet, sizeof ( setup->packet ) );
  2158. setup->len = cpu_to_le32 ( sizeof ( *packet ) );
  2159. setup->flags = XHCI_TRB_IDT;
  2160. setup->type = XHCI_TRB_SETUP;
  2161. if ( len )
  2162. setup->direction = ( input ? XHCI_SETUP_IN : XHCI_SETUP_OUT );
  2163. /* Construct data stage TRB, if applicable */
  2164. if ( len ) {
  2165. data = &(trb++)->data;
  2166. data->data = cpu_to_le64 ( virt_to_phys ( iobuf->data ) );
  2167. data->len = cpu_to_le32 ( len );
  2168. data->type = XHCI_TRB_DATA;
  2169. data->direction = ( input ? XHCI_DATA_IN : XHCI_DATA_OUT );
  2170. }
  2171. /* Construct status stage TRB */
  2172. status = &(trb++)->status;
  2173. status->flags = XHCI_TRB_IOC;
  2174. status->type = XHCI_TRB_STATUS;
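/* The status stage runs in the opposite direction to the data stage, or IN when there is no data stage */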
  2175. status->direction =
  2176. ( ( len && input ) ? XHCI_STATUS_OUT : XHCI_STATUS_IN );
  2177. /* Enqueue TRBs */
  2178. if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
  2179. ( trb - trbs ) ) ) != 0 )
  2180. return rc;
  2181. /* Ring the doorbell */
  2182. xhci_doorbell ( &endpoint->ring );
  2183. profile_stop ( &xhci_message_profiler );
  2184. return 0;
  2185. }
  2186. /**
  2187. * Enqueue stream transfer
  2188. *
  2189. * @v ep USB endpoint
  2190. * @v iobuf I/O buffer
  2191. * @v terminate Terminate using a short packet
  2192. * @ret rc Return status code
  2193. */
  2194. static int xhci_endpoint_stream ( struct usb_endpoint *ep,
  2195. struct io_buffer *iobuf, int terminate ) {
  2196. struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
  2197. union xhci_trb trbs[ 1 /* Normal */ + 1 /* Possible zero-length */ ];
  2198. union xhci_trb *trb = trbs;
  2199. struct xhci_trb_normal *normal;
  2200. size_t len = iob_len ( iobuf );
  2201. int rc;
  2202. /* Profile stream transfers */
  2203. profile_start ( &xhci_stream_profiler );
  2204. /* Construct normal TRBs */
  2205. memset ( &trbs, 0, sizeof ( trbs ) );
  2206. normal = &(trb++)->normal;
  2207. normal->data = cpu_to_le64 ( virt_to_phys ( iobuf->data ) );
  2208. normal->len = cpu_to_le32 ( len );
  2209. normal->type = XHCI_TRB_NORMAL;
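/* If termination by short packet is requested but the length is an exact multiple of the endpoint MTU, chain an extra zero-length TRB so that the transfer ends with a zero-length packet */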
  2210. if ( terminate && ( ( len & ( ep->mtu - 1 ) ) == 0 ) ) {
  2211. normal->flags = XHCI_TRB_CH;
  2212. normal = &(trb++)->normal;
  2213. normal->type = XHCI_TRB_NORMAL;
  2214. }
  2215. normal->flags = XHCI_TRB_IOC;
  2216. /* Enqueue TRBs */
  2217. if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
  2218. ( trb - trbs ) ) ) != 0 )
  2219. return rc;
  2220. /* Ring the doorbell */
  2221. xhci_doorbell ( &endpoint->ring );
  2222. profile_stop ( &xhci_stream_profiler );
  2223. return 0;
  2224. }
  2225. /******************************************************************************
  2226. *
  2227. * Device operations
  2228. *
  2229. ******************************************************************************
  2230. */
  2231. /**
  2232. * Open device
  2233. *
  2234. * @v usb USB device
  2235. * @ret rc Return status code
  2236. */
  2237. static int xhci_device_open ( struct usb_device *usb ) {
  2238. struct xhci_device *xhci = usb_bus_get_hostdata ( usb->port->hub->bus );
  2239. struct xhci_slot *slot;
  2240. size_t len;
  2241. int type;
  2242. int id;
  2243. int rc;
  2244. /* Determine applicable slot type */
  2245. type = xhci_port_slot_type ( xhci, usb->port->address );
  2246. if ( type < 0 ) {
  2247. rc = type;
  2248. DBGC ( xhci, "XHCI %p port %d has no slot type\n",
  2249. xhci, usb->port->address );
  2250. goto err_type;
  2251. }
  2252. /* Allocate a device slot number */
  2253. id = xhci_enable_slot ( xhci, type );
  2254. if ( id < 0 ) {
  2255. rc = id;
  2256. goto err_enable_slot;
  2257. }
  2258. assert ( xhci->slot[id] == NULL );
  2259. /* Allocate and initialise structure */
  2260. slot = zalloc ( sizeof ( *slot ) );
  2261. if ( ! slot ) {
  2262. rc = -ENOMEM;
  2263. goto err_alloc;
  2264. }
  2265. usb_set_hostdata ( usb, slot );
  2266. xhci->slot[id] = slot;
  2267. slot->xhci = xhci;
  2268. slot->usb = usb;
  2269. slot->id = id;
  2270. /* Allocate a device context */
  2271. len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
  2272. slot->context = malloc_dma ( len, xhci_align ( len ) );
  2273. if ( ! slot->context ) {
  2274. rc = -ENOMEM;
  2275. goto err_alloc_context;
  2276. }
  2277. memset ( slot->context, 0, len );
  2278. /* Set device context base address */
  2279. assert ( xhci->dcbaa[id] == 0 );
  2280. xhci->dcbaa[id] = cpu_to_le64 ( virt_to_phys ( slot->context ) );
  2281. DBGC2 ( xhci, "XHCI %p slot %d device context [%08lx,%08lx) for %s\n",
  2282. xhci, slot->id, virt_to_phys ( slot->context ),
  2283. ( virt_to_phys ( slot->context ) + len ), usb->name );
  2284. return 0;
  2285. xhci->dcbaa[id] = 0;
  2286. free_dma ( slot->context, len );
  2287. err_alloc_context:
  2288. xhci->slot[id] = NULL;
  2289. free ( slot );
  2290. err_alloc:
  2291. xhci_disable_slot ( xhci, id );
  2292. err_enable_slot:
  2293. err_type:
  2294. return rc;
  2295. }
  2296. /**
  2297. * Close device
  2298. *
  2299. * @v usb USB device
  2300. */
  2301. static void xhci_device_close ( struct usb_device *usb ) {
  2302. struct xhci_slot *slot = usb_get_hostdata ( usb );
  2303. struct xhci_device *xhci = slot->xhci;
  2304. size_t len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
  2305. unsigned int id = slot->id;
  2306. int rc;
  2307. /* Disable slot */
  2308. if ( ( rc = xhci_disable_slot ( xhci, id ) ) != 0 ) {
  2309. /* Slot is still enabled. Leak the slot context,
  2310. * since the controller may still write to this
  2311. * memory, and leave the DCBAA entry intact.
  2312. *
  2313. * If the controller later reports that this same slot
  2314. * has been re-enabled, then some assertions will be
  2315. * triggered.
  2316. */
  2317. DBGC ( xhci, "XHCI %p slot %d leaking context memory\n",
  2318. xhci, slot->id );
  2319. slot->context = NULL;
  2320. }
  2321. /* Free slot */
  2322. if ( slot->context ) {
  2323. free_dma ( slot->context, len );
  2324. xhci->dcbaa[id] = 0;
  2325. }
  2326. xhci->slot[id] = NULL;
  2327. free ( slot );
  2328. }
  2329. /**
  2330. * Assign device address
  2331. *
  2332. * @v usb USB device
  2333. * @ret rc Return status code
  2334. */
  2335. static int xhci_device_address ( struct usb_device *usb ) {
  2336. struct xhci_slot *slot = usb_get_hostdata ( usb );
  2337. struct xhci_device *xhci = slot->xhci;
  2338. struct usb_port *port = usb->port;
  2339. struct usb_port *root_port;
  2340. int psiv;
  2341. int rc;
  2342. /* Calculate route string */
  2343. slot->route = usb_route_string ( usb );
  2344. /* Calculate root hub port number */
  2345. root_port = usb_root_hub_port ( usb );
  2346. slot->port = root_port->address;
  2347. /* Calculate protocol speed ID */
  2348. psiv = xhci_port_psiv ( xhci, slot->port, port->speed );
  2349. if ( psiv < 0 ) {
  2350. rc = psiv;
  2351. return rc;
  2352. }
  2353. slot->psiv = psiv;
  2354. /* Address device */
  2355. if ( ( rc = xhci_address_device ( xhci, slot ) ) != 0 )
  2356. return rc;
  2357. return 0;
  2358. }
  2359. /******************************************************************************
  2360. *
  2361. * Bus operations
  2362. *
  2363. ******************************************************************************
  2364. */
  2365. /**
  2366. * Open USB bus
  2367. *
  2368. * @v bus USB bus
  2369. * @ret rc Return status code
  2370. */
  2371. static int xhci_bus_open ( struct usb_bus *bus ) {
  2372. struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
  2373. int rc;
2374. /* Allocate device slot array (slot IDs are 1-based, so allow one extra entry) */
2375. xhci->slot = zalloc ( ( xhci->slots + 1 ) * sizeof ( xhci->slot[0] ) );
  2376. if ( ! xhci->slot ) {
  2377. rc = -ENOMEM;
  2378. goto err_slot_alloc;
  2379. }
  2380. /* Allocate device context base address array */
  2381. if ( ( rc = xhci_dcbaa_alloc ( xhci ) ) != 0 )
  2382. goto err_dcbaa_alloc;
  2383. /* Allocate scratchpad buffers */
  2384. if ( ( rc = xhci_scratchpad_alloc ( xhci ) ) != 0 )
  2385. goto err_scratchpad_alloc;
  2386. /* Allocate command ring */
  2387. if ( ( rc = xhci_command_alloc ( xhci ) ) != 0 )
  2388. goto err_command_alloc;
  2389. /* Allocate event ring */
  2390. if ( ( rc = xhci_event_alloc ( xhci ) ) != 0 )
  2391. goto err_event_alloc;
  2392. /* Start controller */
  2393. xhci_run ( xhci );
  2394. return 0;
  2395. xhci_stop ( xhci );
  2396. xhci_event_free ( xhci );
  2397. err_event_alloc:
  2398. xhci_command_free ( xhci );
  2399. err_command_alloc:
  2400. xhci_scratchpad_free ( xhci );
  2401. err_scratchpad_alloc:
  2402. xhci_dcbaa_free ( xhci );
  2403. err_dcbaa_alloc:
  2404. free ( xhci->slot );
  2405. err_slot_alloc:
  2406. return rc;
  2407. }
  2408. /**
  2409. * Close USB bus
  2410. *
  2411. * @v bus USB bus
  2412. */
  2413. static void xhci_bus_close ( struct usb_bus *bus ) {
  2414. struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
  2415. unsigned int i;
  2416. /* Sanity checks */
  2417. assert ( xhci->slot != NULL );
2418. for ( i = 0 ; i <= xhci->slots ; i++ )
  2419. assert ( xhci->slot[i] == NULL );
  2420. xhci_stop ( xhci );
  2421. xhci_event_free ( xhci );
  2422. xhci_command_free ( xhci );
  2423. xhci_scratchpad_free ( xhci );
  2424. xhci_dcbaa_free ( xhci );
  2425. free ( xhci->slot );
  2426. }
  2427. /**
  2428. * Poll USB bus
  2429. *
  2430. * @v bus USB bus
  2431. */
  2432. static void xhci_bus_poll ( struct usb_bus *bus ) {
  2433. struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
  2434. /* Poll event ring */
  2435. xhci_event_poll ( xhci );
  2436. }
  2437. /******************************************************************************
  2438. *
  2439. * Root hub operations
  2440. *
  2441. ******************************************************************************
  2442. */
  2443. /**
  2444. * Open root hub
  2445. *
  2446. * @v hub USB hub
  2447. * @ret rc Return status code
  2448. */
  2449. static int xhci_hub_open ( struct usb_hub *hub ) {
  2450. struct usb_bus *bus = hub->bus;
  2451. struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
  2452. struct usb_port *port;
  2453. uint32_t portsc;
  2454. unsigned int i;
  2455. /* Enable power to all ports */
  2456. for ( i = 1 ; i <= xhci->ports ; i++ ) {
  2457. portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) );
  2458. portsc &= XHCI_PORTSC_PRESERVE;
  2459. portsc |= XHCI_PORTSC_PP;
  2460. writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) );
  2461. }
  2462. /* xHCI spec requires us to potentially wait 20ms after
  2463. * enabling power to a port.
  2464. */
  2465. mdelay ( XHCI_PORT_POWER_DELAY_MS );
  2466. /* USB3 ports may power up as Disabled */
  2467. for ( i = 1 ; i <= xhci->ports ; i++ ) {
  2468. portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) );
  2469. port = usb_port ( hub, i );
  2470. if ( ( port->protocol >= USB_PROTO_3_0 ) &&
  2471. ( ( portsc & XHCI_PORTSC_PLS_MASK ) ==
  2472. XHCI_PORTSC_PLS_DISABLED ) ) {
  2473. /* Force link state to RxDetect */
  2474. portsc &= XHCI_PORTSC_PRESERVE;
  2475. portsc |= ( XHCI_PORTSC_PLS_RXDETECT | XHCI_PORTSC_LWS);
  2476. writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) );
  2477. }
  2478. }
  2479. /* Some xHCI cards seem to require an additional delay after
  2480. * setting the link state to RxDetect.
  2481. */
  2482. mdelay ( XHCI_LINK_STATE_DELAY_MS );
  2483. /* Record hub driver private data */
  2484. usb_hub_set_drvdata ( hub, xhci );
  2485. return 0;
  2486. }
  2487. /**
  2488. * Close root hub
  2489. *
  2490. * @v hub USB hub
  2491. */
  2492. static void xhci_hub_close ( struct usb_hub *hub ) {
  2493. /* Clear hub driver private data */
  2494. usb_hub_set_drvdata ( hub, NULL );
  2495. }
  2496. /**
  2497. * Enable port
  2498. *
  2499. * @v hub USB hub
  2500. * @v port USB port
  2501. * @ret rc Return status code
  2502. */
  2503. static int xhci_hub_enable ( struct usb_hub *hub, struct usb_port *port ) {
  2504. struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
  2505. uint32_t portsc;
  2506. unsigned int i;
  2507. /* Reset port if applicable */
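/* USB3 ports are enabled automatically by link training; only pre-USB3 ports need an explicit port reset to become enabled */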
  2508. if ( port->protocol < USB_PROTO_3_0 ) {
  2509. portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
  2510. portsc &= XHCI_PORTSC_PRESERVE;
  2511. portsc |= XHCI_PORTSC_PR;
  2512. writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );
  2513. }
  2514. /* Wait for port to become enabled */
  2515. for ( i = 0 ; i < XHCI_PORT_RESET_MAX_WAIT_MS ; i++ ) {
  2516. /* Check port status */
  2517. portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
  2518. if ( portsc & XHCI_PORTSC_PED )
  2519. return 0;
  2520. /* Delay */
  2521. mdelay ( 1 );
  2522. }
  2523. DBGC ( xhci, "XHCI %p timed out waiting for port %d to enable\n",
  2524. xhci, port->address );
  2525. return -ETIMEDOUT;
  2526. }
  2527. /**
  2528. * Disable port
  2529. *
  2530. * @v hub USB hub
  2531. * @v port USB port
  2532. * @ret rc Return status code
  2533. */
  2534. static int xhci_hub_disable ( struct usb_hub *hub, struct usb_port *port ) {
  2535. struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
  2536. uint32_t portsc;
  2537. /* Disable port */
  2538. portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
  2539. portsc &= XHCI_PORTSC_PRESERVE;
  2540. portsc |= XHCI_PORTSC_PED;
  2541. writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );
  2542. return 0;
  2543. }
  2544. /**
  2545. * Update root hub port speed
  2546. *
  2547. * @v hub USB hub
  2548. * @v port USB port
  2549. * @ret rc Return status code
  2550. */
  2551. static int xhci_hub_speed ( struct usb_hub *hub, struct usb_port *port ) {
  2552. struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
  2553. uint32_t portsc;
  2554. unsigned int psiv;
  2555. int ccs;
  2556. int ped;
  2557. int speed;
  2558. int rc;
  2559. /* Read port status */
  2560. portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
  2561. DBGC2 ( xhci, "XHCI %p port %d status is %08x\n",
  2562. xhci, port->address, portsc );
  2563. /* Check whether or not port is connected */
  2564. ccs = ( portsc & XHCI_PORTSC_CCS );
  2565. if ( ! ccs ) {
  2566. port->speed = USB_SPEED_NONE;
  2567. return 0;
  2568. }
  2569. /* For USB2 ports, the PSIV field is not valid until the port
  2570. * completes reset and becomes enabled.
  2571. */
  2572. ped = ( portsc & XHCI_PORTSC_PED );
  2573. if ( ( port->protocol < USB_PROTO_3_0 ) && ! ped ) {
  2574. port->speed = USB_SPEED_FULL;
  2575. return 0;
  2576. }
  2577. /* Get port speed and map to generic USB speed */
  2578. psiv = XHCI_PORTSC_PSIV ( portsc );
  2579. speed = xhci_port_speed ( xhci, port->address, psiv );
  2580. if ( speed < 0 ) {
  2581. rc = speed;
  2582. return rc;
  2583. }
  2584. port->speed = speed;
  2585. return 0;
  2586. }
  2587. /******************************************************************************
  2588. *
  2589. * PCI interface
  2590. *
  2591. ******************************************************************************
  2592. */
  2593. /** USB host controller operations */
  2594. static struct usb_host_operations xhci_operations = {
  2595. .endpoint = {
  2596. .open = xhci_endpoint_open,
  2597. .close = xhci_endpoint_close,
  2598. .reset = xhci_endpoint_reset,
  2599. .mtu = xhci_endpoint_mtu,
  2600. .message = xhci_endpoint_message,
  2601. .stream = xhci_endpoint_stream,
  2602. },
  2603. .device = {
  2604. .open = xhci_device_open,
  2605. .close = xhci_device_close,
  2606. .address = xhci_device_address,
  2607. },
  2608. .bus = {
  2609. .open = xhci_bus_open,
  2610. .close = xhci_bus_close,
  2611. .poll = xhci_bus_poll,
  2612. },
  2613. .hub = {
  2614. .open = xhci_hub_open,
  2615. .close = xhci_hub_close,
  2616. .enable = xhci_hub_enable,
  2617. .disable = xhci_hub_disable,
  2618. .speed = xhci_hub_speed,
  2619. },
  2620. };
  2621. /**
  2622. * Fix Intel PCH-specific quirks
  2623. *
  2624. * @v xhci xHCI device
  2625. * @v pci PCI device
  2626. */
  2627. static void xhci_pch_fix ( struct xhci_device *xhci, struct pci_device *pci ) {
  2628. struct xhci_pch *pch = &xhci->pch;
  2629. uint32_t xusb2pr;
  2630. uint32_t xusb2prm;
  2631. uint32_t usb3pssen;
  2632. uint32_t usb3prm;
  2633. /* Enable SuperSpeed capability. Do this before rerouting
  2634. * USB2 ports, so that USB3 devices connect at SuperSpeed.
  2635. */
  2636. pci_read_config_dword ( pci, XHCI_PCH_USB3PSSEN, &usb3pssen );
  2637. pci_read_config_dword ( pci, XHCI_PCH_USB3PRM, &usb3prm );
  2638. if ( usb3prm & ~usb3pssen ) {
  2639. DBGC ( xhci, "XHCI %p enabling SuperSpeed on ports %08x\n",
  2640. xhci, ( usb3prm & ~usb3pssen ) );
  2641. }
  2642. pch->usb3pssen = usb3pssen;
  2643. usb3pssen |= usb3prm;
  2644. pci_write_config_dword ( pci, XHCI_PCH_USB3PSSEN, usb3pssen );
  2645. /* Route USB2 ports from EHCI to xHCI */
  2646. pci_read_config_dword ( pci, XHCI_PCH_XUSB2PR, &xusb2pr );
  2647. pci_read_config_dword ( pci, XHCI_PCH_XUSB2PRM, &xusb2prm );
  2648. if ( xusb2prm & ~xusb2pr ) {
  2649. DBGC ( xhci, "XHCI %p routing ports %08x from EHCI to xHCI\n",
  2650. xhci, ( xusb2prm & ~xusb2pr ) );
  2651. }
  2652. pch->xusb2pr = xusb2pr;
  2653. xusb2pr |= xusb2prm;
  2654. pci_write_config_dword ( pci, XHCI_PCH_XUSB2PR, xusb2pr );
  2655. }
  2656. /**
  2657. * Undo Intel PCH-specific quirk fixes
  2658. *
  2659. * @v xhci xHCI device
  2660. * @v pci PCI device
  2661. */
  2662. static void xhci_pch_undo ( struct xhci_device *xhci, struct pci_device *pci ) {
  2663. struct xhci_pch *pch = &xhci->pch;
  2664. /* Restore USB2 port routing to original state */
  2665. pci_write_config_dword ( pci, XHCI_PCH_XUSB2PR, pch->xusb2pr );
  2666. /* Restore SuperSpeed capability to original state */
  2667. pci_write_config_dword ( pci, XHCI_PCH_USB3PSSEN, pch->usb3pssen );
  2668. }
  2669. /**
  2670. * Probe PCI device
  2671. *
  2672. * @v pci PCI device
  2673. * @ret rc Return status code
  2674. */
  2675. static int xhci_probe ( struct pci_device *pci ) {
  2676. struct xhci_device *xhci;
  2677. struct usb_port *port;
  2678. unsigned long bar_start;
  2679. size_t bar_size;
  2680. unsigned int i;
  2681. int rc;
  2682. /* Allocate and initialise structure */
  2683. xhci = zalloc ( sizeof ( *xhci ) );
  2684. if ( ! xhci ) {
  2685. rc = -ENOMEM;
  2686. goto err_alloc;
  2687. }
  2688. /* Fix up PCI device */
  2689. adjust_pci_device ( pci );
  2690. /* Map registers */
  2691. bar_start = pci_bar_start ( pci, XHCI_BAR );
  2692. bar_size = pci_bar_size ( pci, XHCI_BAR );
  2693. xhci->regs = ioremap ( bar_start, bar_size );
  2694. if ( ! xhci->regs ) {
  2695. rc = -ENODEV;
  2696. goto err_ioremap;
  2697. }
  2698. /* Initialise xHCI device */
  2699. xhci_init ( xhci, xhci->regs );
  2700. /* Initialise USB legacy support and claim ownership */
  2701. xhci_legacy_init ( xhci );
  2702. xhci_legacy_claim ( xhci );
  2703. /* Fix Intel PCH-specific quirks, if applicable */
  2704. if ( pci->id->driver_data & XHCI_PCH )
  2705. xhci_pch_fix ( xhci, pci );
  2706. /* Reset device */
  2707. if ( ( rc = xhci_reset ( xhci ) ) != 0 )
  2708. goto err_reset;
  2709. /* Allocate USB bus */
  2710. xhci->bus = alloc_usb_bus ( &pci->dev, xhci->ports, XHCI_MTU,
  2711. &xhci_operations );
  2712. if ( ! xhci->bus ) {
  2713. rc = -ENOMEM;
  2714. goto err_alloc_bus;
  2715. }
  2716. usb_bus_set_hostdata ( xhci->bus, xhci );
  2717. usb_hub_set_drvdata ( xhci->bus->hub, xhci );
  2718. /* Set port protocols */
  2719. for ( i = 1 ; i <= xhci->ports ; i++ ) {
  2720. port = usb_port ( xhci->bus->hub, i );
  2721. port->protocol = xhci_port_protocol ( xhci, i );
  2722. }
  2723. /* Register USB bus */
  2724. if ( ( rc = register_usb_bus ( xhci->bus ) ) != 0 )
  2725. goto err_register;
  2726. pci_set_drvdata ( pci, xhci );
  2727. return 0;
  2728. unregister_usb_bus ( xhci->bus );
  2729. err_register:
  2730. free_usb_bus ( xhci->bus );
  2731. err_alloc_bus:
  2732. xhci_reset ( xhci );
  2733. err_reset:
  2734. if ( pci->id->driver_data & XHCI_PCH )
  2735. xhci_pch_undo ( xhci, pci );
  2736. xhci_legacy_release ( xhci );
  2737. iounmap ( xhci->regs );
  2738. err_ioremap:
  2739. free ( xhci );
  2740. err_alloc:
  2741. return rc;
  2742. }
  2743. /**
  2744. * Remove PCI device
  2745. *
  2746. * @v pci PCI device
  2747. */
  2748. static void xhci_remove ( struct pci_device *pci ) {
  2749. struct xhci_device *xhci = pci_get_drvdata ( pci );
  2750. struct usb_bus *bus = xhci->bus;
  2751. unregister_usb_bus ( bus );
  2752. free_usb_bus ( bus );
  2753. xhci_reset ( xhci );
  2754. if ( pci->id->driver_data & XHCI_PCH )
  2755. xhci_pch_undo ( xhci, pci );
  2756. xhci_legacy_release ( xhci );
  2757. iounmap ( xhci->regs );
  2758. free ( xhci );
  2759. }
  2760. /** XHCI PCI device IDs */
  2761. static struct pci_device_id xhci_ids[] = {
  2762. PCI_ROM ( 0x8086, 0xffff, "xhci-pch", "xHCI (Intel PCH)", XHCI_PCH ),
  2763. PCI_ROM ( 0xffff, 0xffff, "xhci", "xHCI", 0 ),
  2764. };
  2765. /** XHCI PCI driver */
  2766. struct pci_driver xhci_driver __pci_driver = {
  2767. .ids = xhci_ids,
  2768. .id_count = ( sizeof ( xhci_ids ) / sizeof ( xhci_ids[0] ) ),
  2769. .class = PCI_CLASS ( PCI_CLASS_SERIAL, PCI_CLASS_SERIAL_USB,
  2770. PCI_CLASS_SERIAL_USB_XHCI ),
  2771. .probe = xhci_probe,
  2772. .remove = xhci_remove,
  2773. };