  1. /*
  2. * Copyright (C) 2014 Michael Brown <mbrown@fensystems.co.uk>.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License as
  6. * published by the Free Software Foundation; either version 2 of the
  7. * License, or (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful, but
  10. * WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. * General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  17. * 02110-1301, USA.
  18. *
  19. * You can also choose to distribute this program under the terms of
  20. * the Unmodified Binary Distribution Licence (as given in the file
  21. * COPYING.UBDL), provided that you have satisfied its requirements.
  22. */
  23. FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
  24. #include <stdlib.h>
  25. #include <stdio.h>
  26. #include <unistd.h>
  27. #include <string.h>
  28. #include <strings.h>
  29. #include <errno.h>
  30. #include <byteswap.h>
  31. #include <ipxe/malloc.h>
  32. #include <ipxe/umalloc.h>
  33. #include <ipxe/pci.h>
  34. #include <ipxe/usb.h>
  35. #include <ipxe/init.h>
  36. #include <ipxe/profile.h>
  37. #include "xhci.h"
  38. /** @file
  39. *
  40. * USB eXtensible Host Controller Interface (xHCI) driver
  41. *
  42. */
  43. /** Message transfer profiler */
  44. static struct profiler xhci_message_profiler __profiler =
  45. { .name = "xhci.message" };
  46. /** Stream transfer profiler */
  47. static struct profiler xhci_stream_profiler __profiler =
  48. { .name = "xhci.stream" };
  49. /** Event ring profiler */
  50. static struct profiler xhci_event_profiler __profiler =
  51. { .name = "xhci.event" };
  52. /** Transfer event profiler */
  53. static struct profiler xhci_transfer_profiler __profiler =
  54. { .name = "xhci.transfer" };
  55. /* Disambiguate the various error causes */
  56. #define EIO_DATA \
  57. __einfo_error ( EINFO_EIO_DATA )
  58. #define EINFO_EIO_DATA \
  59. __einfo_uniqify ( EINFO_EIO, ( 2 - 0 ), \
  60. "Data buffer error" )
  61. #define EIO_BABBLE \
  62. __einfo_error ( EINFO_EIO_BABBLE )
  63. #define EINFO_EIO_BABBLE \
  64. __einfo_uniqify ( EINFO_EIO, ( 3 - 0 ), \
  65. "Babble detected" )
  66. #define EIO_USB \
  67. __einfo_error ( EINFO_EIO_USB )
  68. #define EINFO_EIO_USB \
  69. __einfo_uniqify ( EINFO_EIO, ( 4 - 0 ), \
  70. "USB transaction error" )
  71. #define EIO_TRB \
  72. __einfo_error ( EINFO_EIO_TRB )
  73. #define EINFO_EIO_TRB \
  74. __einfo_uniqify ( EINFO_EIO, ( 5 - 0 ), \
  75. "TRB error" )
  76. #define EIO_STALL \
  77. __einfo_error ( EINFO_EIO_STALL )
  78. #define EINFO_EIO_STALL \
  79. __einfo_uniqify ( EINFO_EIO, ( 6 - 0 ), \
  80. "Stall error" )
  81. #define EIO_RESOURCE \
  82. __einfo_error ( EINFO_EIO_RESOURCE )
  83. #define EINFO_EIO_RESOURCE \
  84. __einfo_uniqify ( EINFO_EIO, ( 7 - 0 ), \
  85. "Resource error" )
  86. #define EIO_BANDWIDTH \
  87. __einfo_error ( EINFO_EIO_BANDWIDTH )
  88. #define EINFO_EIO_BANDWIDTH \
  89. __einfo_uniqify ( EINFO_EIO, ( 8 - 0 ), \
  90. "Bandwidth error" )
  91. #define EIO_NO_SLOTS \
  92. __einfo_error ( EINFO_EIO_NO_SLOTS )
  93. #define EINFO_EIO_NO_SLOTS \
  94. __einfo_uniqify ( EINFO_EIO, ( 9 - 0 ), \
  95. "No slots available" )
  96. #define EIO_STREAM_TYPE \
  97. __einfo_error ( EINFO_EIO_STREAM_TYPE )
  98. #define EINFO_EIO_STREAM_TYPE \
  99. __einfo_uniqify ( EINFO_EIO, ( 10 - 0 ), \
  100. "Invalid stream type" )
  101. #define EIO_SLOT \
  102. __einfo_error ( EINFO_EIO_SLOT )
  103. #define EINFO_EIO_SLOT \
  104. __einfo_uniqify ( EINFO_EIO, ( 11 - 0 ), \
  105. "Slot not enabled" )
  106. #define EIO_ENDPOINT \
  107. __einfo_error ( EINFO_EIO_ENDPOINT )
  108. #define EINFO_EIO_ENDPOINT \
  109. __einfo_uniqify ( EINFO_EIO, ( 12 - 0 ), \
  110. "Endpoint not enabled" )
  111. #define EIO_SHORT \
  112. __einfo_error ( EINFO_EIO_SHORT )
  113. #define EINFO_EIO_SHORT \
  114. __einfo_uniqify ( EINFO_EIO, ( 13 - 0 ), \
  115. "Short packet" )
  116. #define EIO_UNDERRUN \
  117. __einfo_error ( EINFO_EIO_UNDERRUN )
  118. #define EINFO_EIO_UNDERRUN \
  119. __einfo_uniqify ( EINFO_EIO, ( 14 - 0 ), \
  120. "Ring underrun" )
  121. #define EIO_OVERRUN \
  122. __einfo_error ( EINFO_EIO_OVERRUN )
  123. #define EINFO_EIO_OVERRUN \
  124. __einfo_uniqify ( EINFO_EIO, ( 15 - 0 ), \
  125. "Ring overrun" )
  126. #define EIO_VF_RING_FULL \
  127. __einfo_error ( EINFO_EIO_VF_RING_FULL )
  128. #define EINFO_EIO_VF_RING_FULL \
  129. __einfo_uniqify ( EINFO_EIO, ( 16 - 0 ), \
  130. "Virtual function event ring full" )
  131. #define EIO_PARAMETER \
  132. __einfo_error ( EINFO_EIO_PARAMETER )
  133. #define EINFO_EIO_PARAMETER \
  134. __einfo_uniqify ( EINFO_EIO, ( 17 - 0 ), \
  135. "Parameter error" )
  136. #define EIO_BANDWIDTH_OVERRUN \
  137. __einfo_error ( EINFO_EIO_BANDWIDTH_OVERRUN )
  138. #define EINFO_EIO_BANDWIDTH_OVERRUN \
  139. __einfo_uniqify ( EINFO_EIO, ( 18 - 0 ), \
  140. "Bandwidth overrun" )
  141. #define EIO_CONTEXT \
  142. __einfo_error ( EINFO_EIO_CONTEXT )
  143. #define EINFO_EIO_CONTEXT \
  144. __einfo_uniqify ( EINFO_EIO, ( 19 - 0 ), \
  145. "Context state error" )
  146. #define EIO_NO_PING \
  147. __einfo_error ( EINFO_EIO_NO_PING )
  148. #define EINFO_EIO_NO_PING \
  149. __einfo_uniqify ( EINFO_EIO, ( 20 - 0 ), \
  150. "No ping response" )
  151. #define EIO_RING_FULL \
  152. __einfo_error ( EINFO_EIO_RING_FULL )
  153. #define EINFO_EIO_RING_FULL \
  154. __einfo_uniqify ( EINFO_EIO, ( 21 - 0 ), \
  155. "Event ring full" )
  156. #define EIO_INCOMPATIBLE \
  157. __einfo_error ( EINFO_EIO_INCOMPATIBLE )
  158. #define EINFO_EIO_INCOMPATIBLE \
  159. __einfo_uniqify ( EINFO_EIO, ( 22 - 0 ), \
  160. "Incompatible device" )
  161. #define EIO_MISSED \
  162. __einfo_error ( EINFO_EIO_MISSED )
  163. #define EINFO_EIO_MISSED \
  164. __einfo_uniqify ( EINFO_EIO, ( 23 - 0 ), \
  165. "Missed service error" )
  166. #define EIO_CMD_STOPPED \
  167. __einfo_error ( EINFO_EIO_CMD_STOPPED )
  168. #define EINFO_EIO_CMD_STOPPED \
  169. __einfo_uniqify ( EINFO_EIO, ( 24 - 0 ), \
  170. "Command ring stopped" )
  171. #define EIO_CMD_ABORTED \
  172. __einfo_error ( EINFO_EIO_CMD_ABORTED )
  173. #define EINFO_EIO_CMD_ABORTED \
  174. __einfo_uniqify ( EINFO_EIO, ( 25 - 0 ), \
  175. "Command aborted" )
  176. #define EIO_STOP \
  177. __einfo_error ( EINFO_EIO_STOP )
  178. #define EINFO_EIO_STOP \
  179. __einfo_uniqify ( EINFO_EIO, ( 26 - 0 ), \
  180. "Stopped" )
  181. #define EIO_STOP_LEN \
  182. __einfo_error ( EINFO_EIO_STOP_LEN )
  183. #define EINFO_EIO_STOP_LEN \
  184. __einfo_uniqify ( EINFO_EIO, ( 27 - 0 ), \
  185. "Stopped - length invalid" )
  186. #define EIO_STOP_SHORT \
  187. __einfo_error ( EINFO_EIO_STOP_SHORT )
  188. #define EINFO_EIO_STOP_SHORT \
  189. __einfo_uniqify ( EINFO_EIO, ( 28 - 0 ), \
  190. "Stopped - short packet" )
  191. #define EIO_LATENCY \
  192. __einfo_error ( EINFO_EIO_LATENCY )
  193. #define EINFO_EIO_LATENCY \
  194. __einfo_uniqify ( EINFO_EIO, ( 29 - 0 ), \
  195. "Maximum exit latency too large" )
  196. #define EIO_ISOCH \
  197. __einfo_error ( EINFO_EIO_ISOCH )
  198. #define EINFO_EIO_ISOCH \
  199. __einfo_uniqify ( EINFO_EIO, ( 31 - 0 ), \
  200. "Isochronous buffer overrun" )
  201. #define EPROTO_LOST \
  202. __einfo_error ( EINFO_EPROTO_LOST )
  203. #define EINFO_EPROTO_LOST \
  204. __einfo_uniqify ( EINFO_EPROTO, ( 32 - 32 ), \
  205. "Event lost" )
  206. #define EPROTO_UNDEFINED \
  207. __einfo_error ( EINFO_EPROTO_UNDEFINED )
  208. #define EINFO_EPROTO_UNDEFINED \
  209. __einfo_uniqify ( EINFO_EPROTO, ( 33 - 32 ), \
  210. "Undefined error" )
  211. #define EPROTO_STREAM_ID \
  212. __einfo_error ( EINFO_EPROTO_STREAM_ID )
  213. #define EINFO_EPROTO_STREAM_ID \
  214. __einfo_uniqify ( EINFO_EPROTO, ( 34 - 32 ), \
  215. "Invalid stream ID" )
  216. #define EPROTO_SECONDARY \
  217. __einfo_error ( EINFO_EPROTO_SECONDARY )
  218. #define EINFO_EPROTO_SECONDARY \
  219. __einfo_uniqify ( EINFO_EPROTO, ( 35 - 32 ), \
  220. "Secondary bandwidth error" )
  221. #define EPROTO_SPLIT \
  222. __einfo_error ( EINFO_EPROTO_SPLIT )
  223. #define EINFO_EPROTO_SPLIT \
  224. __einfo_uniqify ( EINFO_EPROTO, ( 36 - 32 ), \
  225. "Split transaction error" )
  226. #define ECODE(code) \
  227. ( ( (code) < 32 ) ? \
  228. EUNIQ ( EINFO_EIO, ( (code) & 31 ), EIO_DATA, EIO_BABBLE, \
  229. EIO_USB, EIO_TRB, EIO_STALL, EIO_RESOURCE, \
  230. EIO_BANDWIDTH, EIO_NO_SLOTS, EIO_STREAM_TYPE, \
  231. EIO_SLOT, EIO_ENDPOINT, EIO_SHORT, EIO_UNDERRUN, \
  232. EIO_OVERRUN, EIO_VF_RING_FULL, EIO_PARAMETER, \
  233. EIO_BANDWIDTH_OVERRUN, EIO_CONTEXT, EIO_NO_PING, \
  234. EIO_RING_FULL, EIO_INCOMPATIBLE, EIO_MISSED, \
  235. EIO_CMD_STOPPED, EIO_CMD_ABORTED, EIO_STOP, \
  236. EIO_STOP_LEN, EIO_STOP_SHORT, EIO_LATENCY, \
  237. EIO_ISOCH ) : \
  238. ( (code) < 64 ) ? \
  239. EUNIQ ( EINFO_EPROTO, ( (code) & 31 ), EPROTO_LOST, \
  240. EPROTO_UNDEFINED, EPROTO_STREAM_ID, \
  241. EPROTO_SECONDARY, EPROTO_SPLIT ) : \
  242. EFAULT )
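/* Worked example of the ECODE() mapping above: completion code 6
 * ("Stall error") falls in the 0-31 range and maps to EIO_STALL,
 * completion code 34 ("Invalid stream ID") falls in the 32-63 range
 * and maps to EPROTO_STREAM_ID, and any code of 64 or above
 * collapses to EFAULT.
 */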
  243. /******************************************************************************
  244. *
  245. * Register access
  246. *
  247. ******************************************************************************
  248. */
  249. /**
  250. * Initialise device
  251. *
  252. * @v xhci xHCI device
  253. * @v regs MMIO registers
  254. */
  255. static void xhci_init ( struct xhci_device *xhci, void *regs ) {
  256. uint32_t hcsparams1;
  257. uint32_t hcsparams2;
  258. uint32_t hccparams1;
  259. uint32_t pagesize;
  260. size_t caplength;
  261. size_t rtsoff;
  262. size_t dboff;
  263. /* Locate capability, operational, runtime, and doorbell registers */
  264. xhci->cap = regs;
  265. caplength = readb ( xhci->cap + XHCI_CAP_CAPLENGTH );
  266. rtsoff = readl ( xhci->cap + XHCI_CAP_RTSOFF );
  267. dboff = readl ( xhci->cap + XHCI_CAP_DBOFF );
  268. xhci->op = ( xhci->cap + caplength );
  269. xhci->run = ( xhci->cap + rtsoff );
  270. xhci->db = ( xhci->cap + dboff );
  271. DBGC2 ( xhci, "XHCI %p cap %08lx op %08lx run %08lx db %08lx\n",
  272. xhci, virt_to_phys ( xhci->cap ), virt_to_phys ( xhci->op ),
  273. virt_to_phys ( xhci->run ), virt_to_phys ( xhci->db ) );
  274. /* Read structural parameters 1 */
  275. hcsparams1 = readl ( xhci->cap + XHCI_CAP_HCSPARAMS1 );
  276. xhci->slots = XHCI_HCSPARAMS1_SLOTS ( hcsparams1 );
  277. xhci->intrs = XHCI_HCSPARAMS1_INTRS ( hcsparams1 );
  278. xhci->ports = XHCI_HCSPARAMS1_PORTS ( hcsparams1 );
  279. DBGC ( xhci, "XHCI %p has %d slots %d intrs %d ports\n",
  280. xhci, xhci->slots, xhci->intrs, xhci->ports );
  281. /* Read structural parameters 2 */
  282. hcsparams2 = readl ( xhci->cap + XHCI_CAP_HCSPARAMS2 );
  283. xhci->scratchpads = XHCI_HCSPARAMS2_SCRATCHPADS ( hcsparams2 );
  284. DBGC2 ( xhci, "XHCI %p needs %d scratchpads\n",
  285. xhci, xhci->scratchpads );
  286. /* Read capability parameters 1 */
  287. hccparams1 = readl ( xhci->cap + XHCI_CAP_HCCPARAMS1 );
  288. xhci->addr64 = XHCI_HCCPARAMS1_ADDR64 ( hccparams1 );
  289. xhci->csz_shift = XHCI_HCCPARAMS1_CSZ_SHIFT ( hccparams1 );
  290. xhci->xecp = XHCI_HCCPARAMS1_XECP ( hccparams1 );
  291. /* Read page size */
  292. pagesize = readl ( xhci->op + XHCI_OP_PAGESIZE );
  293. xhci->pagesize = XHCI_PAGESIZE ( pagesize );
  294. assert ( xhci->pagesize != 0 );
  295. assert ( ( ( xhci->pagesize ) & ( xhci->pagesize - 1 ) ) == 0 );
  296. DBGC2 ( xhci, "XHCI %p page size %zd bytes\n",
  297. xhci, xhci->pagesize );
  298. }
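/* Note: every other register bank is located relative to the
 * capability registers: operational registers at cap + CAPLENGTH,
 * runtime registers at cap + RTSOFF, and the doorbell array at
 * cap + DBOFF. Per the xHCI specification the raw PAGESIZE register
 * is a bit mask (bit n set means a 2^(n+12) byte page size is
 * supported), so the common raw value of 0x1 ends up as the
 * 4096-byte xhci->pagesize reported above.
 */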
  299. /**
  300. * Find extended capability
  301. *
  302. * @v xhci xHCI device
  303. * @v id Capability ID
  304. * @v offset Offset to previous extended capability instance, or zero
  305. * @ret offset Offset to extended capability, or zero if not found
  306. */
  307. static unsigned int xhci_extended_capability ( struct xhci_device *xhci,
  308. unsigned int id,
  309. unsigned int offset ) {
  310. uint32_t xecp;
  311. unsigned int next;
  312. /* Locate the extended capability */
  313. while ( 1 ) {
  314. /* Locate first or next capability as applicable */
  315. if ( offset ) {
  316. xecp = readl ( xhci->cap + offset );
  317. next = XHCI_XECP_NEXT ( xecp );
  318. } else {
  319. next = xhci->xecp;
  320. }
  321. if ( ! next )
  322. return 0;
  323. offset += next;
  324. /* Check if this is the requested capability */
  325. xecp = readl ( xhci->cap + offset );
  326. if ( XHCI_XECP_ID ( xecp ) == id )
  327. return offset;
  328. }
  329. }
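/* Typical usage (as in xhci_supported_protocol() below) is to walk
 * every instance of a capability by feeding the previous offset back
 * in, for example:
 *
 *   offset = 0;
 *   while ( ( offset = xhci_extended_capability ( xhci, id,
 *                                                  offset ) ) ) {
 *           ... examine registers at ( xhci->cap + offset ) ...
 *   }
 */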
  330. /**
  331. * Write potentially 64-bit register
  332. *
  333. * @v xhci xHCI device
  334. * @v value Value
  335. * @v reg Register address
  336. * @ret rc Return status code
  337. */
  338. static inline __attribute__ (( always_inline )) int
  339. xhci_writeq ( struct xhci_device *xhci, physaddr_t value, void *reg ) {
  340. /* If this is a 32-bit build, then this can never fail
  341. * (allowing the compiler to optimise out the error path).
  342. */
  343. if ( sizeof ( value ) <= sizeof ( uint32_t ) ) {
  344. writel ( value, reg );
  345. writel ( 0, ( reg + sizeof ( uint32_t ) ) );
  346. return 0;
  347. }
  348. /* If the device does not support 64-bit addresses and this
  349. * address is outside the 32-bit address space, then fail.
  350. */
  351. if ( ( value & ~0xffffffffULL ) && ! xhci->addr64 ) {
  352. DBGC ( xhci, "XHCI %p cannot access address %lx\n",
  353. xhci, value );
  354. return -ENOTSUP;
  355. }
  356. /* If this is a 64-bit build, then writeq() is available */
  357. writeq ( value, reg );
  358. return 0;
  359. }
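/* Note: this helper is used only for the few 64-bit pointer
 * registers (DCBAAP, CRCR, ERDP and ERSTBA below). On a 32-bit build
 * the upper dword is simply written as zero, which is always valid;
 * on a 64-bit build the write fails cleanly if the address is above
 * 4GB and the controller does not advertise 64-bit addressing
 * support (xhci->addr64 clear).
 */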
  360. /**
  361. * Calculate buffer alignment
  362. *
  363. * @v len Length
  364. * @ret align Buffer alignment
  365. *
  366. * Determine alignment required for a buffer which must be aligned to
  367. * at least XHCI_MIN_ALIGN and which must not cross a page boundary.
  368. */
  369. static inline size_t xhci_align ( size_t len ) {
  370. size_t align;
  371. /* Align to own length (rounded up to a power of two) */
  372. align = ( 1 << fls ( len - 1 ) );
  373. /* Round up to XHCI_MIN_ALIGN if needed */
  374. if ( align < XHCI_MIN_ALIGN )
  375. align = XHCI_MIN_ALIGN;
  376. return align;
  377. }
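/* Worked example, assuming XHCI_MIN_ALIGN is 64 bytes: a 520-byte
 * allocation (e.g. a DCBAA for 64 slots, 65 entries of 8 bytes)
 * gives fls ( 519 ) = 10 and hence a 1024-byte alignment, so the
 * buffer cannot straddle a 4096-byte page boundary; a 16-byte
 * request would instead be rounded up to the 64-byte minimum.
 */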
  378. /**
  379. * Calculate device context offset
  380. *
  381. * @v xhci xHCI device
  382. * @v ctx Context index
  383. */
  384. static inline size_t xhci_device_context_offset ( struct xhci_device *xhci,
  385. unsigned int ctx ) {
  386. return ( XHCI_DCI ( ctx ) << xhci->csz_shift );
  387. }
  388. /**
  389. * Calculate input context offset
  390. *
  391. * @v xhci xHCI device
  392. * @v ctx Context index
  393. */
  394. static inline size_t xhci_input_context_offset ( struct xhci_device *xhci,
  395. unsigned int ctx ) {
  396. return ( XHCI_ICI ( ctx ) << xhci->csz_shift );
  397. }
  398. /******************************************************************************
  399. *
  400. * Diagnostics
  401. *
  402. ******************************************************************************
  403. */
  404. /**
  405. * Dump host controller registers
  406. *
  407. * @v xhci xHCI device
  408. */
  409. static inline void xhci_dump ( struct xhci_device *xhci ) {
  410. uint32_t usbcmd;
  411. uint32_t usbsts;
  412. uint32_t pagesize;
  413. uint32_t dnctrl;
  414. uint32_t config;
  415. /* Do nothing unless debugging is enabled */
  416. if ( ! DBG_LOG )
  417. return;
  418. /* Dump USBCMD */
  419. usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
  420. DBGC ( xhci, "XHCI %p USBCMD %08x%s%s\n", xhci, usbcmd,
  421. ( ( usbcmd & XHCI_USBCMD_RUN ) ? " run" : "" ),
  422. ( ( usbcmd & XHCI_USBCMD_HCRST ) ? " hcrst" : "" ) );
  423. /* Dump USBSTS */
  424. usbsts = readl ( xhci->op + XHCI_OP_USBSTS );
  425. DBGC ( xhci, "XHCI %p USBSTS %08x%s\n", xhci, usbsts,
  426. ( ( usbsts & XHCI_USBSTS_HCH ) ? " hch" : "" ) );
  427. /* Dump PAGESIZE */
  428. pagesize = readl ( xhci->op + XHCI_OP_PAGESIZE );
  429. DBGC ( xhci, "XHCI %p PAGESIZE %08x\n", xhci, pagesize );
  430. /* Dump DNCTRL */
  431. dnctrl = readl ( xhci->op + XHCI_OP_DNCTRL );
  432. DBGC ( xhci, "XHCI %p DNCTRL %08x\n", xhci, dnctrl );
  433. /* Dump CONFIG */
  434. config = readl ( xhci->op + XHCI_OP_CONFIG );
  435. DBGC ( xhci, "XHCI %p CONFIG %08x\n", xhci, config );
  436. }
  437. /**
  438. * Dump port registers
  439. *
  440. * @v xhci xHCI device
  441. * @v port Port number
  442. */
  443. static inline void xhci_dump_port ( struct xhci_device *xhci,
  444. unsigned int port ) {
  445. uint32_t portsc;
  446. uint32_t portpmsc;
  447. uint32_t portli;
  448. uint32_t porthlpmc;
  449. /* Do nothing unless debugging is enabled */
  450. if ( ! DBG_LOG )
  451. return;
  452. /* Dump PORTSC */
  453. portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port ) );
  454. DBGC ( xhci, "XHCI %p port %d PORTSC %08x%s%s%s%s psiv=%d\n",
  455. xhci, port, portsc,
  456. ( ( portsc & XHCI_PORTSC_CCS ) ? " ccs" : "" ),
  457. ( ( portsc & XHCI_PORTSC_PED ) ? " ped" : "" ),
  458. ( ( portsc & XHCI_PORTSC_PR ) ? " pr" : "" ),
  459. ( ( portsc & XHCI_PORTSC_PP ) ? " pp" : "" ),
  460. XHCI_PORTSC_PSIV ( portsc ) );
  461. /* Dump PORTPMSC */
  462. portpmsc = readl ( xhci->op + XHCI_OP_PORTPMSC ( port ) );
  463. DBGC ( xhci, "XHCI %p port %d PORTPMSC %08x\n", xhci, port, portpmsc );
  464. /* Dump PORTLI */
  465. portli = readl ( xhci->op + XHCI_OP_PORTLI ( port ) );
  466. DBGC ( xhci, "XHCI %p port %d PORTLI %08x\n", xhci, port, portli );
  467. /* Dump PORTHLPMC */
  468. porthlpmc = readl ( xhci->op + XHCI_OP_PORTHLPMC ( port ) );
  469. DBGC ( xhci, "XHCI %p port %d PORTHLPMC %08x\n",
  470. xhci, port, porthlpmc );
  471. }
  472. /******************************************************************************
  473. *
  474. * USB legacy support
  475. *
  476. ******************************************************************************
  477. */
  478. /** Prevent the release of ownership back to BIOS */
  479. static int xhci_legacy_prevent_release;
  480. /**
  481. * Initialise USB legacy support
  482. *
  483. * @v xhci xHCI device
  484. */
  485. static void xhci_legacy_init ( struct xhci_device *xhci ) {
  486. unsigned int legacy;
  487. uint8_t bios;
  488. /* Locate USB legacy support capability (if present) */
  489. legacy = xhci_extended_capability ( xhci, XHCI_XECP_ID_LEGACY, 0 );
  490. if ( ! legacy ) {
  491. /* Not an error; capability may not be present */
  492. DBGC ( xhci, "XHCI %p has no USB legacy support capability\n",
  493. xhci );
  494. return;
  495. }
  496. /* Check if legacy USB support is enabled */
  497. bios = readb ( xhci->cap + legacy + XHCI_USBLEGSUP_BIOS );
  498. if ( ! ( bios & XHCI_USBLEGSUP_BIOS_OWNED ) ) {
  499. /* Not an error; already owned by OS */
  500. DBGC ( xhci, "XHCI %p USB legacy support already disabled\n",
  501. xhci );
  502. return;
  503. }
  504. /* Record presence of USB legacy support capability */
  505. xhci->legacy = legacy;
  506. }
  507. /**
  508. * Claim ownership from BIOS
  509. *
  510. * @v xhci xHCI device
  511. */
  512. static void xhci_legacy_claim ( struct xhci_device *xhci ) {
  513. uint32_t ctlsts;
  514. uint8_t bios;
  515. unsigned int i;
  516. /* Do nothing unless legacy support capability is present */
  517. if ( ! xhci->legacy )
  518. return;
  519. /* Claim ownership */
  520. writeb ( XHCI_USBLEGSUP_OS_OWNED,
  521. xhci->cap + xhci->legacy + XHCI_USBLEGSUP_OS );
  522. /* Wait for BIOS to release ownership */
  523. for ( i = 0 ; i < XHCI_USBLEGSUP_MAX_WAIT_MS ; i++ ) {
  524. /* Check if BIOS has released ownership */
  525. bios = readb ( xhci->cap + xhci->legacy + XHCI_USBLEGSUP_BIOS );
  526. if ( ! ( bios & XHCI_USBLEGSUP_BIOS_OWNED ) ) {
  527. DBGC ( xhci, "XHCI %p claimed ownership from BIOS\n",
  528. xhci );
  529. ctlsts = readl ( xhci->cap + xhci->legacy +
  530. XHCI_USBLEGSUP_CTLSTS );
  531. if ( ctlsts ) {
  532. DBGC ( xhci, "XHCI %p warning: BIOS retained "
  533. "SMIs: %08x\n", xhci, ctlsts );
  534. }
  535. return;
  536. }
  537. /* Delay */
  538. mdelay ( 1 );
  539. }
  540. /* BIOS did not release ownership. Claim it forcibly by
  541. * disabling all SMIs.
  542. */
  543. DBGC ( xhci, "XHCI %p could not claim ownership from BIOS: forcibly "
  544. "disabling SMIs\n", xhci );
  545. writel ( 0, xhci->cap + xhci->legacy + XHCI_USBLEGSUP_CTLSTS );
  546. }
  547. /**
  548. * Release ownership back to BIOS
  549. *
  550. * @v xhci xHCI device
  551. */
  552. static void xhci_legacy_release ( struct xhci_device *xhci ) {
  553. /* Do nothing unless legacy support capability is present */
  554. if ( ! xhci->legacy )
  555. return;
  556. /* Do nothing if releasing ownership is prevented */
  557. if ( xhci_legacy_prevent_release ) {
  558. DBGC ( xhci, "XHCI %p not releasing ownership to BIOS\n", xhci);
  559. return;
  560. }
  561. /* Release ownership */
  562. writeb ( 0, xhci->cap + xhci->legacy + XHCI_USBLEGSUP_OS );
  563. DBGC ( xhci, "XHCI %p released ownership to BIOS\n", xhci );
  564. }
  565. /******************************************************************************
  566. *
  567. * Supported protocols
  568. *
  569. ******************************************************************************
  570. */
  571. /**
  572. * Transcribe port speed (for debugging)
  573. *
  574. * @v psi Protocol speed ID
  575. * @ret speed Transcribed speed
  576. */
  577. static inline const char * xhci_speed_name ( uint32_t psi ) {
  578. static const char *exponents[4] = { "", "k", "M", "G" };
  579. static char buf[ 10 /* "xxxxxXbps" + NUL */ ];
  580. unsigned int mantissa;
  581. unsigned int exponent;
  582. /* Extract mantissa and exponent */
  583. mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
  584. exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
  585. /* Transcribe speed */
  586. snprintf ( buf, sizeof ( buf ), "%d%sbps",
  587. mantissa, exponents[exponent] );
  588. return buf;
  589. }
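/* Example: a PSI dword with mantissa 5 and exponent 3 is rendered as
 * "5Gbps" (the exponent indexes the "", "k", "M", "G" table above),
 * which is the usual advertisement for a SuperSpeed port.
 */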
  590. /**
  591. * Find supported protocol extended capability for a port
  592. *
  593. * @v xhci xHCI device
  594. * @v port Port number
  595. * @ret supported Offset to extended capability, or zero if not found
  596. */
  597. static unsigned int xhci_supported_protocol ( struct xhci_device *xhci,
  598. unsigned int port ) {
  599. unsigned int supported = 0;
  600. unsigned int offset;
  601. unsigned int count;
  602. uint32_t ports;
  603. /* Iterate over all supported protocol structures */
  604. while ( ( supported = xhci_extended_capability ( xhci,
  605. XHCI_XECP_ID_SUPPORTED,
  606. supported ) ) ) {
  607. /* Determine port range */
  608. ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
  609. offset = XHCI_SUPPORTED_PORTS_OFFSET ( ports );
  610. count = XHCI_SUPPORTED_PORTS_COUNT ( ports );
  611. /* Check if port lies within this range */
  612. if ( ( port - offset ) < count )
  613. return supported;
  614. }
  615. DBGC ( xhci, "XHCI %p port %d has no supported protocol\n",
  616. xhci, port );
  617. return 0;
  618. }
  619. /**
  620. * Find port protocol
  621. *
  622. * @v xhci xHCI device
  623. * @v port Port number
  624. * @ret protocol USB protocol, or zero if not found
  625. */
  626. static unsigned int xhci_port_protocol ( struct xhci_device *xhci,
  627. unsigned int port ) {
  628. unsigned int supported = xhci_supported_protocol ( xhci, port );
  629. union {
  630. uint32_t raw;
  631. char text[5];
  632. } name;
  633. unsigned int protocol;
  634. unsigned int type;
  635. unsigned int psic;
  636. unsigned int psiv;
  637. unsigned int i;
  638. uint32_t revision;
  639. uint32_t ports;
  640. uint32_t slot;
  641. uint32_t psi;
  642. /* Fail if there is no supported protocol */
  643. if ( ! supported )
  644. return 0;
  645. /* Determine protocol version */
  646. revision = readl ( xhci->cap + supported + XHCI_SUPPORTED_REVISION );
  647. protocol = XHCI_SUPPORTED_REVISION_VER ( revision );
  648. /* Describe port protocol */
  649. if ( DBG_EXTRA ) {
  650. name.raw = cpu_to_le32 ( readl ( xhci->cap + supported +
  651. XHCI_SUPPORTED_NAME ) );
  652. name.text[4] = '\0';
  653. slot = readl ( xhci->cap + supported + XHCI_SUPPORTED_SLOT );
  654. type = XHCI_SUPPORTED_SLOT_TYPE ( slot );
  655. DBGC2 ( xhci, "XHCI %p port %d %sv%04x type %d",
  656. xhci, port, name.text, protocol, type );
  657. ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
  658. psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );
  659. if ( psic ) {
  660. DBGC2 ( xhci, " speeds" );
  661. for ( i = 0 ; i < psic ; i++ ) {
  662. psi = readl ( xhci->cap + supported +
  663. XHCI_SUPPORTED_PSI ( i ) );
  664. psiv = XHCI_SUPPORTED_PSI_VALUE ( psi );
  665. DBGC2 ( xhci, " %d:%s", psiv,
  666. xhci_speed_name ( psi ) );
  667. }
  668. }
  669. DBGC2 ( xhci, "\n" );
  670. }
  671. return protocol;
  672. }
  673. /**
  674. * Find port slot type
  675. *
  676. * @v xhci xHCI device
  677. * @v port Port number
  678. * @ret type Slot type, or negative error
  679. */
  680. static int xhci_port_slot_type ( struct xhci_device *xhci, unsigned int port ) {
  681. unsigned int supported = xhci_supported_protocol ( xhci, port );
  682. unsigned int type;
  683. uint32_t slot;
  684. /* Fail if there is no supported protocol */
  685. if ( ! supported )
  686. return -ENOTSUP;
  687. /* Get slot type */
  688. slot = readl ( xhci->cap + supported + XHCI_SUPPORTED_SLOT );
  689. type = XHCI_SUPPORTED_SLOT_TYPE ( slot );
  690. return type;
  691. }
  692. /**
  693. * Find port speed
  694. *
  695. * @v xhci xHCI device
  696. * @v port Port number
  697. * @v psiv Protocol speed ID value
  698. * @ret speed Port speed, or negative error
  699. */
  700. static int xhci_port_speed ( struct xhci_device *xhci, unsigned int port,
  701. unsigned int psiv ) {
  702. unsigned int supported = xhci_supported_protocol ( xhci, port );
  703. unsigned int psic;
  704. unsigned int mantissa;
  705. unsigned int exponent;
  706. unsigned int speed;
  707. unsigned int i;
  708. uint32_t ports;
  709. uint32_t psi;
  710. /* Fail if there is no supported protocol */
  711. if ( ! supported )
  712. return -ENOTSUP;
  713. /* Get protocol speed ID count */
  714. ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
  715. psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );
  716. /* Use the default mappings if applicable */
  717. if ( ! psic ) {
  718. switch ( psiv ) {
  719. case XHCI_SPEED_LOW : return USB_SPEED_LOW;
  720. case XHCI_SPEED_FULL : return USB_SPEED_FULL;
  721. case XHCI_SPEED_HIGH : return USB_SPEED_HIGH;
  722. case XHCI_SPEED_SUPER : return USB_SPEED_SUPER;
  723. default:
  724. DBGC ( xhci, "XHCI %p port %d non-standard PSI value "
  725. "%d\n", xhci, port, psiv );
  726. return -ENOTSUP;
  727. }
  728. }
  729. /* Iterate over PSI dwords looking for a match */
  730. for ( i = 0 ; i < psic ; i++ ) {
  731. psi = readl ( xhci->cap + supported + XHCI_SUPPORTED_PSI ( i ));
  732. if ( psiv == XHCI_SUPPORTED_PSI_VALUE ( psi ) ) {
  733. mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
  734. exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
  735. speed = USB_SPEED ( mantissa, exponent );
  736. return speed;
  737. }
  738. }
  739. DBGC ( xhci, "XHCI %p port %d spurious PSI value %d\n",
  740. xhci, port, psiv );
  741. return -ENOENT;
  742. }
  743. /**
  744. * Find protocol speed ID value
  745. *
  746. * @v xhci xHCI device
  747. * @v port Port number
  748. * @v speed USB speed
  749. * @ret psiv Protocol speed ID value, or negative error
  750. */
  751. static int xhci_port_psiv ( struct xhci_device *xhci, unsigned int port,
  752. unsigned int speed ) {
  753. unsigned int supported = xhci_supported_protocol ( xhci, port );
  754. unsigned int psic;
  755. unsigned int mantissa;
  756. unsigned int exponent;
  757. unsigned int psiv;
  758. unsigned int i;
  759. uint32_t ports;
  760. uint32_t psi;
  761. /* Fail if there is no supported protocol */
  762. if ( ! supported )
  763. return -ENOTSUP;
  764. /* Get protocol speed ID count */
  765. ports = readl ( xhci->cap + supported + XHCI_SUPPORTED_PORTS );
  766. psic = XHCI_SUPPORTED_PORTS_PSIC ( ports );
  767. /* Use the default mappings if applicable */
  768. if ( ! psic ) {
  769. switch ( speed ) {
  770. case USB_SPEED_LOW : return XHCI_SPEED_LOW;
  771. case USB_SPEED_FULL : return XHCI_SPEED_FULL;
  772. case USB_SPEED_HIGH : return XHCI_SPEED_HIGH;
  773. case USB_SPEED_SUPER : return XHCI_SPEED_SUPER;
  774. default:
  775. DBGC ( xhci, "XHCI %p port %d non-standard speed %d\n",
  776. xhci, port, speed );
  777. return -ENOTSUP;
  778. }
  779. }
  780. /* Iterate over PSI dwords looking for a match */
  781. for ( i = 0 ; i < psic ; i++ ) {
  782. psi = readl ( xhci->cap + supported + XHCI_SUPPORTED_PSI ( i ));
  783. mantissa = XHCI_SUPPORTED_PSI_MANTISSA ( psi );
  784. exponent = XHCI_SUPPORTED_PSI_EXPONENT ( psi );
  785. if ( speed == USB_SPEED ( mantissa, exponent ) ) {
  786. psiv = XHCI_SUPPORTED_PSI_VALUE ( psi );
  787. return psiv;
  788. }
  789. }
  790. DBGC ( xhci, "XHCI %p port %d unrepresentable speed %#x\n",
  791. xhci, port, speed );
  792. return -ENOENT;
  793. }
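/* Note: xhci_port_speed() and xhci_port_psiv() are inverses: one
 * maps a protocol speed ID reported by the hardware to an internal
 * USB speed, the other maps a requested USB speed back to the ID the
 * hardware expects. Both fall back to the default ID assignments
 * from the xHCI specification whenever the supported-protocol
 * capability declares no explicit PSI table (PSIC = 0).
 */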
  794. /******************************************************************************
  795. *
  796. * Device context base address array
  797. *
  798. ******************************************************************************
  799. */
  800. /**
  801. * Allocate device context base address array
  802. *
  803. * @v xhci xHCI device
  804. * @ret rc Return status code
  805. */
  806. static int xhci_dcbaa_alloc ( struct xhci_device *xhci ) {
  807. size_t len;
  808. physaddr_t dcbaap;
  809. int rc;
  810. /* Allocate and initialise structure. Must be at least
  811. * 64-byte aligned and must not cross a page boundary, so
  812. * align on its own size (rounded up to a power of two and
  813. * with a minimum of 64 bytes).
  814. */
  815. len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
  816. xhci->dcbaa = malloc_dma ( len, xhci_align ( len ) );
  817. if ( ! xhci->dcbaa ) {
  818. DBGC ( xhci, "XHCI %p could not allocate DCBAA\n", xhci );
  819. rc = -ENOMEM;
  820. goto err_alloc;
  821. }
  822. memset ( xhci->dcbaa, 0, len );
  823. /* Program DCBAA pointer */
  824. dcbaap = virt_to_phys ( xhci->dcbaa );
  825. if ( ( rc = xhci_writeq ( xhci, dcbaap,
  826. xhci->op + XHCI_OP_DCBAAP ) ) != 0 )
  827. goto err_writeq;
  828. DBGC2 ( xhci, "XHCI %p DCBAA at [%08lx,%08lx)\n",
  829. xhci, dcbaap, ( dcbaap + len ) );
  830. return 0;
  831. err_writeq:
  832. free_dma ( xhci->dcbaa, len );
  833. err_alloc:
  834. return rc;
  835. }
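/* Sizing example: a controller advertising 32 device slots needs a
 * DCBAA of ( 32 + 1 ) * 8 = 264 bytes (entry 0 is reserved for the
 * scratchpad buffer array pointer), which xhci_align() rounds up to
 * a 512-byte alignment.
 */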
  836. /**
  837. * Free device context base address array
  838. *
  839. * @v xhci xHCI device
  840. */
  841. static void xhci_dcbaa_free ( struct xhci_device *xhci ) {
  842. size_t len;
  843. unsigned int i;
  844. /* Sanity check */
  845. for ( i = 0 ; i <= xhci->slots ; i++ )
  846. assert ( xhci->dcbaa[i] == 0 );
  847. /* Clear DCBAA pointer */
  848. xhci_writeq ( xhci, 0, xhci->op + XHCI_OP_DCBAAP );
  849. /* Free DCBAA */
  850. len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
  851. free_dma ( xhci->dcbaa, len );
  852. }
  853. /******************************************************************************
  854. *
  855. * Scratchpad buffers
  856. *
  857. ******************************************************************************
  858. */
  859. /**
  860. * Allocate scratchpad buffers
  861. *
  862. * @v xhci xHCI device
  863. * @ret rc Return status code
  864. */
  865. static int xhci_scratchpad_alloc ( struct xhci_device *xhci ) {
  866. size_t array_len;
  867. size_t len;
  868. physaddr_t phys;
  869. unsigned int i;
  870. int rc;
  871. /* Do nothing if no scratchpad buffers are used */
  872. if ( ! xhci->scratchpads )
  873. return 0;
  874. /* Allocate scratchpads */
  875. len = ( xhci->scratchpads * xhci->pagesize );
  876. xhci->scratchpad = umalloc ( len );
  877. if ( ! xhci->scratchpad ) {
  878. DBGC ( xhci, "XHCI %p could not allocate scratchpad buffers\n",
  879. xhci );
  880. rc = -ENOMEM;
  881. goto err_alloc;
  882. }
  883. memset_user ( xhci->scratchpad, 0, 0, len );
  884. /* Allocate scratchpad array */
  885. array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
  886. xhci->scratchpad_array =
  887. malloc_dma ( array_len, xhci_align ( array_len ) );
  888. if ( ! xhci->scratchpad_array ) {
  889. DBGC ( xhci, "XHCI %p could not allocate scratchpad buffer "
  890. "array\n", xhci );
  891. rc = -ENOMEM;
  892. goto err_alloc_array;
  893. }
  894. /* Populate scratchpad array */
  895. for ( i = 0 ; i < xhci->scratchpads ; i++ ) {
  896. phys = user_to_phys ( xhci->scratchpad, ( i * xhci->pagesize ));
  897. xhci->scratchpad_array[i] = phys;
  898. }
  899. /* Set scratchpad array pointer */
  900. assert ( xhci->dcbaa != NULL );
  901. xhci->dcbaa[0] = cpu_to_le64 ( virt_to_phys ( xhci->scratchpad_array ));
  902. DBGC2 ( xhci, "XHCI %p scratchpad [%08lx,%08lx) array [%08lx,%08lx)\n",
  903. xhci, user_to_phys ( xhci->scratchpad, 0 ),
  904. user_to_phys ( xhci->scratchpad, len ),
  905. virt_to_phys ( xhci->scratchpad_array ),
  906. ( virt_to_phys ( xhci->scratchpad_array ) + array_len ) );
  907. return 0;
  908. free_dma ( xhci->scratchpad_array, array_len );
  909. err_alloc_array:
  910. ufree ( xhci->scratchpad );
  911. err_alloc:
  912. return rc;
  913. }
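/* Layout note: DCBAA entry 0 points at the scratchpad buffer array,
 * and array entry i points at page i of the single umalloc()ed
 * region, i.e. ( scratchpad + i * pagesize ). The pages themselves
 * are for the controller's private use; the driver only zeroes them
 * here and frees them on shutdown.
 */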
  914. /**
  915. * Free scratchpad buffers
  916. *
  917. * @v xhci xHCI device
  918. */
  919. static void xhci_scratchpad_free ( struct xhci_device *xhci ) {
  920. size_t array_len;
  921. /* Do nothing if no scratchpad buffers are used */
  922. if ( ! xhci->scratchpads )
  923. return;
  924. /* Clear scratchpad array pointer */
  925. assert ( xhci->dcbaa != NULL );
  926. xhci->dcbaa[0] = 0;
  927. /* Free scratchpad array */
  928. array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
  929. free_dma ( xhci->scratchpad_array, array_len );
  930. /* Free scratchpads */
  931. ufree ( xhci->scratchpad );
  932. }
  933. /******************************************************************************
  934. *
  935. * Run / stop / reset
  936. *
  937. ******************************************************************************
  938. */
  939. /**
  940. * Start xHCI device
  941. *
  942. * @v xhci xHCI device
  943. */
  944. static void xhci_run ( struct xhci_device *xhci ) {
  945. uint32_t config;
  946. uint32_t usbcmd;
  947. /* Configure number of device slots */
  948. config = readl ( xhci->op + XHCI_OP_CONFIG );
  949. config &= ~XHCI_CONFIG_MAX_SLOTS_EN_MASK;
  950. config |= XHCI_CONFIG_MAX_SLOTS_EN ( xhci->slots );
  951. writel ( config, xhci->op + XHCI_OP_CONFIG );
  952. /* Set run/stop bit */
  953. usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
  954. usbcmd |= XHCI_USBCMD_RUN;
  955. writel ( usbcmd, xhci->op + XHCI_OP_USBCMD );
  956. }
  957. /**
  958. * Stop xHCI device
  959. *
  960. * @v xhci xHCI device
  961. * @ret rc Return status code
  962. */
  963. static int xhci_stop ( struct xhci_device *xhci ) {
  964. uint32_t usbcmd;
  965. uint32_t usbsts;
  966. unsigned int i;
  967. /* Clear run/stop bit */
  968. usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
  969. usbcmd &= ~XHCI_USBCMD_RUN;
  970. writel ( usbcmd, xhci->op + XHCI_OP_USBCMD );
  971. /* Wait for device to stop */
  972. for ( i = 0 ; i < XHCI_STOP_MAX_WAIT_MS ; i++ ) {
  973. /* Check if device is stopped */
  974. usbsts = readl ( xhci->op + XHCI_OP_USBSTS );
  975. if ( usbsts & XHCI_USBSTS_HCH )
  976. return 0;
  977. /* Delay */
  978. mdelay ( 1 );
  979. }
  980. DBGC ( xhci, "XHCI %p timed out waiting for stop\n", xhci );
  981. return -ETIMEDOUT;
  982. }
  983. /**
  984. * Reset xHCI device
  985. *
  986. * @v xhci xHCI device
  987. * @ret rc Return status code
  988. */
  989. static int xhci_reset ( struct xhci_device *xhci ) {
  990. uint32_t usbcmd;
  991. unsigned int i;
  992. int rc;
  993. /* The xHCI specification states that resetting a running
  994. * device may result in undefined behaviour, so try stopping
  995. * it first.
  996. */
  997. if ( ( rc = xhci_stop ( xhci ) ) != 0 ) {
  998. /* Ignore errors and attempt to reset the device anyway */
  999. }
  1000. /* Reset device */
  1001. writel ( XHCI_USBCMD_HCRST, xhci->op + XHCI_OP_USBCMD );
  1002. /* Wait for reset to complete */
  1003. for ( i = 0 ; i < XHCI_RESET_MAX_WAIT_MS ; i++ ) {
  1004. /* Check if reset is complete */
  1005. usbcmd = readl ( xhci->op + XHCI_OP_USBCMD );
  1006. if ( ! ( usbcmd & XHCI_USBCMD_HCRST ) )
  1007. return 0;
  1008. /* Delay */
  1009. mdelay ( 1 );
  1010. }
  1011. DBGC ( xhci, "XHCI %p timed out waiting for reset\n", xhci );
  1012. return -ETIMEDOUT;
  1013. }
  1014. /******************************************************************************
  1015. *
  1016. * Transfer request blocks
  1017. *
  1018. ******************************************************************************
  1019. */
  1020. /**
  1021. * Allocate transfer request block ring
  1022. *
  1023. * @v xhci xHCI device
  1024. * @v ring TRB ring
  1025. * @v shift Ring size (log2)
  1026. * @v slot Device slot
  1027. * @v target Doorbell target
  1028. * @v stream Doorbell stream ID
  1029. * @ret rc Return status code
  1030. */
  1031. static int xhci_ring_alloc ( struct xhci_device *xhci,
  1032. struct xhci_trb_ring *ring,
  1033. unsigned int shift, unsigned int slot,
  1034. unsigned int target, unsigned int stream ) {
  1035. struct xhci_trb_link *link;
  1036. unsigned int count;
  1037. int rc;
  1038. /* Sanity check */
  1039. assert ( shift > 0 );
  1040. /* Initialise structure */
  1041. memset ( ring, 0, sizeof ( *ring ) );
  1042. ring->shift = shift;
  1043. count = ( 1U << shift );
  1044. ring->mask = ( count - 1 );
  1045. ring->len = ( ( count + 1 /* Link TRB */ ) * sizeof ( ring->trb[0] ) );
  1046. ring->db = ( xhci->db + ( slot * sizeof ( ring->dbval ) ) );
  1047. ring->dbval = XHCI_DBVAL ( target, stream );
  1048. /* Allocate I/O buffers */
  1049. ring->iobuf = zalloc ( count * sizeof ( ring->iobuf[0] ) );
  1050. if ( ! ring->iobuf ) {
  1051. rc = -ENOMEM;
  1052. goto err_alloc_iobuf;
  1053. }
  1054. /* Allocate TRBs */
  1055. ring->trb = malloc_dma ( ring->len, xhci_align ( ring->len ) );
  1056. if ( ! ring->trb ) {
  1057. rc = -ENOMEM;
  1058. goto err_alloc_trb;
  1059. }
  1060. memset ( ring->trb, 0, ring->len );
  1061. /* Initialise Link TRB */
  1062. link = &ring->trb[count].link;
  1063. link->next = cpu_to_le64 ( virt_to_phys ( ring->trb ) );
  1064. link->flags = XHCI_TRB_TC;
  1065. link->type = XHCI_TRB_LINK;
  1066. ring->link = link;
  1067. return 0;
  1068. free_dma ( ring->trb, ring->len );
  1069. err_alloc_trb:
  1070. free ( ring->iobuf );
  1071. err_alloc_iobuf:
  1072. return rc;
  1073. }
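/* Ring layout sketch: with shift = n the ring holds 2^n normal TRBs
 * plus one trailing Link TRB whose pointer wraps back to the start
 * and whose Toggle Cycle flag tells the controller to flip its
 * consumer cycle state when it follows the link. For example, with
 * 16-byte TRBs and shift = 6 this gives ( 64 + 1 ) * 16 = 1040
 * bytes, allocated via xhci_align() so the segment is naturally
 * aligned.
 */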
  1074. /**
  1075. * Reset transfer request block ring
  1076. *
  1077. * @v ring TRB ring
  1078. */
  1079. static void xhci_ring_reset ( struct xhci_trb_ring *ring ) {
  1080. unsigned int count = ( 1U << ring->shift );
  1081. /* Reset producer and consumer counters */
  1082. ring->prod = 0;
  1083. ring->cons = 0;
  1084. /* Reset TRBs (except Link TRB) */
  1085. memset ( ring->trb, 0, ( count * sizeof ( ring->trb[0] ) ) );
  1086. }
  1087. /**
  1088. * Free transfer request block ring
  1089. *
  1090. * @v ring TRB ring
  1091. */
  1092. static void xhci_ring_free ( struct xhci_trb_ring *ring ) {
  1093. unsigned int count = ( 1U << ring->shift );
  1094. unsigned int i;
  1095. /* Sanity checks */
  1096. assert ( ring->cons == ring->prod );
  1097. for ( i = 0 ; i < count ; i++ )
  1098. assert ( ring->iobuf[i] == NULL );
  1099. /* Free TRBs */
  1100. free_dma ( ring->trb, ring->len );
  1101. /* Free I/O buffers */
  1102. free ( ring->iobuf );
  1103. }
  1104. /**
  1105. * Enqueue a transfer request block
  1106. *
  1107. * @v ring TRB ring
  1108. * @v iobuf I/O buffer (if any)
  1109. * @v trb Transfer request block (with empty Cycle flag)
  1110. * @ret rc Return status code
  1111. *
  1112. * This operation does not implicitly ring the doorbell register.
  1113. */
  1114. static int xhci_enqueue ( struct xhci_trb_ring *ring, struct io_buffer *iobuf,
  1115. const union xhci_trb *trb ) {
  1116. union xhci_trb *dest;
  1117. unsigned int prod;
  1118. unsigned int mask;
  1119. unsigned int index;
  1120. unsigned int cycle;
  1121. /* Sanity check */
  1122. assert ( ! ( trb->common.flags & XHCI_TRB_C ) );
  1123. /* Fail if ring is full */
  1124. if ( ! xhci_ring_remaining ( ring ) )
  1125. return -ENOBUFS;
  1126. /* Update producer counter (and link TRB, if applicable) */
  1127. prod = ring->prod++;
  1128. mask = ring->mask;
  1129. cycle = ( ( ~( prod >> ring->shift ) ) & XHCI_TRB_C );
  1130. index = ( prod & mask );
  1131. if ( index == 0 )
  1132. ring->link->flags = ( XHCI_TRB_TC | ( cycle ^ XHCI_TRB_C ) );
  1133. /* Record I/O buffer */
  1134. ring->iobuf[index] = iobuf;
  1135. /* Enqueue TRB */
  1136. dest = &ring->trb[index];
  1137. dest->template.parameter = trb->template.parameter;
  1138. dest->template.status = trb->template.status;
  1139. wmb();
  1140. dest->template.control = ( trb->template.control |
  1141. cpu_to_le32 ( cycle ) );
  1142. return 0;
  1143. }
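/* Cycle bit bookkeeping: ring->prod counts every TRB ever produced,
 * so ( prod >> shift ) is the number of completed laps around the
 * ring; the producer cycle state therefore starts at 1 and toggles
 * on each wrap. When the producer wraps to index 0, the Link TRB is
 * rewritten with the previous lap's cycle value, so the controller
 * treats the link as valid (and toggles its own cycle state) only
 * once the producer has actually started the next lap. The control
 * word of each TRB is written last, after the write barrier, so the
 * controller never sees a half-written TRB as valid.
 */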
  1144. /**
  1145. * Dequeue a transfer request block
  1146. *
  1147. * @v ring TRB ring
  1148. * @ret iobuf I/O buffer
  1149. */
  1150. static struct io_buffer * xhci_dequeue ( struct xhci_trb_ring *ring ) {
  1151. struct io_buffer *iobuf;
  1152. unsigned int cons;
  1153. unsigned int mask;
  1154. unsigned int index;
  1155. /* Sanity check */
  1156. assert ( xhci_ring_fill ( ring ) != 0 );
  1157. /* Update consumer counter */
  1158. cons = ring->cons++;
  1159. mask = ring->mask;
  1160. index = ( cons & mask );
  1161. /* Retrieve I/O buffer */
  1162. iobuf = ring->iobuf[index];
  1163. ring->iobuf[index] = NULL;
  1164. return iobuf;
  1165. }
  1166. /**
  1167. * Enqueue multiple transfer request blocks
  1168. *
  1169. * @v ring TRB ring
  1170. * @v iobuf I/O buffer
  1171. * @v trbs Transfer request blocks (with empty Cycle flag)
  1172. * @v count Number of transfer request blocks
  1173. * @ret rc Return status code
  1174. *
  1175. * This operation does not implicitly ring the doorbell register.
  1176. */
  1177. static int xhci_enqueue_multi ( struct xhci_trb_ring *ring,
  1178. struct io_buffer *iobuf,
  1179. const union xhci_trb *trbs,
  1180. unsigned int count ) {
  1181. const union xhci_trb *trb = trbs;
  1182. int rc;
  1183. /* Sanity check */
  1184. assert ( iobuf != NULL );
  1185. /* Fail if ring does not have sufficient space */
  1186. if ( xhci_ring_remaining ( ring ) < count )
  1187. return -ENOBUFS;
  1188. /* Enqueue each TRB, recording the I/O buffer with the final TRB */
  1189. while ( count-- ) {
  1190. rc = xhci_enqueue ( ring, ( count ? NULL : iobuf ), trb++ );
  1191. assert ( rc == 0 ); /* Should never be able to fail */
  1192. }
  1193. return 0;
  1194. }
  1195. /**
  1196. * Dequeue multiple transfer request blocks
  1197. *
  1198. * @v ring TRB ring
  1199. * @ret iobuf I/O buffer
  1200. */
  1201. static struct io_buffer * xhci_dequeue_multi ( struct xhci_trb_ring *ring ) {
  1202. struct io_buffer *iobuf;
  1203. /* Dequeue TRBs until we reach the final TRB for an I/O buffer */
  1204. do {
  1205. iobuf = xhci_dequeue ( ring );
  1206. } while ( iobuf == NULL );
  1207. return iobuf;
  1208. }
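/* Note: for multi-TRB transfers only the final TRB records the I/O
 * buffer (see xhci_enqueue_multi() above), so this simply discards
 * the leading NULL entries until it reaches the TRB that owns the
 * buffer.
 */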
  1209. /**
  1210. * Ring doorbell register
  1211. *
  1212. * @v ring TRB ring
  1213. */
  1214. static inline __attribute__ (( always_inline )) void
  1215. xhci_doorbell ( struct xhci_trb_ring *ring ) {
  1216. wmb();
  1217. writel ( ring->dbval, ring->db );
  1218. }
  1219. /******************************************************************************
  1220. *
  1221. * Command and event rings
  1222. *
  1223. ******************************************************************************
  1224. */
  1225. /**
  1226. * Allocate command ring
  1227. *
  1228. * @v xhci xHCI device
  1229. * @ret rc Return status code
  1230. */
  1231. static int xhci_command_alloc ( struct xhci_device *xhci ) {
  1232. physaddr_t crp;
  1233. int rc;
  1234. /* Allocate TRB ring */
  1235. if ( ( rc = xhci_ring_alloc ( xhci, &xhci->command, XHCI_CMD_TRBS_LOG2,
  1236. 0, 0, 0 ) ) != 0 )
  1237. goto err_ring_alloc;
  1238. /* Program command ring control register */
  1239. crp = virt_to_phys ( xhci->command.trb );
  1240. if ( ( rc = xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ),
  1241. xhci->op + XHCI_OP_CRCR ) ) != 0 )
  1242. goto err_writeq;
  1243. DBGC2 ( xhci, "XHCI %p CRCR at [%08lx,%08lx)\n",
  1244. xhci, crp, ( crp + xhci->command.len ) );
  1245. return 0;
  1246. err_writeq:
  1247. xhci_ring_free ( &xhci->command );
  1248. err_ring_alloc:
  1249. return rc;
  1250. }
  1251. /**
  1252. * Free command ring
  1253. *
  1254. * @v xhci xHCI device
  1255. */
  1256. static void xhci_command_free ( struct xhci_device *xhci ) {
  1257. /* Sanity check */
  1258. assert ( ( readl ( xhci->op + XHCI_OP_CRCR ) & XHCI_CRCR_CRR ) == 0 );
  1259. /* Clear command ring control register */
  1260. xhci_writeq ( xhci, 0, xhci->op + XHCI_OP_CRCR );
  1261. /* Free TRB ring */
  1262. xhci_ring_free ( &xhci->command );
  1263. }
  1264. /**
  1265. * Allocate event ring
  1266. *
  1267. * @v xhci xHCI device
  1268. * @ret rc Return status code
  1269. */
  1270. static int xhci_event_alloc ( struct xhci_device *xhci ) {
  1271. struct xhci_event_ring *event = &xhci->event;
  1272. unsigned int count;
  1273. size_t len;
  1274. int rc;
  1275. /* Allocate event ring */
  1276. count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
  1277. len = ( count * sizeof ( event->trb[0] ) );
  1278. event->trb = malloc_dma ( len, xhci_align ( len ) );
  1279. if ( ! event->trb ) {
  1280. rc = -ENOMEM;
  1281. goto err_alloc_trb;
  1282. }
  1283. memset ( event->trb, 0, len );
  1284. /* Allocate event ring segment table */
  1285. event->segment = malloc_dma ( sizeof ( event->segment[0] ),
  1286. xhci_align ( sizeof (event->segment[0])));
  1287. if ( ! event->segment ) {
  1288. rc = -ENOMEM;
  1289. goto err_alloc_segment;
  1290. }
  1291. memset ( event->segment, 0, sizeof ( event->segment[0] ) );
  1292. event->segment[0].base = cpu_to_le64 ( virt_to_phys ( event->trb ) );
  1293. event->segment[0].count = cpu_to_le32 ( count );
  1294. /* Program event ring registers */
  1295. writel ( 1, xhci->run + XHCI_RUN_ERSTSZ ( 0 ) );
  1296. if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->trb ),
  1297. xhci->run + XHCI_RUN_ERDP ( 0 ) ) ) != 0 )
  1298. goto err_writeq_erdp;
  1299. if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->segment ),
  1300. xhci->run + XHCI_RUN_ERSTBA ( 0 ) ) ) != 0 )
  1301. goto err_writeq_erstba;
  1302. DBGC2 ( xhci, "XHCI %p event ring [%08lx,%08lx) table [%08lx,%08lx)\n",
  1303. xhci, virt_to_phys ( event->trb ),
  1304. ( virt_to_phys ( event->trb ) + len ),
  1305. virt_to_phys ( event->segment ),
  1306. ( virt_to_phys ( event->segment ) +
  1307. sizeof (event->segment[0] ) ) );
  1308. return 0;
  1309. xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERSTBA ( 0 ) );
  1310. err_writeq_erstba:
  1311. xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
  1312. err_writeq_erdp:
1313. free_dma ( event->segment, sizeof ( event->segment[0] ) );
1314. err_alloc_segment:
1315. free_dma ( event->trb, len );
  1316. err_alloc_trb:
  1317. return rc;
  1318. }
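/* Layout note (informational): a single event ring segment is used, so
 * the Event Ring Segment Table holds exactly one entry describing the
 * whole ring:
 *
 *	ERSTSZ(0) = 1				// one segment
 *	ERST[0]   = { base  = virt_to_phys ( event->trb ),
 *	              count = 1 << XHCI_EVENT_TRBS_LOG2 }
 *	ERDP(0)   = virt_to_phys ( event->trb )	// initial dequeue pointer
 *	ERSTBA(0) = virt_to_phys ( event->segment )
 *
 * The table base address is written last, after the size and the
 * dequeue pointer, matching the register order used above.
 */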
  1319. /**
  1320. * Free event ring
  1321. *
  1322. * @v xhci xHCI device
  1323. */
  1324. static void xhci_event_free ( struct xhci_device *xhci ) {
  1325. struct xhci_event_ring *event = &xhci->event;
  1326. unsigned int count;
  1327. size_t len;
  1328. /* Clear event ring registers */
  1329. writel ( 0, xhci->run + XHCI_RUN_ERSTSZ ( 0 ) );
  1330. xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERSTBA ( 0 ) );
  1331. xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
  1332. /* Free event ring segment table */
  1333. free_dma ( event->segment, sizeof ( event->segment[0] ) );
  1334. /* Free event ring */
  1335. count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
  1336. len = ( count * sizeof ( event->trb[0] ) );
  1337. free_dma ( event->trb, len );
  1338. }
  1339. /**
  1340. * Handle transfer event
  1341. *
  1342. * @v xhci xHCI device
  1343. * @v trb Transfer event TRB
  1344. */
  1345. static void xhci_transfer ( struct xhci_device *xhci,
  1346. struct xhci_trb_transfer *trb ) {
  1347. struct xhci_slot *slot;
  1348. struct xhci_endpoint *endpoint;
  1349. struct io_buffer *iobuf;
  1350. int rc;
  1351. /* Profile transfer events */
  1352. profile_start ( &xhci_transfer_profiler );
  1353. /* Identify slot */
  1354. if ( ( trb->slot > xhci->slots ) ||
  1355. ( ( slot = xhci->slot[trb->slot] ) == NULL ) ) {
  1356. DBGC ( xhci, "XHCI %p transfer event invalid slot %d:\n",
  1357. xhci, trb->slot );
  1358. DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
  1359. return;
  1360. }
  1361. /* Identify endpoint */
  1362. if ( ( trb->endpoint > XHCI_CTX_END ) ||
  1363. ( ( endpoint = slot->endpoint[trb->endpoint] ) == NULL ) ) {
  1364. DBGC ( xhci, "XHCI %p slot %d transfer event invalid epid "
  1365. "%d:\n", xhci, slot->id, trb->endpoint );
  1366. DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
  1367. return;
  1368. }
  1369. /* Dequeue TRB(s) */
  1370. iobuf = xhci_dequeue_multi ( &endpoint->ring );
  1371. assert ( iobuf != NULL );
  1372. /* Check for errors */
  1373. if ( ! ( ( trb->code == XHCI_CMPLT_SUCCESS ) ||
  1374. ( trb->code == XHCI_CMPLT_SHORT ) ) ) {
  1375. /* Construct error */
  1376. rc = -ECODE ( trb->code );
  1377. DBGC ( xhci, "XHCI %p slot %d ctx %d failed (code %d): %s\n",
  1378. xhci, slot->id, endpoint->ctx, trb->code,
  1379. strerror ( rc ) );
  1380. DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
  1381. /* Sanity check */
  1382. assert ( ( endpoint->context->state & XHCI_ENDPOINT_STATE_MASK )
  1383. != XHCI_ENDPOINT_RUNNING );
  1384. /* Report failure to USB core */
  1385. usb_complete_err ( endpoint->ep, iobuf, rc );
  1386. return;
  1387. }
  1388. /* Record actual transfer size */
  1389. iob_unput ( iobuf, le16_to_cpu ( trb->residual ) );
  1390. /* Sanity check (for successful completions only) */
  1391. assert ( xhci_ring_consumed ( &endpoint->ring ) ==
  1392. le64_to_cpu ( trb->transfer ) );
  1393. /* Report completion to USB core */
  1394. usb_complete ( endpoint->ep, iobuf );
  1395. profile_stop ( &xhci_transfer_profiler );
  1396. }
  1397. /**
  1398. * Handle command completion event
  1399. *
  1400. * @v xhci xHCI device
  1401. * @v trb Command completion event
  1402. */
  1403. static void xhci_complete ( struct xhci_device *xhci,
  1404. struct xhci_trb_complete *trb ) {
  1405. int rc;
  1406. /* Ignore "command ring stopped" notifications */
  1407. if ( trb->code == XHCI_CMPLT_CMD_STOPPED ) {
  1408. DBGC2 ( xhci, "XHCI %p command ring stopped\n", xhci );
  1409. return;
  1410. }
  1411. /* Ignore unexpected completions */
  1412. if ( ! xhci->pending ) {
  1413. rc = -ECODE ( trb->code );
  1414. DBGC ( xhci, "XHCI %p unexpected completion (code %d): %s\n",
  1415. xhci, trb->code, strerror ( rc ) );
  1416. DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
  1417. return;
  1418. }
  1419. /* Dequeue command TRB */
  1420. xhci_dequeue ( &xhci->command );
  1421. /* Sanity check */
  1422. assert ( xhci_ring_consumed ( &xhci->command ) ==
  1423. le64_to_cpu ( trb->command ) );
  1424. /* Record completion */
  1425. memcpy ( xhci->pending, trb, sizeof ( *xhci->pending ) );
  1426. xhci->pending = NULL;
  1427. }
  1428. /**
  1429. * Handle port status event
  1430. *
  1431. * @v xhci xHCI device
  1432. * @v trb Port status event
  1433. */
  1434. static void xhci_port_status ( struct xhci_device *xhci,
  1435. struct xhci_trb_port_status *trb ) {
  1436. struct usb_port *port = usb_port ( xhci->bus->hub, trb->port );
  1437. uint32_t portsc;
  1438. /* Sanity check */
  1439. assert ( ( trb->port > 0 ) && ( trb->port <= xhci->ports ) );
  1440. /* Record disconnections and clear changes */
  1441. portsc = readl ( xhci->op + XHCI_OP_PORTSC ( trb->port ) );
  1442. port->disconnected |= ( portsc & XHCI_PORTSC_CSC );
  1443. portsc &= ( XHCI_PORTSC_PRESERVE | XHCI_PORTSC_CHANGE );
  1444. writel ( portsc, xhci->op + XHCI_OP_PORTSC ( trb->port ) );
  1445. /* Report port status change */
  1446. usb_port_changed ( port );
  1447. }
  1448. /**
  1449. * Handle host controller event
  1450. *
  1451. * @v xhci xHCI device
  1452. * @v trb Host controller event
  1453. */
  1454. static void xhci_host_controller ( struct xhci_device *xhci,
  1455. struct xhci_trb_host_controller *trb ) {
  1456. int rc;
  1457. /* Construct error */
  1458. rc = -ECODE ( trb->code );
  1459. DBGC ( xhci, "XHCI %p host controller event (code %d): %s\n",
  1460. xhci, trb->code, strerror ( rc ) );
  1461. }
  1462. /**
  1463. * Poll event ring
  1464. *
  1465. * @v xhci xHCI device
  1466. */
  1467. static void xhci_event_poll ( struct xhci_device *xhci ) {
  1468. struct xhci_event_ring *event = &xhci->event;
  1469. union xhci_trb *trb;
  1470. unsigned int shift = XHCI_EVENT_TRBS_LOG2;
  1471. unsigned int count = ( 1 << shift );
  1472. unsigned int mask = ( count - 1 );
  1473. unsigned int consumed;
  1474. unsigned int type;
  1475. /* Poll for events */
  1476. profile_start ( &xhci_event_profiler );
  1477. for ( consumed = 0 ; ; consumed++ ) {
  1478. /* Stop if we reach an empty TRB */
  1479. rmb();
  1480. trb = &event->trb[ event->cons & mask ];
  1481. if ( ! ( ( trb->common.flags ^
  1482. ( event->cons >> shift ) ) & XHCI_TRB_C ) )
  1483. break;
  1484. /* Handle TRB */
  1485. type = ( trb->common.type & XHCI_TRB_TYPE_MASK );
  1486. switch ( type ) {
  1487. case XHCI_TRB_TRANSFER :
  1488. xhci_transfer ( xhci, &trb->transfer );
  1489. break;
  1490. case XHCI_TRB_COMPLETE :
  1491. xhci_complete ( xhci, &trb->complete );
  1492. break;
  1493. case XHCI_TRB_PORT_STATUS:
  1494. xhci_port_status ( xhci, &trb->port );
  1495. break;
  1496. case XHCI_TRB_HOST_CONTROLLER:
  1497. xhci_host_controller ( xhci, &trb->host );
  1498. break;
  1499. default:
1500. DBGC ( xhci, "XHCI %p unrecognised event %#x:\n",
  1501. xhci, event->cons );
  1502. DBGC_HDA ( xhci, virt_to_phys ( trb ),
  1503. trb, sizeof ( *trb ) );
  1504. break;
  1505. }
  1506. /* Consume this TRB */
  1507. event->cons++;
  1508. }
  1509. /* Update dequeue pointer if applicable */
  1510. if ( consumed ) {
  1511. xhci_writeq ( xhci, virt_to_phys ( trb ),
  1512. xhci->run + XHCI_RUN_ERDP ( 0 ) );
  1513. profile_stop ( &xhci_event_profiler );
  1514. }
  1515. }
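/* Note on the emptiness test above (informational): bit 0 of
 * ( event->cons >> shift ) flips once per lap of the ring and tracks
 * the *stale* cycle state: unwritten TRBs read as Cycle=0 on the first
 * lap, as Cycle=1 on the second, and so on.  The loop therefore stops
 * as soon as a TRB's Cycle bit still matches that stale value, i.e. the
 * controller has not yet produced a new event in that slot.
 */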
  1516. /**
  1517. * Abort command
  1518. *
  1519. * @v xhci xHCI device
  1520. */
  1521. static void xhci_abort ( struct xhci_device *xhci ) {
  1522. physaddr_t crp;
  1523. /* Abort the command */
  1524. DBGC2 ( xhci, "XHCI %p aborting command\n", xhci );
  1525. xhci_writeq ( xhci, XHCI_CRCR_CA, xhci->op + XHCI_OP_CRCR );
  1526. /* Allow time for command to abort */
  1527. mdelay ( XHCI_COMMAND_ABORT_DELAY_MS );
  1528. /* Sanity check */
  1529. assert ( ( readl ( xhci->op + XHCI_OP_CRCR ) & XHCI_CRCR_CRR ) == 0 );
  1530. /* Consume (and ignore) any final command status */
  1531. xhci_event_poll ( xhci );
  1532. /* Reset the command ring control register */
  1533. xhci_ring_reset ( &xhci->command );
  1534. crp = virt_to_phys ( xhci->command.trb );
  1535. xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ), xhci->op + XHCI_OP_CRCR );
  1536. }
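/* Note (informational; xhci_ring_reset() is defined earlier in this
 * file): aborting returns the command ring to a known state in three
 * steps: the Command Abort bit stops the ring, the software ring state
 * is reset, and CRCR is rewritten with the ring base address plus the
 * Ring Cycle State so that the next command starts from a clean slate.
 */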
  1537. /**
  1538. * Issue command and wait for completion
  1539. *
  1540. * @v xhci xHCI device
  1541. * @v trb Transfer request block (with empty Cycle flag)
  1542. * @ret rc Return status code
  1543. *
  1544. * On a successful completion, the TRB will be overwritten with the
  1545. * completion.
  1546. */
  1547. static int xhci_command ( struct xhci_device *xhci, union xhci_trb *trb ) {
  1548. struct xhci_trb_complete *complete = &trb->complete;
  1549. unsigned int i;
  1550. int rc;
  1551. /* Record the pending command */
  1552. xhci->pending = trb;
  1553. /* Enqueue the command */
  1554. if ( ( rc = xhci_enqueue ( &xhci->command, NULL, trb ) ) != 0 )
  1555. goto err_enqueue;
  1556. /* Ring the command doorbell */
  1557. xhci_doorbell ( &xhci->command );
  1558. /* Wait for the command to complete */
  1559. for ( i = 0 ; i < XHCI_COMMAND_MAX_WAIT_MS ; i++ ) {
  1560. /* Poll event ring */
  1561. xhci_event_poll ( xhci );
  1562. /* Check for completion */
  1563. if ( ! xhci->pending ) {
  1564. if ( complete->code != XHCI_CMPLT_SUCCESS ) {
  1565. rc = -ECODE ( complete->code );
  1566. DBGC ( xhci, "XHCI %p command failed (code "
  1567. "%d): %s\n", xhci, complete->code,
  1568. strerror ( rc ) );
  1569. DBGC_HDA ( xhci, 0, trb, sizeof ( *trb ) );
  1570. return rc;
  1571. }
  1572. return 0;
  1573. }
  1574. /* Delay */
  1575. mdelay ( 1 );
  1576. }
  1577. /* Timeout */
  1578. DBGC ( xhci, "XHCI %p timed out waiting for completion\n", xhci );
  1579. rc = -ETIMEDOUT;
  1580. /* Abort command */
  1581. xhci_abort ( xhci );
  1582. err_enqueue:
  1583. xhci->pending = NULL;
  1584. return rc;
  1585. }
  1586. /**
  1587. * Issue NOP and wait for completion
  1588. *
  1589. * @v xhci xHCI device
  1590. * @ret rc Return status code
  1591. */
  1592. static inline int xhci_nop ( struct xhci_device *xhci ) {
  1593. union xhci_trb trb;
  1594. struct xhci_trb_common *nop = &trb.common;
  1595. int rc;
  1596. /* Construct command */
  1597. memset ( nop, 0, sizeof ( *nop ) );
  1598. nop->flags = XHCI_TRB_IOC;
  1599. nop->type = XHCI_TRB_NOP_CMD;
  1600. /* Issue command and wait for completion */
  1601. if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 )
  1602. return rc;
  1603. return 0;
  1604. }
  1605. /**
  1606. * Enable slot
  1607. *
  1608. * @v xhci xHCI device
  1609. * @v type Slot type
  1610. * @ret slot Device slot ID, or negative error
  1611. */
  1612. static inline int xhci_enable_slot ( struct xhci_device *xhci,
  1613. unsigned int type ) {
  1614. union xhci_trb trb;
  1615. struct xhci_trb_enable_slot *enable = &trb.enable;
  1616. struct xhci_trb_complete *enabled = &trb.complete;
  1617. unsigned int slot;
  1618. int rc;
  1619. /* Construct command */
  1620. memset ( enable, 0, sizeof ( *enable ) );
  1621. enable->slot = type;
  1622. enable->type = XHCI_TRB_ENABLE_SLOT;
  1623. /* Issue command and wait for completion */
  1624. if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
  1625. DBGC ( xhci, "XHCI %p could not enable new slot: %s\n",
  1626. xhci, strerror ( rc ) );
  1627. return rc;
  1628. }
  1629. /* Extract slot number */
  1630. slot = enabled->slot;
  1631. DBGC2 ( xhci, "XHCI %p slot %d enabled\n", xhci, slot );
  1632. return slot;
  1633. }
  1634. /**
  1635. * Disable slot
  1636. *
  1637. * @v xhci xHCI device
  1638. * @v slot Device slot
  1639. * @ret rc Return status code
  1640. */
  1641. static inline int xhci_disable_slot ( struct xhci_device *xhci,
  1642. unsigned int slot ) {
  1643. union xhci_trb trb;
  1644. struct xhci_trb_disable_slot *disable = &trb.disable;
  1645. int rc;
  1646. /* Construct command */
  1647. memset ( disable, 0, sizeof ( *disable ) );
  1648. disable->type = XHCI_TRB_DISABLE_SLOT;
  1649. disable->slot = slot;
  1650. /* Issue command and wait for completion */
  1651. if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
  1652. DBGC ( xhci, "XHCI %p could not disable slot %d: %s\n",
  1653. xhci, slot, strerror ( rc ) );
  1654. return rc;
  1655. }
  1656. DBGC2 ( xhci, "XHCI %p slot %d disabled\n", xhci, slot );
  1657. return 0;
  1658. }
  1659. /**
  1660. * Issue context-based command and wait for completion
  1661. *
  1662. * @v xhci xHCI device
  1663. * @v slot Device slot
  1664. * @v endpoint Endpoint
  1665. * @v type TRB type
  1666. * @v populate Input context populater
  1667. * @ret rc Return status code
  1668. */
  1669. static int xhci_context ( struct xhci_device *xhci, struct xhci_slot *slot,
  1670. struct xhci_endpoint *endpoint, unsigned int type,
  1671. void ( * populate ) ( struct xhci_device *xhci,
  1672. struct xhci_slot *slot,
  1673. struct xhci_endpoint *endpoint,
  1674. void *input ) ) {
  1675. union xhci_trb trb;
  1676. struct xhci_trb_context *context = &trb.context;
  1677. size_t len;
  1678. void *input;
  1679. int rc;
  1680. /* Allocate an input context */
  1681. len = xhci_input_context_offset ( xhci, XHCI_CTX_END );
  1682. input = malloc_dma ( len, xhci_align ( len ) );
  1683. if ( ! input ) {
  1684. rc = -ENOMEM;
  1685. goto err_alloc;
  1686. }
  1687. memset ( input, 0, len );
  1688. /* Populate input context */
  1689. populate ( xhci, slot, endpoint, input );
  1690. /* Construct command */
  1691. memset ( context, 0, sizeof ( *context ) );
  1692. context->type = type;
  1693. context->input = cpu_to_le64 ( virt_to_phys ( input ) );
  1694. context->slot = slot->id;
  1695. /* Issue command and wait for completion */
  1696. if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 )
  1697. goto err_command;
  1698. err_command:
  1699. free_dma ( input, len );
  1700. err_alloc:
  1701. return rc;
  1702. }
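/* Input context layout (a sketch; offsets come from
 * xhci_input_context_offset() and scale with the controller's context
 * entry size, typically 32 or 64 bytes):
 *
 *	+0				input control context (add/drop masks)
 *	+offset(XHCI_CTX_SLOT)		slot context
 *	+offset(XHCI_CTX_EP0)		control endpoint context
 *	...				one entry per device context index
 *	+offset(XHCI_CTX_END)		end of buffer (== len above)
 *
 * The populate() callback fills in only the entries that its particular
 * command needs; everything else remains zeroed.
 */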
  1703. /**
  1704. * Populate address device input context
  1705. *
  1706. * @v xhci xHCI device
  1707. * @v slot Device slot
  1708. * @v endpoint Endpoint
  1709. * @v input Input context
  1710. */
  1711. static void xhci_address_device_input ( struct xhci_device *xhci,
  1712. struct xhci_slot *slot,
  1713. struct xhci_endpoint *endpoint,
  1714. void *input ) {
  1715. struct xhci_control_context *control_ctx;
  1716. struct xhci_slot_context *slot_ctx;
  1717. struct xhci_endpoint_context *ep_ctx;
  1718. /* Sanity checks */
  1719. assert ( endpoint->ctx == XHCI_CTX_EP0 );
  1720. /* Populate control context */
  1721. control_ctx = input;
  1722. control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
  1723. ( 1 << XHCI_CTX_EP0 ) );
  1724. /* Populate slot context */
  1725. slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
  1726. slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( 1, 0, slot->psiv,
  1727. slot->route ) );
  1728. slot_ctx->port = slot->port;
  1729. slot_ctx->tt_id = slot->tt_id;
  1730. slot_ctx->tt_port = slot->tt_port;
  1731. /* Populate control endpoint context */
  1732. ep_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_EP0 ) );
  1733. ep_ctx->type = XHCI_EP_TYPE_CONTROL;
  1734. ep_ctx->burst = endpoint->ep->burst;
  1735. ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
  1736. ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) |
  1737. XHCI_EP_DCS );
  1738. ep_ctx->trb_len = cpu_to_le16 ( XHCI_EP0_TRB_LEN );
  1739. }
  1740. /**
  1741. * Address device
  1742. *
  1743. * @v xhci xHCI device
  1744. * @v slot Device slot
  1745. * @ret rc Return status code
  1746. */
  1747. static inline int xhci_address_device ( struct xhci_device *xhci,
  1748. struct xhci_slot *slot ) {
  1749. struct usb_device *usb = slot->usb;
  1750. struct xhci_slot_context *slot_ctx;
  1751. int rc;
  1752. /* Assign device address */
  1753. if ( ( rc = xhci_context ( xhci, slot, slot->endpoint[XHCI_CTX_EP0],
  1754. XHCI_TRB_ADDRESS_DEVICE,
  1755. xhci_address_device_input ) ) != 0 )
  1756. return rc;
  1757. /* Get assigned address */
  1758. slot_ctx = ( slot->context +
  1759. xhci_device_context_offset ( xhci, XHCI_CTX_SLOT ) );
  1760. usb->address = slot_ctx->address;
  1761. DBGC2 ( xhci, "XHCI %p assigned address %d to %s\n",
  1762. xhci, usb->address, usb->name );
  1763. return 0;
  1764. }
  1765. /**
  1766. * Populate configure endpoint input context
  1767. *
  1768. * @v xhci xHCI device
  1769. * @v slot Device slot
  1770. * @v endpoint Endpoint
  1771. * @v input Input context
  1772. */
  1773. static void xhci_configure_endpoint_input ( struct xhci_device *xhci,
  1774. struct xhci_slot *slot,
  1775. struct xhci_endpoint *endpoint,
  1776. void *input ) {
  1777. struct xhci_control_context *control_ctx;
  1778. struct xhci_slot_context *slot_ctx;
  1779. struct xhci_endpoint_context *ep_ctx;
  1780. /* Populate control context */
  1781. control_ctx = input;
  1782. control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
  1783. ( 1 << endpoint->ctx ) );
  1784. /* Populate slot context */
  1785. slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
  1786. slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
  1787. ( slot->ports ? 1 : 0 ),
  1788. slot->psiv, 0 ) );
  1789. slot_ctx->ports = slot->ports;
  1790. /* Populate endpoint context */
  1791. ep_ctx = ( input + xhci_input_context_offset ( xhci, endpoint->ctx ) );
  1792. ep_ctx->interval = endpoint->interval;
  1793. ep_ctx->type = endpoint->type;
  1794. ep_ctx->burst = endpoint->ep->burst;
  1795. ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
  1796. ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) |
  1797. XHCI_EP_DCS );
  1798. ep_ctx->trb_len = cpu_to_le16 ( endpoint->ep->mtu ); /* best guess */
  1799. }
  1800. /**
  1801. * Configure endpoint
  1802. *
  1803. * @v xhci xHCI device
  1804. * @v slot Device slot
  1805. * @v endpoint Endpoint
  1806. * @ret rc Return status code
  1807. */
  1808. static inline int xhci_configure_endpoint ( struct xhci_device *xhci,
  1809. struct xhci_slot *slot,
  1810. struct xhci_endpoint *endpoint ) {
  1811. int rc;
  1812. /* Configure endpoint */
  1813. if ( ( rc = xhci_context ( xhci, slot, endpoint,
  1814. XHCI_TRB_CONFIGURE_ENDPOINT,
  1815. xhci_configure_endpoint_input ) ) != 0 )
  1816. return rc;
  1817. DBGC2 ( xhci, "XHCI %p slot %d ctx %d configured\n",
  1818. xhci, slot->id, endpoint->ctx );
  1819. return 0;
  1820. }
  1821. /**
  1822. * Populate deconfigure endpoint input context
  1823. *
  1824. * @v xhci xHCI device
  1825. * @v slot Device slot
  1826. * @v endpoint Endpoint
  1827. * @v input Input context
  1828. */
  1829. static void
  1830. xhci_deconfigure_endpoint_input ( struct xhci_device *xhci __unused,
  1831. struct xhci_slot *slot __unused,
  1832. struct xhci_endpoint *endpoint,
  1833. void *input ) {
  1834. struct xhci_control_context *control_ctx;
  1835. struct xhci_slot_context *slot_ctx;
  1836. /* Populate control context */
  1837. control_ctx = input;
  1838. control_ctx->add = cpu_to_le32 ( 1 << XHCI_CTX_SLOT );
  1839. control_ctx->drop = cpu_to_le32 ( 1 << endpoint->ctx );
  1840. /* Populate slot context */
  1841. slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
  1842. slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
  1843. 0, 0, 0 ) );
  1844. }
  1845. /**
  1846. * Deconfigure endpoint
  1847. *
  1848. * @v xhci xHCI device
  1849. * @v slot Device slot
  1850. * @v endpoint Endpoint
  1851. * @ret rc Return status code
  1852. */
  1853. static inline int xhci_deconfigure_endpoint ( struct xhci_device *xhci,
  1854. struct xhci_slot *slot,
  1855. struct xhci_endpoint *endpoint ) {
  1856. int rc;
  1857. /* Deconfigure endpoint */
  1858. if ( ( rc = xhci_context ( xhci, slot, endpoint,
  1859. XHCI_TRB_CONFIGURE_ENDPOINT,
  1860. xhci_deconfigure_endpoint_input ) ) != 0 )
  1861. return rc;
  1862. DBGC2 ( xhci, "XHCI %p slot %d ctx %d deconfigured\n",
  1863. xhci, slot->id, endpoint->ctx );
  1864. return 0;
  1865. }
  1866. /**
  1867. * Populate evaluate context input context
  1868. *
  1869. * @v xhci xHCI device
  1870. * @v slot Device slot
  1871. * @v endpoint Endpoint
  1872. * @v input Input context
  1873. */
  1874. static void xhci_evaluate_context_input ( struct xhci_device *xhci,
  1875. struct xhci_slot *slot __unused,
  1876. struct xhci_endpoint *endpoint,
  1877. void *input ) {
  1878. struct xhci_control_context *control_ctx;
  1879. struct xhci_slot_context *slot_ctx;
  1880. struct xhci_endpoint_context *ep_ctx;
  1881. /* Populate control context */
  1882. control_ctx = input;
  1883. control_ctx->add = cpu_to_le32 ( ( 1 << XHCI_CTX_SLOT ) |
  1884. ( 1 << endpoint->ctx ) );
  1885. /* Populate slot context */
  1886. slot_ctx = ( input + xhci_input_context_offset ( xhci, XHCI_CTX_SLOT ));
  1887. slot_ctx->info = cpu_to_le32 ( XHCI_SLOT_INFO ( ( XHCI_CTX_END - 1 ),
  1888. 0, 0, 0 ) );
  1889. /* Populate endpoint context */
  1890. ep_ctx = ( input + xhci_input_context_offset ( xhci, endpoint->ctx ) );
  1891. ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
  1892. }
  1893. /**
  1894. * Evaluate context
  1895. *
  1896. * @v xhci xHCI device
  1897. * @v slot Device slot
  1898. * @v endpoint Endpoint
  1899. * @ret rc Return status code
  1900. */
  1901. static inline int xhci_evaluate_context ( struct xhci_device *xhci,
  1902. struct xhci_slot *slot,
  1903. struct xhci_endpoint *endpoint ) {
  1904. int rc;
  1905. /* Configure endpoint */
  1906. if ( ( rc = xhci_context ( xhci, slot, endpoint,
  1907. XHCI_TRB_EVALUATE_CONTEXT,
  1908. xhci_evaluate_context_input ) ) != 0 )
  1909. return rc;
  1910. DBGC2 ( xhci, "XHCI %p slot %d ctx %d (re-)evaluated\n",
  1911. xhci, slot->id, endpoint->ctx );
  1912. return 0;
  1913. }
  1914. /**
  1915. * Reset endpoint
  1916. *
  1917. * @v xhci xHCI device
  1918. * @v slot Device slot
  1919. * @v endpoint Endpoint
  1920. * @ret rc Return status code
  1921. */
  1922. static inline int xhci_reset_endpoint ( struct xhci_device *xhci,
  1923. struct xhci_slot *slot,
  1924. struct xhci_endpoint *endpoint ) {
  1925. union xhci_trb trb;
  1926. struct xhci_trb_reset_endpoint *reset = &trb.reset;
  1927. int rc;
  1928. /* Construct command */
  1929. memset ( reset, 0, sizeof ( *reset ) );
  1930. reset->slot = slot->id;
  1931. reset->endpoint = endpoint->ctx;
  1932. reset->type = XHCI_TRB_RESET_ENDPOINT;
  1933. /* Issue command and wait for completion */
  1934. if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
  1935. DBGC ( xhci, "XHCI %p slot %d ctx %d could not reset endpoint "
  1936. "in state %d: %s\n", xhci, slot->id, endpoint->ctx,
  1937. endpoint->context->state, strerror ( rc ) );
  1938. return rc;
  1939. }
  1940. return 0;
  1941. }
  1942. /**
  1943. * Stop endpoint
  1944. *
  1945. * @v xhci xHCI device
  1946. * @v slot Device slot
  1947. * @v endpoint Endpoint
  1948. * @ret rc Return status code
  1949. */
  1950. static inline int xhci_stop_endpoint ( struct xhci_device *xhci,
  1951. struct xhci_slot *slot,
  1952. struct xhci_endpoint *endpoint ) {
  1953. union xhci_trb trb;
  1954. struct xhci_trb_stop_endpoint *stop = &trb.stop;
  1955. int rc;
  1956. /* Construct command */
  1957. memset ( stop, 0, sizeof ( *stop ) );
  1958. stop->slot = slot->id;
  1959. stop->endpoint = endpoint->ctx;
  1960. stop->type = XHCI_TRB_STOP_ENDPOINT;
  1961. /* Issue command and wait for completion */
  1962. if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
  1963. DBGC ( xhci, "XHCI %p slot %d ctx %d could not stop endpoint "
  1964. "in state %d: %s\n", xhci, slot->id, endpoint->ctx,
  1965. endpoint->context->state, strerror ( rc ) );
  1966. return rc;
  1967. }
  1968. return 0;
  1969. }
  1970. /**
  1971. * Set transfer ring dequeue pointer
  1972. *
  1973. * @v xhci xHCI device
  1974. * @v slot Device slot
  1975. * @v endpoint Endpoint
  1976. * @ret rc Return status code
  1977. */
  1978. static inline int
  1979. xhci_set_tr_dequeue_pointer ( struct xhci_device *xhci,
  1980. struct xhci_slot *slot,
  1981. struct xhci_endpoint *endpoint ) {
  1982. union xhci_trb trb;
  1983. struct xhci_trb_set_tr_dequeue_pointer *dequeue = &trb.dequeue;
  1984. struct xhci_trb_ring *ring = &endpoint->ring;
  1985. unsigned int cons;
  1986. unsigned int mask;
  1987. unsigned int index;
  1988. unsigned int dcs;
  1989. int rc;
  1990. /* Construct command */
  1991. memset ( dequeue, 0, sizeof ( *dequeue ) );
  1992. cons = ring->cons;
  1993. mask = ring->mask;
  1994. dcs = ( ( ~( cons >> ring->shift ) ) & XHCI_EP_DCS );
  1995. index = ( cons & mask );
  1996. dequeue->dequeue =
  1997. cpu_to_le64 ( virt_to_phys ( &ring->trb[index] ) | dcs );
  1998. dequeue->slot = slot->id;
  1999. dequeue->endpoint = endpoint->ctx;
  2000. dequeue->type = XHCI_TRB_SET_TR_DEQUEUE_POINTER;
  2001. /* Issue command and wait for completion */
  2002. if ( ( rc = xhci_command ( xhci, &trb ) ) != 0 ) {
  2003. DBGC ( xhci, "XHCI %p slot %d ctx %d could not set TR dequeue "
  2004. "pointer in state %d: %s\n", xhci, slot->id,
  2005. endpoint->ctx, endpoint->context->state, strerror ( rc));
  2006. return rc;
  2007. }
  2008. return 0;
  2009. }
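/* Note (informational): after a halt the controller's dequeue pointer
 * is stale, so this command reloads it from the software consumer
 * counter.  The Dequeue Cycle State is derived in the same way as the
 * producer cycle state: bit ( shift ) of ring->cons counts completed
 * laps, and its complement is the cycle value carried by the TRBs that
 * are still outstanding.
 */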
  2010. /******************************************************************************
  2011. *
  2012. * Endpoint operations
  2013. *
  2014. ******************************************************************************
  2015. */
  2016. /**
  2017. * Open endpoint
  2018. *
  2019. * @v ep USB endpoint
  2020. * @ret rc Return status code
  2021. */
  2022. static int xhci_endpoint_open ( struct usb_endpoint *ep ) {
  2023. struct usb_device *usb = ep->usb;
  2024. struct xhci_slot *slot = usb_get_hostdata ( usb );
  2025. struct xhci_device *xhci = slot->xhci;
  2026. struct xhci_endpoint *endpoint;
  2027. unsigned int ctx;
  2028. unsigned int type;
  2029. unsigned int interval;
  2030. int rc;
  2031. /* Calculate context index */
  2032. ctx = XHCI_CTX ( ep->address );
  2033. assert ( slot->endpoint[ctx] == NULL );
  2034. /* Calculate endpoint type */
  2035. type = XHCI_EP_TYPE ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK );
  2036. if ( type == XHCI_EP_TYPE ( USB_ENDPOINT_ATTR_CONTROL ) )
  2037. type = XHCI_EP_TYPE_CONTROL;
  2038. if ( ep->address & USB_DIR_IN )
  2039. type |= XHCI_EP_TYPE_IN;
  2040. /* Calculate interval */
  2041. if ( type & XHCI_EP_TYPE_PERIODIC ) {
  2042. interval = ( fls ( ep->interval ) - 1 );
  2043. } else {
  2044. interval = ep->interval;
  2045. }
  2046. /* Allocate and initialise structure */
  2047. endpoint = zalloc ( sizeof ( *endpoint ) );
  2048. if ( ! endpoint ) {
  2049. rc = -ENOMEM;
  2050. goto err_alloc;
  2051. }
  2052. usb_endpoint_set_hostdata ( ep, endpoint );
  2053. slot->endpoint[ctx] = endpoint;
  2054. endpoint->xhci = xhci;
  2055. endpoint->slot = slot;
  2056. endpoint->ep = ep;
  2057. endpoint->ctx = ctx;
  2058. endpoint->type = type;
  2059. endpoint->interval = interval;
  2060. endpoint->context = ( ( ( void * ) slot->context ) +
  2061. xhci_device_context_offset ( xhci, ctx ) );
  2062. /* Allocate transfer ring */
  2063. if ( ( rc = xhci_ring_alloc ( xhci, &endpoint->ring,
  2064. XHCI_TRANSFER_TRBS_LOG2,
  2065. slot->id, ctx, 0 ) ) != 0 )
  2066. goto err_ring_alloc;
  2067. /* Configure endpoint, if applicable */
  2068. if ( ( ctx != XHCI_CTX_EP0 ) &&
  2069. ( ( rc = xhci_configure_endpoint ( xhci, slot, endpoint ) ) != 0 ))
  2070. goto err_configure_endpoint;
  2071. DBGC2 ( xhci, "XHCI %p slot %d ctx %d ring [%08lx,%08lx)\n",
  2072. xhci, slot->id, ctx, virt_to_phys ( endpoint->ring.trb ),
  2073. ( virt_to_phys ( endpoint->ring.trb ) + endpoint->ring.len ) );
  2074. return 0;
  2075. xhci_deconfigure_endpoint ( xhci, slot, endpoint );
  2076. err_configure_endpoint:
  2077. xhci_ring_free ( &endpoint->ring );
  2078. err_ring_alloc:
  2079. slot->endpoint[ctx] = NULL;
  2080. free ( endpoint );
  2081. err_alloc:
  2082. return rc;
  2083. }
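/* Note (informational): the context index calculated by XHCI_CTX() is
 * the xHCI Device Context Index: approximately twice the endpoint
 * number, plus one for IN endpoints, with the bidirectional control
 * endpoint EP0 occupying index 1.  This is why slot->endpoint[] and the
 * device context are both indexed by ctx rather than by the raw USB
 * endpoint address.
 */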
  2084. /**
  2085. * Close endpoint
  2086. *
  2087. * @v ep USB endpoint
  2088. */
  2089. static void xhci_endpoint_close ( struct usb_endpoint *ep ) {
  2090. struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
  2091. struct xhci_slot *slot = endpoint->slot;
  2092. struct xhci_device *xhci = slot->xhci;
  2093. struct io_buffer *iobuf;
  2094. unsigned int ctx = endpoint->ctx;
  2095. /* Deconfigure endpoint, if applicable */
  2096. if ( ctx != XHCI_CTX_EP0 )
  2097. xhci_deconfigure_endpoint ( xhci, slot, endpoint );
  2098. /* Cancel any incomplete transfers */
  2099. while ( xhci_ring_fill ( &endpoint->ring ) ) {
  2100. iobuf = xhci_dequeue_multi ( &endpoint->ring );
  2101. usb_complete_err ( ep, iobuf, -ECANCELED );
  2102. }
  2103. /* Free endpoint */
  2104. xhci_ring_free ( &endpoint->ring );
  2105. slot->endpoint[ctx] = NULL;
  2106. free ( endpoint );
  2107. }
  2108. /**
  2109. * Reset endpoint
  2110. *
  2111. * @v ep USB endpoint
  2112. * @ret rc Return status code
  2113. */
  2114. static int xhci_endpoint_reset ( struct usb_endpoint *ep ) {
  2115. struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
  2116. struct xhci_slot *slot = endpoint->slot;
  2117. struct xhci_device *xhci = slot->xhci;
  2118. int rc;
  2119. /* Reset endpoint context */
  2120. if ( ( rc = xhci_reset_endpoint ( xhci, slot, endpoint ) ) != 0 )
  2121. return rc;
  2122. /* Set transfer ring dequeue pointer */
  2123. if ( ( rc = xhci_set_tr_dequeue_pointer ( xhci, slot, endpoint ) ) != 0)
  2124. return rc;
  2125. /* Ring doorbell to resume processing */
  2126. xhci_doorbell ( &endpoint->ring );
  2127. DBGC ( xhci, "XHCI %p slot %d ctx %d reset\n",
  2128. xhci, slot->id, endpoint->ctx );
  2129. return 0;
  2130. }
  2131. /**
  2132. * Update MTU
  2133. *
  2134. * @v ep USB endpoint
  2135. * @ret rc Return status code
  2136. */
  2137. static int xhci_endpoint_mtu ( struct usb_endpoint *ep ) {
  2138. struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
  2139. struct xhci_slot *slot = endpoint->slot;
  2140. struct xhci_device *xhci = slot->xhci;
  2141. int rc;
2142. /* Evaluate context */
  2143. if ( ( rc = xhci_evaluate_context ( xhci, slot, endpoint ) ) != 0 )
  2144. return rc;
  2145. return 0;
  2146. }
  2147. /**
  2148. * Enqueue message transfer
  2149. *
  2150. * @v ep USB endpoint
  2151. * @v iobuf I/O buffer
  2152. * @ret rc Return status code
  2153. */
  2154. static int xhci_endpoint_message ( struct usb_endpoint *ep,
  2155. struct io_buffer *iobuf ) {
  2156. struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
  2157. struct usb_setup_packet *packet;
  2158. unsigned int input;
  2159. size_t len;
  2160. union xhci_trb trbs[ 1 /* setup */ + 1 /* possible data */ +
  2161. 1 /* status */ ];
  2162. union xhci_trb *trb = trbs;
  2163. struct xhci_trb_setup *setup;
  2164. struct xhci_trb_data *data;
  2165. struct xhci_trb_status *status;
  2166. int rc;
  2167. /* Profile message transfers */
  2168. profile_start ( &xhci_message_profiler );
  2169. /* Construct setup stage TRB */
  2170. memset ( trbs, 0, sizeof ( trbs ) );
  2171. assert ( iob_len ( iobuf ) >= sizeof ( *packet ) );
  2172. packet = iobuf->data;
  2173. iob_pull ( iobuf, sizeof ( *packet ) );
  2174. setup = &(trb++)->setup;
  2175. memcpy ( &setup->packet, packet, sizeof ( setup->packet ) );
  2176. setup->len = cpu_to_le32 ( sizeof ( *packet ) );
  2177. setup->flags = XHCI_TRB_IDT;
  2178. setup->type = XHCI_TRB_SETUP;
  2179. len = iob_len ( iobuf );
  2180. input = ( packet->request & cpu_to_le16 ( USB_DIR_IN ) );
  2181. if ( len )
  2182. setup->direction = ( input ? XHCI_SETUP_IN : XHCI_SETUP_OUT );
  2183. /* Construct data stage TRB, if applicable */
  2184. if ( len ) {
  2185. data = &(trb++)->data;
  2186. data->data = cpu_to_le64 ( virt_to_phys ( iobuf->data ) );
  2187. data->len = cpu_to_le32 ( len );
  2188. data->type = XHCI_TRB_DATA;
  2189. data->direction = ( input ? XHCI_DATA_IN : XHCI_DATA_OUT );
  2190. }
  2191. /* Construct status stage TRB */
  2192. status = &(trb++)->status;
  2193. status->flags = XHCI_TRB_IOC;
  2194. status->type = XHCI_TRB_STATUS;
  2195. status->direction =
  2196. ( ( len && input ) ? XHCI_STATUS_OUT : XHCI_STATUS_IN );
  2197. /* Enqueue TRBs */
  2198. if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
  2199. ( trb - trbs ) ) ) != 0 )
  2200. return rc;
  2201. /* Ring the doorbell */
  2202. xhci_doorbell ( &endpoint->ring );
  2203. profile_stop ( &xhci_message_profiler );
  2204. return 0;
  2205. }
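/* Worked example (illustrative only): a GET_DESCRIPTOR request with a
 * device-to-host data stage produces three TRBs on the EP0 ring:
 *
 *	Setup  TRB: IDT set, 8-byte setup packet carried inline,
 *		    direction = XHCI_SETUP_IN
 *	Data   TRB: points at the remaining buffer, length = iob_len(),
 *		    direction = XHCI_DATA_IN
 *	Status TRB: IOC set, direction = XHCI_STATUS_OUT (always the
 *		    opposite of the data stage)
 *
 * A request with no data stage omits the Data TRB and uses
 * XHCI_STATUS_IN for the status stage.
 */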
  2206. /**
  2207. * Enqueue stream transfer
  2208. *
  2209. * @v ep USB endpoint
  2210. * @v iobuf I/O buffer
  2211. * @v terminate Terminate using a short packet
  2212. * @ret rc Return status code
  2213. */
  2214. static int xhci_endpoint_stream ( struct usb_endpoint *ep,
  2215. struct io_buffer *iobuf, int terminate ) {
  2216. struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
  2217. union xhci_trb trbs[ 1 /* Normal */ + 1 /* Possible zero-length */ ];
  2218. union xhci_trb *trb = trbs;
  2219. struct xhci_trb_normal *normal;
  2220. size_t len = iob_len ( iobuf );
  2221. int rc;
  2222. /* Profile stream transfers */
  2223. profile_start ( &xhci_stream_profiler );
  2224. /* Construct normal TRBs */
  2225. memset ( &trbs, 0, sizeof ( trbs ) );
  2226. normal = &(trb++)->normal;
  2227. normal->data = cpu_to_le64 ( virt_to_phys ( iobuf->data ) );
  2228. normal->len = cpu_to_le32 ( len );
  2229. normal->type = XHCI_TRB_NORMAL;
  2230. if ( terminate && ( ( len & ( ep->mtu - 1 ) ) == 0 ) ) {
  2231. normal->flags = XHCI_TRB_CH;
  2232. normal = &(trb++)->normal;
  2233. normal->type = XHCI_TRB_NORMAL;
  2234. }
  2235. normal->flags = XHCI_TRB_IOC;
  2236. /* Enqueue TRBs */
  2237. if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
  2238. ( trb - trbs ) ) ) != 0 )
  2239. return rc;
  2240. /* Ring the doorbell */
  2241. xhci_doorbell ( &endpoint->ring );
  2242. profile_stop ( &xhci_stream_profiler );
  2243. return 0;
  2244. }
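/* Note (informational): when termination is requested and the payload
 * is an exact multiple of the endpoint MTU, a second zero-length normal
 * TRB is chained onto the first (XHCI_TRB_CH on the data TRB, IOC only
 * on the trailing TRB).  The resulting zero-length packet is the short
 * packet that marks the end of the USB transfer; without it the
 * recipient could not tell that no further full-sized packets follow.
 */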
  2245. /******************************************************************************
  2246. *
  2247. * Device operations
  2248. *
  2249. ******************************************************************************
  2250. */
  2251. /**
  2252. * Open device
  2253. *
  2254. * @v usb USB device
  2255. * @ret rc Return status code
  2256. */
  2257. static int xhci_device_open ( struct usb_device *usb ) {
  2258. struct xhci_device *xhci = usb_bus_get_hostdata ( usb->port->hub->bus );
  2259. struct usb_port *tt = usb_transaction_translator ( usb );
  2260. struct xhci_slot *slot;
  2261. struct xhci_slot *tt_slot;
  2262. size_t len;
  2263. int type;
  2264. int id;
  2265. int rc;
  2266. /* Determine applicable slot type */
  2267. type = xhci_port_slot_type ( xhci, usb->port->address );
  2268. if ( type < 0 ) {
  2269. rc = type;
  2270. DBGC ( xhci, "XHCI %p port %d has no slot type\n",
  2271. xhci, usb->port->address );
  2272. goto err_type;
  2273. }
  2274. /* Allocate a device slot number */
  2275. id = xhci_enable_slot ( xhci, type );
  2276. if ( id < 0 ) {
  2277. rc = id;
  2278. goto err_enable_slot;
  2279. }
  2280. assert ( xhci->slot[id] == NULL );
  2281. /* Allocate and initialise structure */
  2282. slot = zalloc ( sizeof ( *slot ) );
  2283. if ( ! slot ) {
  2284. rc = -ENOMEM;
  2285. goto err_alloc;
  2286. }
  2287. usb_set_hostdata ( usb, slot );
  2288. xhci->slot[id] = slot;
  2289. slot->xhci = xhci;
  2290. slot->usb = usb;
  2291. slot->id = id;
  2292. if ( tt ) {
  2293. tt_slot = usb_get_hostdata ( tt->hub->usb );
  2294. slot->tt_id = tt_slot->id;
  2295. slot->tt_port = tt->address;
  2296. }
  2297. /* Allocate a device context */
  2298. len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
  2299. slot->context = malloc_dma ( len, xhci_align ( len ) );
  2300. if ( ! slot->context ) {
  2301. rc = -ENOMEM;
  2302. goto err_alloc_context;
  2303. }
  2304. memset ( slot->context, 0, len );
  2305. /* Set device context base address */
  2306. assert ( xhci->dcbaa[id] == 0 );
  2307. xhci->dcbaa[id] = cpu_to_le64 ( virt_to_phys ( slot->context ) );
  2308. DBGC2 ( xhci, "XHCI %p slot %d device context [%08lx,%08lx) for %s\n",
  2309. xhci, slot->id, virt_to_phys ( slot->context ),
  2310. ( virt_to_phys ( slot->context ) + len ), usb->name );
  2311. return 0;
  2312. xhci->dcbaa[id] = 0;
  2313. free_dma ( slot->context, len );
  2314. err_alloc_context:
  2315. xhci->slot[id] = NULL;
  2316. free ( slot );
  2317. err_alloc:
  2318. xhci_disable_slot ( xhci, id );
  2319. err_enable_slot:
  2320. err_type:
  2321. return rc;
  2322. }
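/* Note (informational): the Device Context Base Address Array maps slot
 * IDs to per-device context structures.  Opening a device is therefore:
 * obtain a slot ID via Enable Slot, allocate a zeroed device context,
 * and point dcbaa[id] at it.  The controller owns that memory from then
 * on, which is why a failed Disable Slot in xhci_device_close() below
 * deliberately leaks the context instead of freeing it.
 */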
  2323. /**
  2324. * Close device
  2325. *
  2326. * @v usb USB device
  2327. */
  2328. static void xhci_device_close ( struct usb_device *usb ) {
  2329. struct xhci_slot *slot = usb_get_hostdata ( usb );
  2330. struct xhci_device *xhci = slot->xhci;
  2331. size_t len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
  2332. unsigned int id = slot->id;
  2333. int rc;
  2334. /* Disable slot */
  2335. if ( ( rc = xhci_disable_slot ( xhci, id ) ) != 0 ) {
  2336. /* Slot is still enabled. Leak the slot context,
  2337. * since the controller may still write to this
  2338. * memory, and leave the DCBAA entry intact.
  2339. *
  2340. * If the controller later reports that this same slot
  2341. * has been re-enabled, then some assertions will be
  2342. * triggered.
  2343. */
  2344. DBGC ( xhci, "XHCI %p slot %d leaking context memory\n",
  2345. xhci, slot->id );
  2346. slot->context = NULL;
  2347. }
  2348. /* Free slot */
  2349. if ( slot->context ) {
  2350. free_dma ( slot->context, len );
  2351. xhci->dcbaa[id] = 0;
  2352. }
  2353. xhci->slot[id] = NULL;
  2354. free ( slot );
  2355. }
  2356. /**
  2357. * Assign device address
  2358. *
  2359. * @v usb USB device
  2360. * @ret rc Return status code
  2361. */
  2362. static int xhci_device_address ( struct usb_device *usb ) {
  2363. struct xhci_slot *slot = usb_get_hostdata ( usb );
  2364. struct xhci_device *xhci = slot->xhci;
  2365. struct usb_port *port = usb->port;
  2366. struct usb_port *root_port;
  2367. int psiv;
  2368. int rc;
  2369. /* Calculate route string */
  2370. slot->route = usb_route_string ( usb );
  2371. /* Calculate root hub port number */
  2372. root_port = usb_root_hub_port ( usb );
  2373. slot->port = root_port->address;
  2374. /* Calculate protocol speed ID */
  2375. psiv = xhci_port_psiv ( xhci, slot->port, port->speed );
  2376. if ( psiv < 0 ) {
  2377. rc = psiv;
  2378. return rc;
  2379. }
  2380. slot->psiv = psiv;
  2381. /* Address device */
  2382. if ( ( rc = xhci_address_device ( xhci, slot ) ) != 0 )
  2383. return rc;
  2384. return 0;
  2385. }
  2386. /******************************************************************************
  2387. *
  2388. * Bus operations
  2389. *
  2390. ******************************************************************************
  2391. */
  2392. /**
  2393. * Open USB bus
  2394. *
  2395. * @v bus USB bus
  2396. * @ret rc Return status code
  2397. */
  2398. static int xhci_bus_open ( struct usb_bus *bus ) {
  2399. struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
  2400. int rc;
  2401. /* Allocate device slot array */
2402. xhci->slot = zalloc ( ( xhci->slots + 1 ) * sizeof ( xhci->slot[0] ) );
  2403. if ( ! xhci->slot ) {
  2404. rc = -ENOMEM;
  2405. goto err_slot_alloc;
  2406. }
  2407. /* Allocate device context base address array */
  2408. if ( ( rc = xhci_dcbaa_alloc ( xhci ) ) != 0 )
  2409. goto err_dcbaa_alloc;
  2410. /* Allocate scratchpad buffers */
  2411. if ( ( rc = xhci_scratchpad_alloc ( xhci ) ) != 0 )
  2412. goto err_scratchpad_alloc;
  2413. /* Allocate command ring */
  2414. if ( ( rc = xhci_command_alloc ( xhci ) ) != 0 )
  2415. goto err_command_alloc;
  2416. /* Allocate event ring */
  2417. if ( ( rc = xhci_event_alloc ( xhci ) ) != 0 )
  2418. goto err_event_alloc;
  2419. /* Start controller */
  2420. xhci_run ( xhci );
  2421. return 0;
  2422. xhci_stop ( xhci );
  2423. xhci_event_free ( xhci );
  2424. err_event_alloc:
  2425. xhci_command_free ( xhci );
  2426. err_command_alloc:
  2427. xhci_scratchpad_free ( xhci );
  2428. err_scratchpad_alloc:
  2429. xhci_dcbaa_free ( xhci );
  2430. err_dcbaa_alloc:
  2431. free ( xhci->slot );
  2432. err_slot_alloc:
  2433. return rc;
  2434. }
  2435. /**
  2436. * Close USB bus
  2437. *
  2438. * @v bus USB bus
  2439. */
  2440. static void xhci_bus_close ( struct usb_bus *bus ) {
  2441. struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
  2442. unsigned int i;
  2443. /* Sanity checks */
  2444. assert ( xhci->slot != NULL );
2445. for ( i = 0 ; i <= xhci->slots ; i++ )
  2446. assert ( xhci->slot[i] == NULL );
  2447. xhci_stop ( xhci );
  2448. xhci_event_free ( xhci );
  2449. xhci_command_free ( xhci );
  2450. xhci_scratchpad_free ( xhci );
  2451. xhci_dcbaa_free ( xhci );
  2452. free ( xhci->slot );
  2453. }
  2454. /**
  2455. * Poll USB bus
  2456. *
  2457. * @v bus USB bus
  2458. */
  2459. static void xhci_bus_poll ( struct usb_bus *bus ) {
  2460. struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
  2461. /* Poll event ring */
  2462. xhci_event_poll ( xhci );
  2463. }
  2464. /******************************************************************************
  2465. *
  2466. * Hub operations
  2467. *
  2468. ******************************************************************************
  2469. */
  2470. /**
  2471. * Open hub
  2472. *
  2473. * @v hub USB hub
  2474. * @ret rc Return status code
  2475. */
  2476. static int xhci_hub_open ( struct usb_hub *hub ) {
  2477. struct xhci_slot *slot;
  2478. /* Do nothing if this is the root hub */
  2479. if ( ! hub->usb )
  2480. return 0;
  2481. /* Get device slot */
  2482. slot = usb_get_hostdata ( hub->usb );
  2483. /* Update device slot hub parameters. We don't inform the
  2484. * hardware of this information until the hub's interrupt
  2485. * endpoint is opened, since the only mechanism for so doing
  2486. * provided by the xHCI specification is a Configure Endpoint
  2487. * command, and we can't issue that command until we have a
  2488. * non-EP0 endpoint to configure.
  2489. */
  2490. slot->ports = hub->ports;
  2491. return 0;
  2492. }
  2493. /**
  2494. * Close hub
  2495. *
  2496. * @v hub USB hub
  2497. */
  2498. static void xhci_hub_close ( struct usb_hub *hub __unused ) {
  2499. /* Nothing to do */
  2500. }
  2501. /******************************************************************************
  2502. *
  2503. * Root hub operations
  2504. *
  2505. ******************************************************************************
  2506. */
  2507. /**
  2508. * Open root hub
  2509. *
  2510. * @v hub USB hub
  2511. * @ret rc Return status code
  2512. */
  2513. static int xhci_root_open ( struct usb_hub *hub ) {
  2514. struct usb_bus *bus = hub->bus;
  2515. struct xhci_device *xhci = usb_bus_get_hostdata ( bus );
  2516. struct usb_port *port;
  2517. uint32_t portsc;
  2518. unsigned int i;
  2519. /* Enable power to all ports */
  2520. for ( i = 1 ; i <= xhci->ports ; i++ ) {
  2521. portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) );
  2522. portsc &= XHCI_PORTSC_PRESERVE;
  2523. portsc |= XHCI_PORTSC_PP;
  2524. writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) );
  2525. }
  2526. /* xHCI spec requires us to potentially wait 20ms after
  2527. * enabling power to a port.
  2528. */
  2529. mdelay ( XHCI_PORT_POWER_DELAY_MS );
  2530. /* USB3 ports may power up as Disabled */
  2531. for ( i = 1 ; i <= xhci->ports ; i++ ) {
  2532. portsc = readl ( xhci->op + XHCI_OP_PORTSC ( i ) );
  2533. port = usb_port ( hub, i );
  2534. if ( ( port->protocol >= USB_PROTO_3_0 ) &&
  2535. ( ( portsc & XHCI_PORTSC_PLS_MASK ) ==
  2536. XHCI_PORTSC_PLS_DISABLED ) ) {
  2537. /* Force link state to RxDetect */
  2538. portsc &= XHCI_PORTSC_PRESERVE;
  2539. portsc |= ( XHCI_PORTSC_PLS_RXDETECT | XHCI_PORTSC_LWS);
  2540. writel ( portsc, xhci->op + XHCI_OP_PORTSC ( i ) );
  2541. }
  2542. }
  2543. /* Some xHCI cards seem to require an additional delay after
  2544. * setting the link state to RxDetect.
  2545. */
  2546. mdelay ( XHCI_LINK_STATE_DELAY_MS );
  2547. /* Record hub driver private data */
  2548. usb_hub_set_drvdata ( hub, xhci );
  2549. return 0;
  2550. }
  2551. /**
  2552. * Close root hub
  2553. *
  2554. * @v hub USB hub
  2555. */
  2556. static void xhci_root_close ( struct usb_hub *hub ) {
  2557. /* Clear hub driver private data */
  2558. usb_hub_set_drvdata ( hub, NULL );
  2559. }
  2560. /**
  2561. * Enable port
  2562. *
  2563. * @v hub USB hub
  2564. * @v port USB port
  2565. * @ret rc Return status code
  2566. */
  2567. static int xhci_root_enable ( struct usb_hub *hub, struct usb_port *port ) {
  2568. struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
  2569. uint32_t portsc;
  2570. unsigned int i;
  2571. /* Reset port */
  2572. portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
  2573. portsc &= XHCI_PORTSC_PRESERVE;
  2574. portsc |= XHCI_PORTSC_PR;
  2575. writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );
  2576. /* Wait for port to become enabled */
  2577. for ( i = 0 ; i < XHCI_PORT_RESET_MAX_WAIT_MS ; i++ ) {
  2578. /* Check port status */
  2579. portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
  2580. if ( portsc & XHCI_PORTSC_PED )
  2581. return 0;
  2582. /* Delay */
  2583. mdelay ( 1 );
  2584. }
  2585. DBGC ( xhci, "XHCI %p timed out waiting for port %d to enable\n",
  2586. xhci, port->address );
  2587. return -ETIMEDOUT;
  2588. }
  2589. /**
  2590. * Disable port
  2591. *
  2592. * @v hub USB hub
  2593. * @v port USB port
  2594. * @ret rc Return status code
  2595. */
  2596. static int xhci_root_disable ( struct usb_hub *hub, struct usb_port *port ) {
  2597. struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
  2598. uint32_t portsc;
  2599. /* Disable port */
  2600. portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
  2601. portsc &= XHCI_PORTSC_PRESERVE;
  2602. portsc |= XHCI_PORTSC_PED;
  2603. writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );
  2604. return 0;
  2605. }
  2606. /**
  2607. * Update root hub port speed
  2608. *
  2609. * @v hub USB hub
  2610. * @v port USB port
  2611. * @ret rc Return status code
  2612. */
  2613. static int xhci_root_speed ( struct usb_hub *hub, struct usb_port *port ) {
  2614. struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
  2615. uint32_t portsc;
  2616. unsigned int psiv;
  2617. int ccs;
  2618. int ped;
  2619. int csc;
  2620. int speed;
  2621. int rc;
  2622. /* Read port status */
  2623. portsc = readl ( xhci->op + XHCI_OP_PORTSC ( port->address ) );
  2624. DBGC2 ( xhci, "XHCI %p port %d status is %08x\n",
  2625. xhci, port->address, portsc );
  2626. ccs = ( portsc & XHCI_PORTSC_CCS );
  2627. ped = ( portsc & XHCI_PORTSC_PED );
  2628. csc = ( portsc & XHCI_PORTSC_CSC );
  2629. psiv = XHCI_PORTSC_PSIV ( portsc );
  2630. /* Record disconnections and clear changes */
  2631. port->disconnected |= csc;
  2632. portsc &= ( XHCI_PORTSC_PRESERVE | XHCI_PORTSC_CHANGE );
  2633. writel ( portsc, xhci->op + XHCI_OP_PORTSC ( port->address ) );
  2634. /* Port speed is not valid unless port is connected */
  2635. if ( ! ccs ) {
  2636. port->speed = USB_SPEED_NONE;
  2637. return 0;
  2638. }
  2639. /* For USB2 ports, the PSIV field is not valid until the port
  2640. * completes reset and becomes enabled.
  2641. */
  2642. if ( ( port->protocol < USB_PROTO_3_0 ) && ! ped ) {
  2643. port->speed = USB_SPEED_FULL;
  2644. return 0;
  2645. }
  2646. /* Get port speed and map to generic USB speed */
  2647. speed = xhci_port_speed ( xhci, port->address, psiv );
  2648. if ( speed < 0 ) {
  2649. rc = speed;
  2650. return rc;
  2651. }
  2652. port->speed = speed;
  2653. return 0;
  2654. }
  2655. /**
  2656. * Clear transaction translator buffer
  2657. *
  2658. * @v hub USB hub
  2659. * @v port USB port
  2660. * @v ep USB endpoint
  2661. * @ret rc Return status code
  2662. */
  2663. static int xhci_root_clear_tt ( struct usb_hub *hub, struct usb_port *port,
  2664. struct usb_endpoint *ep ) {
2665. struct xhci_device *xhci = usb_hub_get_drvdata ( hub );
2666. /* Should never be called; this is a root hub */
2667. DBGC ( xhci, "XHCI %p port %d nonsensical CLEAR_TT for %s endpoint "
2668. "%02x\n", xhci, port->address, ep->usb->name, ep->address );
  2669. return -ENOTSUP;
  2670. }
  2671. /******************************************************************************
  2672. *
  2673. * PCI interface
  2674. *
  2675. ******************************************************************************
  2676. */
  2677. /** USB host controller operations */
  2678. static struct usb_host_operations xhci_operations = {
  2679. .endpoint = {
  2680. .open = xhci_endpoint_open,
  2681. .close = xhci_endpoint_close,
  2682. .reset = xhci_endpoint_reset,
  2683. .mtu = xhci_endpoint_mtu,
  2684. .message = xhci_endpoint_message,
  2685. .stream = xhci_endpoint_stream,
  2686. },
  2687. .device = {
  2688. .open = xhci_device_open,
  2689. .close = xhci_device_close,
  2690. .address = xhci_device_address,
  2691. },
  2692. .bus = {
  2693. .open = xhci_bus_open,
  2694. .close = xhci_bus_close,
  2695. .poll = xhci_bus_poll,
  2696. },
  2697. .hub = {
  2698. .open = xhci_hub_open,
  2699. .close = xhci_hub_close,
  2700. },
  2701. .root = {
  2702. .open = xhci_root_open,
  2703. .close = xhci_root_close,
  2704. .enable = xhci_root_enable,
  2705. .disable = xhci_root_disable,
  2706. .speed = xhci_root_speed,
  2707. .clear_tt = xhci_root_clear_tt,
  2708. },
  2709. };
  2710. /**
  2711. * Fix Intel PCH-specific quirks
  2712. *
  2713. * @v xhci xHCI device
  2714. * @v pci PCI device
  2715. */
  2716. static void xhci_pch_fix ( struct xhci_device *xhci, struct pci_device *pci ) {
  2717. struct xhci_pch *pch = &xhci->pch;
  2718. uint32_t xusb2pr;
  2719. uint32_t xusb2prm;
  2720. uint32_t usb3pssen;
  2721. uint32_t usb3prm;
  2722. /* Enable SuperSpeed capability. Do this before rerouting
  2723. * USB2 ports, so that USB3 devices connect at SuperSpeed.
  2724. */
  2725. pci_read_config_dword ( pci, XHCI_PCH_USB3PSSEN, &usb3pssen );
  2726. pci_read_config_dword ( pci, XHCI_PCH_USB3PRM, &usb3prm );
  2727. if ( usb3prm & ~usb3pssen ) {
  2728. DBGC ( xhci, "XHCI %p enabling SuperSpeed on ports %08x\n",
  2729. xhci, ( usb3prm & ~usb3pssen ) );
  2730. }
  2731. pch->usb3pssen = usb3pssen;
  2732. usb3pssen |= usb3prm;
  2733. pci_write_config_dword ( pci, XHCI_PCH_USB3PSSEN, usb3pssen );
  2734. /* Route USB2 ports from EHCI to xHCI */
  2735. pci_read_config_dword ( pci, XHCI_PCH_XUSB2PR, &xusb2pr );
  2736. pci_read_config_dword ( pci, XHCI_PCH_XUSB2PRM, &xusb2prm );
  2737. if ( xusb2prm & ~xusb2pr ) {
  2738. DBGC ( xhci, "XHCI %p routing ports %08x from EHCI to xHCI\n",
  2739. xhci, ( xusb2prm & ~xusb2pr ) );
  2740. }
  2741. pch->xusb2pr = xusb2pr;
  2742. xusb2pr |= xusb2prm;
  2743. pci_write_config_dword ( pci, XHCI_PCH_XUSB2PR, xusb2pr );
  2744. }
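/* Background (informational): on Intel PCH chipsets the same physical
 * USB2 ports can be routed to either the EHCI or the xHCI controller.
 * XUSB2PRM and USB3PRM are masks of the ports that the platform allows
 * to be switched; XUSB2PR and USB3PSSEN select what is currently routed
 * or enabled.  The code above ORs each permitted mask into the current
 * setting, claiming every switchable port for xHCI, and records the
 * original values so that xhci_pch_undo() can restore them on exit.
 */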
  2745. /**
  2746. * Undo Intel PCH-specific quirk fixes
  2747. *
  2748. * @v xhci xHCI device
  2749. * @v pci PCI device
  2750. */
  2751. static void xhci_pch_undo ( struct xhci_device *xhci, struct pci_device *pci ) {
  2752. struct xhci_pch *pch = &xhci->pch;
  2753. /* Restore USB2 port routing to original state */
  2754. pci_write_config_dword ( pci, XHCI_PCH_XUSB2PR, pch->xusb2pr );
  2755. /* Restore SuperSpeed capability to original state */
  2756. pci_write_config_dword ( pci, XHCI_PCH_USB3PSSEN, pch->usb3pssen );
  2757. }
  2758. /**
  2759. * Probe PCI device
  2760. *
  2761. * @v pci PCI device
  2762. * @ret rc Return status code
  2763. */
  2764. static int xhci_probe ( struct pci_device *pci ) {
  2765. struct xhci_device *xhci;
  2766. struct usb_port *port;
  2767. unsigned long bar_start;
  2768. size_t bar_size;
  2769. unsigned int i;
  2770. int rc;
  2771. /* Allocate and initialise structure */
  2772. xhci = zalloc ( sizeof ( *xhci ) );
  2773. if ( ! xhci ) {
  2774. rc = -ENOMEM;
  2775. goto err_alloc;
  2776. }
  2777. /* Fix up PCI device */
  2778. adjust_pci_device ( pci );
  2779. /* Map registers */
  2780. bar_start = pci_bar_start ( pci, XHCI_BAR );
  2781. bar_size = pci_bar_size ( pci, XHCI_BAR );
  2782. xhci->regs = ioremap ( bar_start, bar_size );
  2783. if ( ! xhci->regs ) {
  2784. rc = -ENODEV;
  2785. goto err_ioremap;
  2786. }
  2787. /* Initialise xHCI device */
  2788. xhci_init ( xhci, xhci->regs );
  2789. /* Initialise USB legacy support and claim ownership */
  2790. xhci_legacy_init ( xhci );
  2791. xhci_legacy_claim ( xhci );
  2792. /* Fix Intel PCH-specific quirks, if applicable */
  2793. if ( pci->id->driver_data & XHCI_PCH )
  2794. xhci_pch_fix ( xhci, pci );
  2795. /* Reset device */
  2796. if ( ( rc = xhci_reset ( xhci ) ) != 0 )
  2797. goto err_reset;
  2798. /* Allocate USB bus */
  2799. xhci->bus = alloc_usb_bus ( &pci->dev, xhci->ports, XHCI_MTU,
  2800. &xhci_operations );
  2801. if ( ! xhci->bus ) {
  2802. rc = -ENOMEM;
  2803. goto err_alloc_bus;
  2804. }
  2805. usb_bus_set_hostdata ( xhci->bus, xhci );
  2806. usb_hub_set_drvdata ( xhci->bus->hub, xhci );
  2807. /* Set port protocols */
  2808. for ( i = 1 ; i <= xhci->ports ; i++ ) {
  2809. port = usb_port ( xhci->bus->hub, i );
  2810. port->protocol = xhci_port_protocol ( xhci, i );
  2811. }
  2812. /* Register USB bus */
  2813. if ( ( rc = register_usb_bus ( xhci->bus ) ) != 0 )
  2814. goto err_register;
  2815. pci_set_drvdata ( pci, xhci );
  2816. return 0;
  2817. unregister_usb_bus ( xhci->bus );
  2818. err_register:
  2819. free_usb_bus ( xhci->bus );
  2820. err_alloc_bus:
  2821. xhci_reset ( xhci );
  2822. err_reset:
  2823. if ( pci->id->driver_data & XHCI_PCH )
  2824. xhci_pch_undo ( xhci, pci );
  2825. xhci_legacy_release ( xhci );
  2826. iounmap ( xhci->regs );
  2827. err_ioremap:
  2828. free ( xhci );
  2829. err_alloc:
  2830. return rc;
  2831. }
  2832. /**
  2833. * Remove PCI device
  2834. *
  2835. * @v pci PCI device
  2836. */
  2837. static void xhci_remove ( struct pci_device *pci ) {
  2838. struct xhci_device *xhci = pci_get_drvdata ( pci );
  2839. struct usb_bus *bus = xhci->bus;
  2840. unregister_usb_bus ( bus );
  2841. free_usb_bus ( bus );
  2842. xhci_reset ( xhci );
  2843. if ( pci->id->driver_data & XHCI_PCH )
  2844. xhci_pch_undo ( xhci, pci );
  2845. xhci_legacy_release ( xhci );
  2846. iounmap ( xhci->regs );
  2847. free ( xhci );
  2848. }
  2849. /** XHCI PCI device IDs */
  2850. static struct pci_device_id xhci_ids[] = {
  2851. PCI_ROM ( 0x8086, 0xffff, "xhci-pch", "xHCI (Intel PCH)", XHCI_PCH ),
  2852. PCI_ROM ( 0xffff, 0xffff, "xhci", "xHCI", 0 ),
  2853. };
  2854. /** XHCI PCI driver */
  2855. struct pci_driver xhci_driver __pci_driver = {
  2856. .ids = xhci_ids,
  2857. .id_count = ( sizeof ( xhci_ids ) / sizeof ( xhci_ids[0] ) ),
  2858. .class = PCI_CLASS_ID ( PCI_CLASS_SERIAL, PCI_CLASS_SERIAL_USB,
  2859. PCI_CLASS_SERIAL_USB_XHCI ),
  2860. .probe = xhci_probe,
  2861. .remove = xhci_remove,
  2862. };
  2863. /**
  2864. * Prepare for exit
  2865. *
  2866. * @v booting System is shutting down for OS boot
  2867. */
  2868. static void xhci_shutdown ( int booting ) {
  2869. /* If we are shutting down to boot an OS, then prevent the
  2870. * release of ownership back to BIOS.
  2871. */
  2872. xhci_legacy_prevent_release = booting;
  2873. }
  2874. /** Startup/shutdown function */
  2875. struct startup_fn xhci_startup __startup_fn ( STARTUP_LATE ) = {
  2876. .shutdown = xhci_shutdown,
  2877. };