
arbel.c

/*
 * Copyright (C) 2007 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * Based in part upon the original driver by Mellanox Technologies
 * Ltd.  Portions may be Copyright (c) Mellanox Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

FILE_LICENCE ( GPL2_OR_LATER );

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <errno.h>
#include <byteswap.h>
#include <ipxe/io.h>
#include <ipxe/pci.h>
#include <ipxe/pcibackup.h>
#include <ipxe/malloc.h>
#include <ipxe/umalloc.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>
#include <ipxe/infiniband.h>
#include <ipxe/ib_smc.h>
#include "arbel.h"

/**
 * @file
 *
 * Mellanox Arbel Infiniband HCA
 *
 */

/***************************************************************************
 *
 * Queue number allocation
 *
 ***************************************************************************
 */

/**
 * Allocate offset within usage bitmask
 *
 * @v bits		Usage bitmask
 * @v bits_len		Length of usage bitmask
 * @ret bit		First free bit within bitmask, or negative error
 */
static int arbel_bitmask_alloc ( arbel_bitmask_t *bits,
                                 unsigned int bits_len ) {
        unsigned int bit = 0;
        arbel_bitmask_t mask = 1;

        while ( bit < bits_len ) {
                if ( ( mask & *bits ) == 0 ) {
                        *bits |= mask;
                        return bit;
                }
                bit++;
                mask = ( mask << 1 ) | ( mask >> ( 8 * sizeof ( mask ) - 1 ) );
                if ( mask == 1 )
                        bits++;
        }
        return -ENFILE;
}
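
/* The allocator above scans one bit at a time: the single-bit mask
 * rotates left, and each time it wraps back to 1 a whole
 * arbel_bitmask_t word has been examined, so the word pointer
 * advances.  Minimal usage sketch (this is how the CQ and QP
 * allocation paths below call it):
 *
 *	int offset = arbel_bitmask_alloc ( arbel->cq_inuse,
 *					   ARBEL_MAX_CQS );
 *	if ( offset < 0 )
 *		goto err;	// -ENFILE: no free bit available
 */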

/**
 * Free offset within usage bitmask
 *
 * @v bits		Usage bitmask
 * @v bit		Bit within bitmask
 */
static void arbel_bitmask_free ( arbel_bitmask_t *bits, int bit ) {
        arbel_bitmask_t mask;

        mask = ( 1 << ( bit % ( 8 * sizeof ( mask ) ) ) );
        bits += ( bit / ( 8 * sizeof ( mask ) ) );
        *bits &= ~mask;
}

/***************************************************************************
 *
 * HCA commands
 *
 ***************************************************************************
 */

/**
 * Wait for Arbel command completion
 *
 * @v arbel		Arbel device
 * @v hcr		HCA command registers
 * @ret rc		Return status code
 */
static int arbel_cmd_wait ( struct arbel *arbel,
                            struct arbelprm_hca_command_register *hcr ) {
        unsigned int wait;

        for ( wait = ARBEL_HCR_MAX_WAIT_MS ; wait ; wait-- ) {
                hcr->u.dwords[6] =
                        readl ( arbel->config + ARBEL_HCR_REG ( 6 ) );
                if ( MLX_GET ( hcr, go ) == 0 )
                        return 0;
                mdelay ( 1 );
        }
        return -EBUSY;
}
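
/* arbel_cmd_wait() polls only HCR dword 6, which holds the "go" bit,
 * at one-millisecond intervals; the total timeout is therefore
 * ARBEL_HCR_MAX_WAIT_MS milliseconds.
 */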

/**
 * Issue HCA command
 *
 * @v arbel		Arbel device
 * @v command		Command opcode, flags and input/output lengths
 * @v op_mod		Opcode modifier (0 if no modifier applicable)
 * @v in		Input parameters
 * @v in_mod		Input modifier (0 if no modifier applicable)
 * @v out		Output parameters
 * @ret rc		Return status code
 */
static int arbel_cmd ( struct arbel *arbel, unsigned long command,
                       unsigned int op_mod, const void *in,
                       unsigned int in_mod, void *out ) {
        struct arbelprm_hca_command_register hcr;
        unsigned int opcode = ARBEL_HCR_OPCODE ( command );
        size_t in_len = ARBEL_HCR_IN_LEN ( command );
        size_t out_len = ARBEL_HCR_OUT_LEN ( command );
        void *in_buffer;
        void *out_buffer;
        unsigned int status;
        unsigned int i;
        int rc;

        assert ( in_len <= ARBEL_MBOX_SIZE );
        assert ( out_len <= ARBEL_MBOX_SIZE );
        DBGC2 ( arbel, "Arbel %p command %02x in %zx%s out %zx%s\n",
                arbel, opcode, in_len,
                ( ( command & ARBEL_HCR_IN_MBOX ) ? "(mbox)" : "" ), out_len,
                ( ( command & ARBEL_HCR_OUT_MBOX ) ? "(mbox)" : "" ) );

        /* Check that HCR is free */
        if ( ( rc = arbel_cmd_wait ( arbel, &hcr ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p command interface locked\n", arbel );
                return rc;
        }

        /* Prepare HCR */
        memset ( &hcr, 0, sizeof ( hcr ) );
        in_buffer = &hcr.u.dwords[0];
        if ( in_len && ( command & ARBEL_HCR_IN_MBOX ) ) {
                in_buffer = arbel->mailbox_in;
                MLX_FILL_H ( &hcr, 0, in_param_h, virt_to_bus ( in_buffer ) );
                MLX_FILL_1 ( &hcr, 1, in_param_l, virt_to_bus ( in_buffer ) );
        }
        memcpy ( in_buffer, in, in_len );
        MLX_FILL_1 ( &hcr, 2, input_modifier, in_mod );
        out_buffer = &hcr.u.dwords[3];
        if ( out_len && ( command & ARBEL_HCR_OUT_MBOX ) ) {
                out_buffer = arbel->mailbox_out;
                MLX_FILL_H ( &hcr, 3, out_param_h,
                             virt_to_bus ( out_buffer ) );
                MLX_FILL_1 ( &hcr, 4, out_param_l,
                             virt_to_bus ( out_buffer ) );
        }
        MLX_FILL_3 ( &hcr, 6,
                     opcode, opcode,
                     opcode_modifier, op_mod,
                     go, 1 );
        DBGC ( arbel, "Arbel %p issuing command %04x\n", arbel, opcode );
        DBGC2_HDA ( arbel, virt_to_phys ( arbel->config + ARBEL_HCR_BASE ),
                    &hcr, sizeof ( hcr ) );
        if ( in_len && ( command & ARBEL_HCR_IN_MBOX ) ) {
                DBGC2 ( arbel, "Input mailbox:\n" );
                DBGC2_HDA ( arbel, virt_to_phys ( in_buffer ), in_buffer,
                            ( ( in_len < 512 ) ? in_len : 512 ) );
        }

        /* Issue command */
        for ( i = 0 ; i < ( sizeof ( hcr ) / sizeof ( hcr.u.dwords[0] ) ) ;
              i++ ) {
                writel ( hcr.u.dwords[i],
                         arbel->config + ARBEL_HCR_REG ( i ) );
                barrier();
        }

        /* Wait for command completion */
        if ( ( rc = arbel_cmd_wait ( arbel, &hcr ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p timed out waiting for command:\n",
                       arbel );
                DBGC_HD ( arbel, &hcr, sizeof ( hcr ) );
                return rc;
        }

        /* Check command status */
        status = MLX_GET ( &hcr, status );
        if ( status != 0 ) {
                DBGC ( arbel, "Arbel %p command failed with status %02x:\n",
                       arbel, status );
                DBGC_HD ( arbel, &hcr, sizeof ( hcr ) );
                return -EIO;
        }

        /* Read output parameters, if any */
        hcr.u.dwords[3] = readl ( arbel->config + ARBEL_HCR_REG ( 3 ) );
        hcr.u.dwords[4] = readl ( arbel->config + ARBEL_HCR_REG ( 4 ) );
        memcpy ( out, out_buffer, out_len );
        if ( out_len ) {
                DBGC2 ( arbel, "Output%s:\n",
                        ( command & ARBEL_HCR_OUT_MBOX ) ? " mailbox" : "" );
                DBGC2_HDA ( arbel, virt_to_phys ( out_buffer ), out_buffer,
                            ( ( out_len < 512 ) ? out_len : 512 ) );
        }

        return 0;
}
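
/* For reference, the HCR layout as arbel_cmd() uses it: dwords 0-1
 * carry the input parameter (or input mailbox address), dword 2 the
 * input modifier, dwords 3-4 the output parameter (or output mailbox
 * address), and dword 6 the opcode, opcode modifier, "go" bit and
 * completion status.  Parameters too large for the register file must
 * set ARBEL_HCR_IN_MBOX and/or ARBEL_HCR_OUT_MBOX so that the
 * pre-allocated DMA mailboxes are used instead.
 */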

static inline int
arbel_cmd_query_dev_lim ( struct arbel *arbel,
                          struct arbelprm_query_dev_lim *dev_lim ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_DEV_LIM,
                                               1, sizeof ( *dev_lim ) ),
                           0, NULL, 0, dev_lim );
}

static inline int
arbel_cmd_query_fw ( struct arbel *arbel, struct arbelprm_query_fw *fw ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_FW,
                                               1, sizeof ( *fw ) ),
                           0, NULL, 0, fw );
}

static inline int
arbel_cmd_init_hca ( struct arbel *arbel,
                     const struct arbelprm_init_hca *init_hca ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT_HCA,
                                              1, sizeof ( *init_hca ) ),
                           0, init_hca, 0, NULL );
}

static inline int
arbel_cmd_close_hca ( struct arbel *arbel ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_VOID_CMD ( ARBEL_HCR_CLOSE_HCA ),
                           0, NULL, 0, NULL );
}

static inline int
arbel_cmd_init_ib ( struct arbel *arbel, unsigned int port,
                    const struct arbelprm_init_ib *init_ib ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT_IB,
                                              1, sizeof ( *init_ib ) ),
                           0, init_ib, port, NULL );
}

static inline int
arbel_cmd_close_ib ( struct arbel *arbel, unsigned int port ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_VOID_CMD ( ARBEL_HCR_CLOSE_IB ),
                           0, NULL, port, NULL );
}

static inline int
arbel_cmd_sw2hw_mpt ( struct arbel *arbel, unsigned int index,
                      const struct arbelprm_mpt *mpt ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_MPT,
                                              1, sizeof ( *mpt ) ),
                           0, mpt, index, NULL );
}

static inline int
arbel_cmd_map_eq ( struct arbel *arbel, unsigned long index_map,
                   const struct arbelprm_event_mask *mask ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_EQ,
                                              0, sizeof ( *mask ) ),
                           0, mask, index_map, NULL );
}

static inline int
arbel_cmd_sw2hw_eq ( struct arbel *arbel, unsigned int index,
                     const struct arbelprm_eqc *eqctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_EQ,
                                              1, sizeof ( *eqctx ) ),
                           0, eqctx, index, NULL );
}

static inline int
arbel_cmd_hw2sw_eq ( struct arbel *arbel, unsigned int index,
                     struct arbelprm_eqc *eqctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_OUT_CMD ( ARBEL_HCR_HW2SW_EQ,
                                               1, sizeof ( *eqctx ) ),
                           1, NULL, index, eqctx );
}

static inline int
arbel_cmd_sw2hw_cq ( struct arbel *arbel, unsigned long cqn,
                     const struct arbelprm_completion_queue_context *cqctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_CQ,
                                              1, sizeof ( *cqctx ) ),
                           0, cqctx, cqn, NULL );
}

static inline int
arbel_cmd_hw2sw_cq ( struct arbel *arbel, unsigned long cqn,
                     struct arbelprm_completion_queue_context *cqctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_OUT_CMD ( ARBEL_HCR_HW2SW_CQ,
                                               1, sizeof ( *cqctx ) ),
                           0, NULL, cqn, cqctx );
}

static inline int
arbel_cmd_query_cq ( struct arbel *arbel, unsigned long cqn,
                     struct arbelprm_completion_queue_context *cqctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_CQ,
                                               1, sizeof ( *cqctx ) ),
                           0, NULL, cqn, cqctx );
}

static inline int
arbel_cmd_rst2init_qpee ( struct arbel *arbel, unsigned long qpn,
                          const struct arbelprm_qp_ee_state_transitions *ctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_RST2INIT_QPEE,
                                              1, sizeof ( *ctx ) ),
                           0, ctx, qpn, NULL );
}

static inline int
arbel_cmd_init2rtr_qpee ( struct arbel *arbel, unsigned long qpn,
                          const struct arbelprm_qp_ee_state_transitions *ctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT2RTR_QPEE,
                                              1, sizeof ( *ctx ) ),
                           0, ctx, qpn, NULL );
}

static inline int
arbel_cmd_rtr2rts_qpee ( struct arbel *arbel, unsigned long qpn,
                         const struct arbelprm_qp_ee_state_transitions *ctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_RTR2RTS_QPEE,
                                              1, sizeof ( *ctx ) ),
                           0, ctx, qpn, NULL );
}

static inline int
arbel_cmd_rts2rts_qpee ( struct arbel *arbel, unsigned long qpn,
                         const struct arbelprm_qp_ee_state_transitions *ctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_RTS2RTS_QPEE,
                                              1, sizeof ( *ctx ) ),
                           0, ctx, qpn, NULL );
}

static inline int
arbel_cmd_2rst_qpee ( struct arbel *arbel, unsigned long qpn ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_VOID_CMD ( ARBEL_HCR_2RST_QPEE ),
                           0x03, NULL, qpn, NULL );
}

static inline int
arbel_cmd_query_qpee ( struct arbel *arbel, unsigned long qpn,
                       struct arbelprm_qp_ee_state_transitions *ctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_QPEE,
                                               1, sizeof ( *ctx ) ),
                           0, NULL, qpn, ctx );
}

static inline int
arbel_cmd_conf_special_qp ( struct arbel *arbel, unsigned int qp_type,
                            unsigned long base_qpn ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_VOID_CMD ( ARBEL_HCR_CONF_SPECIAL_QP ),
                           qp_type, NULL, base_qpn, NULL );
}

static inline int
arbel_cmd_mad_ifc ( struct arbel *arbel, unsigned int port,
                    union arbelprm_mad *mad ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_MAD_IFC,
                                                 1, sizeof ( *mad ),
                                                 1, sizeof ( *mad ) ),
                           0x03, mad, port, mad );
}

static inline int
arbel_cmd_read_mgm ( struct arbel *arbel, unsigned int index,
                     struct arbelprm_mgm_entry *mgm ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_OUT_CMD ( ARBEL_HCR_READ_MGM,
                                               1, sizeof ( *mgm ) ),
                           0, NULL, index, mgm );
}

static inline int
arbel_cmd_write_mgm ( struct arbel *arbel, unsigned int index,
                      const struct arbelprm_mgm_entry *mgm ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_WRITE_MGM,
                                              1, sizeof ( *mgm ) ),
                           0, mgm, index, NULL );
}

static inline int
arbel_cmd_mgid_hash ( struct arbel *arbel, const union ib_gid *gid,
                      struct arbelprm_mgm_hash *hash ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_MGID_HASH,
                                                 1, sizeof ( *gid ),
                                                 0, sizeof ( *hash ) ),
                           0, gid, 0, hash );
}

static inline int
arbel_cmd_run_fw ( struct arbel *arbel ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_VOID_CMD ( ARBEL_HCR_RUN_FW ),
                           0, NULL, 0, NULL );
}

static inline int
arbel_cmd_disable_lam ( struct arbel *arbel ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_VOID_CMD ( ARBEL_HCR_DISABLE_LAM ),
                           0, NULL, 0, NULL );
}

static inline int
arbel_cmd_enable_lam ( struct arbel *arbel, struct arbelprm_access_lam *lam ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_OUT_CMD ( ARBEL_HCR_ENABLE_LAM,
                                               1, sizeof ( *lam ) ),
                           1, NULL, 0, lam );
}

static inline int
arbel_cmd_unmap_icm ( struct arbel *arbel, unsigned int page_count,
                      const struct arbelprm_scalar_parameter *offset ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_UNMAP_ICM, 0,
                                              sizeof ( *offset ) ),
                           0, offset, page_count, NULL );
}

static inline int
arbel_cmd_map_icm ( struct arbel *arbel,
                    const struct arbelprm_virtual_physical_mapping *map ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_ICM,
                                              1, sizeof ( *map ) ),
                           0, map, 1, NULL );
}

static inline int
arbel_cmd_unmap_icm_aux ( struct arbel *arbel ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_VOID_CMD ( ARBEL_HCR_UNMAP_ICM_AUX ),
                           0, NULL, 0, NULL );
}

static inline int
arbel_cmd_map_icm_aux ( struct arbel *arbel,
                        const struct arbelprm_virtual_physical_mapping *map ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_ICM_AUX,
                                              1, sizeof ( *map ) ),
                           0, map, 1, NULL );
}

static inline int
arbel_cmd_set_icm_size ( struct arbel *arbel,
                         const struct arbelprm_scalar_parameter *icm_size,
                         struct arbelprm_scalar_parameter *icm_aux_size ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_SET_ICM_SIZE,
                                                 0, sizeof ( *icm_size ),
                                                 0, sizeof ( *icm_aux_size ) ),
                           0, icm_size, 0, icm_aux_size );
}

static inline int
arbel_cmd_unmap_fa ( struct arbel *arbel ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_VOID_CMD ( ARBEL_HCR_UNMAP_FA ),
                           0, NULL, 0, NULL );
}

static inline int
arbel_cmd_map_fa ( struct arbel *arbel,
                   const struct arbelprm_virtual_physical_mapping *map ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_FA,
                                              1, sizeof ( *map ) ),
                           0, map, 1, NULL );
}
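
/* Every wrapper above is a thin veneer over arbel_cmd(): the
 * ARBEL_HCR_IN_CMD / ARBEL_HCR_OUT_CMD / ARBEL_HCR_INOUT_CMD /
 * ARBEL_HCR_VOID_CMD macros bundle the opcode with the input/output
 * lengths and mailbox flags.  A new firmware command would follow the
 * same pattern (arbel_cmd_example, ARBEL_HCR_EXAMPLE and struct
 * arbelprm_example are illustrative names only):
 *
 *	static inline int
 *	arbel_cmd_example ( struct arbel *arbel,
 *			    struct arbelprm_example *out ) {
 *		return arbel_cmd ( arbel,
 *				   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_EXAMPLE, 1,
 *						       sizeof ( *out ) ),
 *				   0, NULL, 0, out );
 *	}
 */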

/***************************************************************************
 *
 * MAD operations
 *
 ***************************************************************************
 */

/**
 * Issue management datagram
 *
 * @v ibdev		Infiniband device
 * @v mad		Management datagram
 * @ret rc		Return status code
 */
static int arbel_mad ( struct ib_device *ibdev, union ib_mad *mad ) {
        struct arbel *arbel = ib_get_drvdata ( ibdev );
        union arbelprm_mad mad_ifc;
        int rc;

        linker_assert ( sizeof ( *mad ) == sizeof ( mad_ifc.mad ),
                        mad_size_mismatch );

        /* Copy in request packet */
        memcpy ( &mad_ifc.mad, mad, sizeof ( mad_ifc.mad ) );

        /* Issue MAD */
        if ( ( rc = arbel_cmd_mad_ifc ( arbel, ibdev->port,
                                        &mad_ifc ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p port %d could not issue MAD IFC: %s\n",
                       arbel, ibdev->port, strerror ( rc ) );
                return rc;
        }

        /* Copy out reply packet */
        memcpy ( mad, &mad_ifc.mad, sizeof ( *mad ) );

        if ( mad->hdr.status != 0 ) {
                DBGC ( arbel, "Arbel %p port %d MAD IFC status %04x\n",
                       arbel, ibdev->port, ntohs ( mad->hdr.status ) );
                return -EIO;
        }
        return 0;
}
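
/* MAD_IFC is an in/out command: the same union arbelprm_mad buffer is
 * passed as both input and output, so the reply overwrites the
 * request in place.  The MAD header status field arrives in network
 * byte order, hence the ntohs() in the debug message above.
 */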

/***************************************************************************
 *
 * Completion queue operations
 *
 ***************************************************************************
 */

/**
 * Dump completion queue context (for debugging only)
 *
 * @v arbel		Arbel device
 * @v cq		Completion queue
 * @ret rc		Return status code
 */
static __attribute__ (( unused )) int
arbel_dump_cqctx ( struct arbel *arbel, struct ib_completion_queue *cq ) {
        struct arbelprm_completion_queue_context cqctx;
        int rc;

        memset ( &cqctx, 0, sizeof ( cqctx ) );
        if ( ( rc = arbel_cmd_query_cq ( arbel, cq->cqn, &cqctx ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p CQN %#lx QUERY_CQ failed: %s\n",
                       arbel, cq->cqn, strerror ( rc ) );
                return rc;
        }
        DBGC ( arbel, "Arbel %p CQN %#lx context:\n", arbel, cq->cqn );
        DBGC_HDA ( arbel, 0, &cqctx, sizeof ( cqctx ) );

        return 0;
}

/**
 * Create completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @ret rc		Return status code
 */
static int arbel_create_cq ( struct ib_device *ibdev,
                             struct ib_completion_queue *cq ) {
        struct arbel *arbel = ib_get_drvdata ( ibdev );
        struct arbel_completion_queue *arbel_cq;
        struct arbelprm_completion_queue_context cqctx;
        struct arbelprm_cq_ci_db_record *ci_db_rec;
        struct arbelprm_cq_arm_db_record *arm_db_rec;
        int cqn_offset;
        unsigned int i;
        int rc;

        /* Find a free completion queue number */
        cqn_offset = arbel_bitmask_alloc ( arbel->cq_inuse, ARBEL_MAX_CQS );
        if ( cqn_offset < 0 ) {
                DBGC ( arbel, "Arbel %p out of completion queues\n", arbel );
                rc = cqn_offset;
                goto err_cqn_offset;
        }
        cq->cqn = ( arbel->limits.reserved_cqs + cqn_offset );

        /* Allocate control structures */
        arbel_cq = zalloc ( sizeof ( *arbel_cq ) );
        if ( ! arbel_cq ) {
                rc = -ENOMEM;
                goto err_arbel_cq;
        }
        arbel_cq->ci_doorbell_idx = arbel_cq_ci_doorbell_idx ( arbel, cq );
        arbel_cq->arm_doorbell_idx = arbel_cq_arm_doorbell_idx ( arbel, cq );

        /* Allocate completion queue itself */
        arbel_cq->cqe_size = ( cq->num_cqes * sizeof ( arbel_cq->cqe[0] ) );
        arbel_cq->cqe = malloc_dma ( arbel_cq->cqe_size,
                                     sizeof ( arbel_cq->cqe[0] ) );
        if ( ! arbel_cq->cqe ) {
                rc = -ENOMEM;
                goto err_cqe;
        }
        memset ( arbel_cq->cqe, 0, arbel_cq->cqe_size );
        for ( i = 0 ; i < cq->num_cqes ; i++ ) {
                MLX_FILL_1 ( &arbel_cq->cqe[i].normal, 7, owner, 1 );
        }
        barrier();

        /* Initialise doorbell records */
        ci_db_rec = &arbel->db_rec[arbel_cq->ci_doorbell_idx].cq_ci;
        MLX_FILL_1 ( ci_db_rec, 0, counter, 0 );
        MLX_FILL_2 ( ci_db_rec, 1,
                     res, ARBEL_UAR_RES_CQ_CI,
                     cq_number, cq->cqn );
        arm_db_rec = &arbel->db_rec[arbel_cq->arm_doorbell_idx].cq_arm;
        MLX_FILL_1 ( arm_db_rec, 0, counter, 0 );
        MLX_FILL_2 ( arm_db_rec, 1,
                     res, ARBEL_UAR_RES_CQ_ARM,
                     cq_number, cq->cqn );

        /* Hand queue over to hardware */
        memset ( &cqctx, 0, sizeof ( cqctx ) );
        MLX_FILL_1 ( &cqctx, 0, st, 0xa /* "Event fired" */ );
        MLX_FILL_H ( &cqctx, 1, start_address_h,
                     virt_to_bus ( arbel_cq->cqe ) );
        MLX_FILL_1 ( &cqctx, 2, start_address_l,
                     virt_to_bus ( arbel_cq->cqe ) );
        MLX_FILL_2 ( &cqctx, 3,
                     usr_page, arbel->limits.reserved_uars,
                     log_cq_size, fls ( cq->num_cqes - 1 ) );
        MLX_FILL_1 ( &cqctx, 5, c_eqn, arbel->eq.eqn );
        MLX_FILL_1 ( &cqctx, 6, pd, ARBEL_GLOBAL_PD );
        MLX_FILL_1 ( &cqctx, 7, l_key, arbel->lkey );
        MLX_FILL_1 ( &cqctx, 12, cqn, cq->cqn );
        MLX_FILL_1 ( &cqctx, 13,
                     cq_ci_db_record, arbel_cq->ci_doorbell_idx );
        MLX_FILL_1 ( &cqctx, 14,
                     cq_state_db_record, arbel_cq->arm_doorbell_idx );
        if ( ( rc = arbel_cmd_sw2hw_cq ( arbel, cq->cqn, &cqctx ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p CQN %#lx SW2HW_CQ failed: %s\n",
                       arbel, cq->cqn, strerror ( rc ) );
                goto err_sw2hw_cq;
        }

        DBGC ( arbel, "Arbel %p CQN %#lx ring [%08lx,%08lx), doorbell %08lx\n",
               arbel, cq->cqn, virt_to_phys ( arbel_cq->cqe ),
               ( virt_to_phys ( arbel_cq->cqe ) + arbel_cq->cqe_size ),
               virt_to_phys ( ci_db_rec ) );
        ib_cq_set_drvdata ( cq, arbel_cq );
        return 0;

 err_sw2hw_cq:
        MLX_FILL_1 ( ci_db_rec, 1, res, ARBEL_UAR_RES_NONE );
        MLX_FILL_1 ( arm_db_rec, 1, res, ARBEL_UAR_RES_NONE );
        free_dma ( arbel_cq->cqe, arbel_cq->cqe_size );
 err_cqe:
        free ( arbel_cq );
 err_arbel_cq:
        arbel_bitmask_free ( arbel->cq_inuse, cqn_offset );
 err_cqn_offset:
        return rc;
}
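
/* Note on the handover above: every CQE's owner flag is set, followed
 * by a barrier(), before SW2HW_CQ passes the ring to the device.  In
 * the usual Mellanox scheme the device flips this flag as it writes a
 * completion, which is what lets the polling code distinguish fresh
 * entries from ones already consumed.  The "st" value 0xa selects the
 * "event fired" state, per the inline comment.
 */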

/**
 * Destroy completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 */
static void arbel_destroy_cq ( struct ib_device *ibdev,
                               struct ib_completion_queue *cq ) {
        struct arbel *arbel = ib_get_drvdata ( ibdev );
        struct arbel_completion_queue *arbel_cq = ib_cq_get_drvdata ( cq );
        struct arbelprm_completion_queue_context cqctx;
        struct arbelprm_cq_ci_db_record *ci_db_rec;
        struct arbelprm_cq_arm_db_record *arm_db_rec;
        int cqn_offset;
        int rc;

        /* Take ownership back from hardware */
        if ( ( rc = arbel_cmd_hw2sw_cq ( arbel, cq->cqn, &cqctx ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p CQN %#lx FATAL HW2SW_CQ failed: "
                       "%s\n", arbel, cq->cqn, strerror ( rc ) );
                /* Leak memory and return; at least we avoid corruption */
                return;
        }

        /* Clear doorbell records */
        ci_db_rec = &arbel->db_rec[arbel_cq->ci_doorbell_idx].cq_ci;
        arm_db_rec = &arbel->db_rec[arbel_cq->arm_doorbell_idx].cq_arm;
        MLX_FILL_1 ( ci_db_rec, 1, res, ARBEL_UAR_RES_NONE );
        MLX_FILL_1 ( arm_db_rec, 1, res, ARBEL_UAR_RES_NONE );

        /* Free memory */
        free_dma ( arbel_cq->cqe, arbel_cq->cqe_size );
        free ( arbel_cq );

        /* Mark queue number as free */
        cqn_offset = ( cq->cqn - arbel->limits.reserved_cqs );
        arbel_bitmask_free ( arbel->cq_inuse, cqn_offset );

        ib_cq_set_drvdata ( cq, NULL );
}

/***************************************************************************
 *
 * Queue pair operations
 *
 ***************************************************************************
 */

/**
 * Assign queue pair number
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int arbel_alloc_qpn ( struct ib_device *ibdev,
                             struct ib_queue_pair *qp ) {
        struct arbel *arbel = ib_get_drvdata ( ibdev );
        unsigned int port_offset;
        int qpn_offset;

        /* Calculate queue pair number */
        port_offset = ( ibdev->port - ARBEL_PORT_BASE );
        switch ( qp->type ) {
        case IB_QPT_SMI:
                qp->qpn = ( arbel->special_qpn_base + port_offset );
                return 0;
        case IB_QPT_GSI:
                qp->qpn = ( arbel->special_qpn_base + 2 + port_offset );
                return 0;
        case IB_QPT_UD:
        case IB_QPT_RC:
                /* Find a free queue pair number */
                qpn_offset = arbel_bitmask_alloc ( arbel->qp_inuse,
                                                   ARBEL_MAX_QPS );
                if ( qpn_offset < 0 ) {
                        DBGC ( arbel, "Arbel %p out of queue pairs\n",
                               arbel );
                        return qpn_offset;
                }
                qp->qpn = ( ( random() & ARBEL_QPN_RANDOM_MASK ) |
                            ( arbel->qpn_base + qpn_offset ) );
                return 0;
        default:
                DBGC ( arbel, "Arbel %p unsupported QP type %d\n",
                       arbel, qp->type );
                return -ENOTSUP;
        }
}
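
/* QPN layout: the SMI and GSI queue pairs sit at fixed offsets from
 * special_qpn_base (one SMI QPN per port, with the GSI QPNs two slots
 * later), while UD and RC queue pairs combine a bitmask-allocated
 * offset from qpn_base with random high bits taken from
 * ARBEL_QPN_RANDOM_MASK; arbel_free_qpn() below masks the random bits
 * off again before releasing the bitmask slot.
 */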

/**
 * Free queue pair number
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void arbel_free_qpn ( struct ib_device *ibdev,
                             struct ib_queue_pair *qp ) {
        struct arbel *arbel = ib_get_drvdata ( ibdev );
        int qpn_offset;

        qpn_offset = ( ( qp->qpn & ~ARBEL_QPN_RANDOM_MASK )
                       - arbel->qpn_base );
        if ( qpn_offset >= 0 )
                arbel_bitmask_free ( arbel->qp_inuse, qpn_offset );
}

/**
 * Calculate transmission rate
 *
 * @v av		Address vector
 * @ret arbel_rate	Arbel rate
 */
static unsigned int arbel_rate ( struct ib_address_vector *av ) {
        return ( ( ( av->rate >= IB_RATE_2_5 ) && ( av->rate <= IB_RATE_120 ) )
                 ? ( av->rate + 5 ) : 0 );
}
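
/* The IB rate enumeration maps onto Arbel's max_stat_rate encoding by
 * a constant offset of 5; rates outside the IB_RATE_2_5..IB_RATE_120
 * range fall back to 0, which (going by the usual Mellanox encoding)
 * requests no static rate limiting.
 */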

/** Queue pair transport service type map */
static uint8_t arbel_qp_st[] = {
        [IB_QPT_SMI] = ARBEL_ST_MLX,
        [IB_QPT_GSI] = ARBEL_ST_MLX,
        [IB_QPT_UD] = ARBEL_ST_UD,
        [IB_QPT_RC] = ARBEL_ST_RC,
};

/**
 * Dump queue pair context (for debugging only)
 *
 * @v arbel		Arbel device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static __attribute__ (( unused )) int
arbel_dump_qpctx ( struct arbel *arbel, struct ib_queue_pair *qp ) {
        struct arbelprm_qp_ee_state_transitions qpctx;
        int rc;

        memset ( &qpctx, 0, sizeof ( qpctx ) );
        if ( ( rc = arbel_cmd_query_qpee ( arbel, qp->qpn, &qpctx ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p QPN %#lx QUERY_QPEE failed: %s\n",
                       arbel, qp->qpn, strerror ( rc ) );
                return rc;
        }
        DBGC ( arbel, "Arbel %p QPN %#lx context:\n", arbel, qp->qpn );
        DBGC_HDA ( arbel, 0, &qpctx.u.dwords[2], ( sizeof ( qpctx ) - 8 ) );

        return 0;
}

/**
 * Create send work queue
 *
 * @v arbel_send_wq	Send work queue
 * @v num_wqes		Number of work queue entries
 * @ret rc		Return status code
 */
static int arbel_create_send_wq ( struct arbel_send_work_queue *arbel_send_wq,
                                  unsigned int num_wqes ) {
        union arbel_send_wqe *wqe;
        union arbel_send_wqe *next_wqe;
        unsigned int wqe_idx_mask;
        unsigned int i;

        /* Allocate work queue */
        arbel_send_wq->wqe_size = ( num_wqes *
                                    sizeof ( arbel_send_wq->wqe[0] ) );
        arbel_send_wq->wqe = malloc_dma ( arbel_send_wq->wqe_size,
                                          sizeof ( arbel_send_wq->wqe[0] ) );
        if ( ! arbel_send_wq->wqe )
                return -ENOMEM;
        memset ( arbel_send_wq->wqe, 0, arbel_send_wq->wqe_size );

        /* Link work queue entries */
        wqe_idx_mask = ( num_wqes - 1 );
        for ( i = 0 ; i < num_wqes ; i++ ) {
                wqe = &arbel_send_wq->wqe[i];
                next_wqe = &arbel_send_wq->wqe[ ( i + 1 ) & wqe_idx_mask ];
                MLX_FILL_1 ( &wqe->next, 0, nda_31_6,
                             ( virt_to_bus ( next_wqe ) >> 6 ) );
                MLX_FILL_1 ( &wqe->next, 1, always1, 1 );
        }

        return 0;
}
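
/* Send WQEs are chained into a ring: nda_31_6 holds bits 31:6 of the
 * next entry's bus address (hence the >> 6), and wqe_idx_mask wraps
 * the final entry back around to the first.
 */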

/**
 * Create receive work queue
 *
 * @v arbel_recv_wq	Receive work queue
 * @v num_wqes		Number of work queue entries
 * @ret rc		Return status code
 */
static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq,
                                  unsigned int num_wqes ) {
        struct arbelprm_recv_wqe *wqe;
        struct arbelprm_recv_wqe *next_wqe;
        unsigned int wqe_idx_mask;
        size_t nds;
        unsigned int i;
        unsigned int j;

        /* Allocate work queue */
        arbel_recv_wq->wqe_size = ( num_wqes *
                                    sizeof ( arbel_recv_wq->wqe[0] ) );
        arbel_recv_wq->wqe = malloc_dma ( arbel_recv_wq->wqe_size,
                                          sizeof ( arbel_recv_wq->wqe[0] ) );
        if ( ! arbel_recv_wq->wqe )
                return -ENOMEM;
        memset ( arbel_recv_wq->wqe, 0, arbel_recv_wq->wqe_size );

        /* Link work queue entries */
        wqe_idx_mask = ( num_wqes - 1 );
        nds = ( ( offsetof ( typeof ( *wqe ), data ) +
                  sizeof ( wqe->data[0] ) ) >> 4 );
        for ( i = 0 ; i < num_wqes ; i++ ) {
                wqe = &arbel_recv_wq->wqe[i].recv;
                next_wqe = &arbel_recv_wq->wqe[( i + 1 ) & wqe_idx_mask].recv;
                MLX_FILL_1 ( &wqe->next, 0, nda_31_6,
                             ( virt_to_bus ( next_wqe ) >> 6 ) );
                MLX_FILL_1 ( &wqe->next, 1, nds, nds );
                for ( j = 0 ; ( ( ( void * ) &wqe->data[j] ) <
                                ( ( void * ) ( wqe + 1 ) ) ) ; j++ ) {
                        MLX_FILL_1 ( &wqe->data[j], 1,
                                     l_key, ARBEL_INVALID_LKEY );
                }
        }

        return 0;
}
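
/* The "nds" written into each receive WQE is the entry's size in
 * 16-byte units (hence the >> 4 above).  Unused scatter entries are
 * given ARBEL_INVALID_LKEY; presumably this ensures that an entry
 * with no buffer posted yet can never cause a stray DMA write.
 */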

/**
 * Create queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int arbel_create_qp ( struct ib_device *ibdev,
                             struct ib_queue_pair *qp ) {
        struct arbel *arbel = ib_get_drvdata ( ibdev );
        struct arbel_queue_pair *arbel_qp;
        struct arbelprm_qp_ee_state_transitions qpctx;
        struct arbelprm_qp_db_record *send_db_rec;
        struct arbelprm_qp_db_record *recv_db_rec;
        physaddr_t send_wqe_base_adr;
        physaddr_t recv_wqe_base_adr;
        physaddr_t wqe_base_adr;
        int rc;

        /* Warn about dysfunctional code
         *
         * Arbel seems to crash the system as soon as the first send
         * WQE completes on an RC queue pair.  (NOPs complete
         * successfully, so this is a problem specific to the work
         * queue rather than the completion queue.)  The cause of this
         * problem has remained unknown for over a year.  Patches to
         * fix this are welcome.
         */
        if ( qp->type == IB_QPT_RC )
                DBG ( "*** WARNING: Arbel RC support is non-functional ***\n" );

        /* Calculate queue pair number */
        if ( ( rc = arbel_alloc_qpn ( ibdev, qp ) ) != 0 )
                goto err_alloc_qpn;

        /* Allocate control structures */
        arbel_qp = zalloc ( sizeof ( *arbel_qp ) );
        if ( ! arbel_qp ) {
                rc = -ENOMEM;
                goto err_arbel_qp;
        }
        arbel_qp->send.doorbell_idx = arbel_send_doorbell_idx ( arbel, qp );
        arbel_qp->recv.doorbell_idx = arbel_recv_doorbell_idx ( arbel, qp );

        /* Create send and receive work queues */
        if ( ( rc = arbel_create_send_wq ( &arbel_qp->send,
                                           qp->send.num_wqes ) ) != 0 )
                goto err_create_send_wq;
        if ( ( rc = arbel_create_recv_wq ( &arbel_qp->recv,
                                           qp->recv.num_wqes ) ) != 0 )
                goto err_create_recv_wq;

        /* Send and receive work queue entries must be within the same 4GB */
        send_wqe_base_adr = virt_to_bus ( arbel_qp->send.wqe );
        recv_wqe_base_adr = virt_to_bus ( arbel_qp->recv.wqe );
        if ( ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) &&
             ( ( ( ( uint64_t ) send_wqe_base_adr ) >> 32 ) !=
               ( ( ( uint64_t ) recv_wqe_base_adr ) >> 32 ) ) ) {
                DBGC ( arbel, "Arbel %p QPN %#lx cannot support send %08lx "
                       "recv %08lx\n", arbel, qp->qpn,
                       send_wqe_base_adr, recv_wqe_base_adr );
                rc = -ENOTSUP;
                goto err_unsupported_address_split;
        }
        wqe_base_adr = send_wqe_base_adr;

        /* Initialise doorbell records */
        send_db_rec = &arbel->db_rec[arbel_qp->send.doorbell_idx].qp;
        MLX_FILL_1 ( send_db_rec, 0, counter, 0 );
        MLX_FILL_2 ( send_db_rec, 1,
                     res, ARBEL_UAR_RES_SQ,
                     qp_number, qp->qpn );
        recv_db_rec = &arbel->db_rec[arbel_qp->recv.doorbell_idx].qp;
        MLX_FILL_1 ( recv_db_rec, 0, counter, 0 );
        MLX_FILL_2 ( recv_db_rec, 1,
                     res, ARBEL_UAR_RES_RQ,
                     qp_number, qp->qpn );

        /* Transition queue to INIT state */
        memset ( &qpctx, 0, sizeof ( qpctx ) );
        MLX_FILL_3 ( &qpctx, 2,
                     qpc_eec_data.de, 1,
                     qpc_eec_data.pm_state, ARBEL_PM_STATE_MIGRATED,
                     qpc_eec_data.st, arbel_qp_st[qp->type] );
        MLX_FILL_4 ( &qpctx, 4,
                     qpc_eec_data.log_rq_size, fls ( qp->recv.num_wqes - 1 ),
                     qpc_eec_data.log_rq_stride,
                     ( fls ( sizeof ( arbel_qp->recv.wqe[0] ) - 1 ) - 4 ),
                     qpc_eec_data.log_sq_size, fls ( qp->send.num_wqes - 1 ),
                     qpc_eec_data.log_sq_stride,
                     ( fls ( sizeof ( arbel_qp->send.wqe[0] ) - 1 ) - 4 ) );
        MLX_FILL_1 ( &qpctx, 5,
                     qpc_eec_data.usr_page, arbel->limits.reserved_uars );
        MLX_FILL_1 ( &qpctx, 10, qpc_eec_data.primary_address_path.port_number,
                     ibdev->port );
        MLX_FILL_1 ( &qpctx, 27, qpc_eec_data.pd, ARBEL_GLOBAL_PD );
        MLX_FILL_H ( &qpctx, 28, qpc_eec_data.wqe_base_adr_h, wqe_base_adr );
        MLX_FILL_1 ( &qpctx, 29, qpc_eec_data.wqe_lkey, arbel->lkey );
        MLX_FILL_1 ( &qpctx, 30, qpc_eec_data.ssc, 1 );
        MLX_FILL_1 ( &qpctx, 33, qpc_eec_data.cqn_snd, qp->send.cq->cqn );
        MLX_FILL_1 ( &qpctx, 34, qpc_eec_data.snd_wqe_base_adr_l,
                     ( send_wqe_base_adr >> 6 ) );
        MLX_FILL_1 ( &qpctx, 35, qpc_eec_data.snd_db_record_index,
                     arbel_qp->send.doorbell_idx );
        MLX_FILL_4 ( &qpctx, 38,
                     qpc_eec_data.rre, 1,
                     qpc_eec_data.rwe, 1,
                     qpc_eec_data.rae, 1,
                     qpc_eec_data.rsc, 1 );
        MLX_FILL_1 ( &qpctx, 41, qpc_eec_data.cqn_rcv, qp->recv.cq->cqn );
        MLX_FILL_1 ( &qpctx, 42, qpc_eec_data.rcv_wqe_base_adr_l,
                     ( recv_wqe_base_adr >> 6 ) );
        MLX_FILL_1 ( &qpctx, 43, qpc_eec_data.rcv_db_record_index,
                     arbel_qp->recv.doorbell_idx );
        if ( ( rc = arbel_cmd_rst2init_qpee ( arbel, qp->qpn,
                                              &qpctx ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p QPN %#lx RST2INIT_QPEE failed: %s\n",
                       arbel, qp->qpn, strerror ( rc ) );
                goto err_rst2init_qpee;
        }
        arbel_qp->state = ARBEL_QP_ST_INIT;

        DBGC ( arbel, "Arbel %p QPN %#lx send ring [%08lx,%08lx), doorbell "
               "%08lx\n", arbel, qp->qpn, virt_to_phys ( arbel_qp->send.wqe ),
               ( virt_to_phys ( arbel_qp->send.wqe ) +
                 arbel_qp->send.wqe_size ),
               virt_to_phys ( send_db_rec ) );
        DBGC ( arbel, "Arbel %p QPN %#lx receive ring [%08lx,%08lx), doorbell "
               "%08lx\n", arbel, qp->qpn, virt_to_phys ( arbel_qp->recv.wqe ),
               ( virt_to_phys ( arbel_qp->recv.wqe ) +
                 arbel_qp->recv.wqe_size ),
               virt_to_phys ( recv_db_rec ) );
        DBGC ( arbel, "Arbel %p QPN %#lx send CQN %#lx receive CQN %#lx\n",
               arbel, qp->qpn, qp->send.cq->cqn, qp->recv.cq->cqn );
        ib_qp_set_drvdata ( qp, arbel_qp );
        return 0;

        arbel_cmd_2rst_qpee ( arbel, qp->qpn );
 err_rst2init_qpee:
        MLX_FILL_1 ( send_db_rec, 1, res, ARBEL_UAR_RES_NONE );
        MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );
 err_unsupported_address_split:
        free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
 err_create_recv_wq:
        free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
 err_create_send_wq:
        free ( arbel_qp );
 err_arbel_qp:
        arbel_free_qpn ( ibdev, qp );
 err_alloc_qpn:
        return rc;
}

/**
 * Modify queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int arbel_modify_qp ( struct ib_device *ibdev,
                             struct ib_queue_pair *qp ) {
        struct arbel *arbel = ib_get_drvdata ( ibdev );
        struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
        struct arbelprm_qp_ee_state_transitions qpctx;
        int rc;

        /* Transition queue to RTR state, if applicable */
        if ( arbel_qp->state < ARBEL_QP_ST_RTR ) {
                memset ( &qpctx, 0, sizeof ( qpctx ) );
                MLX_FILL_2 ( &qpctx, 4,
                             qpc_eec_data.mtu, ARBEL_MTU_2048,
                             qpc_eec_data.msg_max, 31 );
                MLX_FILL_1 ( &qpctx, 7,
                             qpc_eec_data.remote_qpn_een, qp->av.qpn );
                MLX_FILL_2 ( &qpctx, 11,
                             qpc_eec_data.primary_address_path.rnr_retry,
                             ARBEL_RETRY_MAX,
                             qpc_eec_data.primary_address_path.rlid,
                             qp->av.lid );
                MLX_FILL_2 ( &qpctx, 12,
                             qpc_eec_data.primary_address_path.ack_timeout,
                             14 /* 4.096us * 2^(14) = 67ms */,
                             qpc_eec_data.primary_address_path.max_stat_rate,
                             arbel_rate ( &qp->av ) );
                memcpy ( &qpctx.u.dwords[14], &qp->av.gid,
                         sizeof ( qp->av.gid ) );
                MLX_FILL_1 ( &qpctx, 30,
                             qpc_eec_data.retry_count, ARBEL_RETRY_MAX );
                MLX_FILL_1 ( &qpctx, 39,
                             qpc_eec_data.next_rcv_psn, qp->recv.psn );
                MLX_FILL_1 ( &qpctx, 40,
                             qpc_eec_data.ra_buff_indx,
                             ( arbel->limits.reserved_rdbs +
                               ( ( qp->qpn & ~ARBEL_QPN_RANDOM_MASK ) -
                                 arbel->special_qpn_base ) ) );
                if ( ( rc = arbel_cmd_init2rtr_qpee ( arbel, qp->qpn,
                                                      &qpctx ) ) != 0 ) {
                        DBGC ( arbel, "Arbel %p QPN %#lx INIT2RTR_QPEE failed:"
                               " %s\n", arbel, qp->qpn, strerror ( rc ) );
                        return rc;
                }
                arbel_qp->state = ARBEL_QP_ST_RTR;
        }

        /* Transition queue to RTS state, if applicable */
        if ( arbel_qp->state < ARBEL_QP_ST_RTS ) {
                memset ( &qpctx, 0, sizeof ( qpctx ) );
                MLX_FILL_1 ( &qpctx, 11,
                             qpc_eec_data.primary_address_path.rnr_retry,
                             ARBEL_RETRY_MAX );
                MLX_FILL_1 ( &qpctx, 12,
                             qpc_eec_data.primary_address_path.ack_timeout,
                             14 /* 4.096us * 2^(14) = 67ms */ );
                MLX_FILL_2 ( &qpctx, 30,
                             qpc_eec_data.retry_count, ARBEL_RETRY_MAX,
                             qpc_eec_data.sic, 1 );
                MLX_FILL_1 ( &qpctx, 32,
                             qpc_eec_data.next_send_psn, qp->send.psn );
                if ( ( rc = arbel_cmd_rtr2rts_qpee ( arbel, qp->qpn,
                                                     &qpctx ) ) != 0 ) {
                        DBGC ( arbel, "Arbel %p QPN %#lx RTR2RTS_QPEE failed: "
                               "%s\n", arbel, qp->qpn, strerror ( rc ) );
                        return rc;
                }
                arbel_qp->state = ARBEL_QP_ST_RTS;
        }

        /* Update parameters in RTS state */
        memset ( &qpctx, 0, sizeof ( qpctx ) );
        MLX_FILL_1 ( &qpctx, 0, opt_param_mask, ARBEL_QPEE_OPT_PARAM_QKEY );
        MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
        if ( ( rc = arbel_cmd_rts2rts_qpee ( arbel, qp->qpn, &qpctx ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p QPN %#lx RTS2RTS_QPEE failed: %s\n",
                       arbel, qp->qpn, strerror ( rc ) );
                return rc;
        }

        return 0;
}

/**
 * Destroy queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void arbel_destroy_qp ( struct ib_device *ibdev,
                               struct ib_queue_pair *qp ) {
        struct arbel *arbel = ib_get_drvdata ( ibdev );
        struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
        struct arbelprm_qp_db_record *send_db_rec;
        struct arbelprm_qp_db_record *recv_db_rec;
        int rc;

        /* Take ownership back from hardware */
        if ( ( rc = arbel_cmd_2rst_qpee ( arbel, qp->qpn ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p QPN %#lx FATAL 2RST_QPEE failed: "
                       "%s\n", arbel, qp->qpn, strerror ( rc ) );
                /* Leak memory and return; at least we avoid corruption */
                return;
        }

        /* Clear doorbell records */
        send_db_rec = &arbel->db_rec[arbel_qp->send.doorbell_idx].qp;
        recv_db_rec = &arbel->db_rec[arbel_qp->recv.doorbell_idx].qp;
        MLX_FILL_1 ( send_db_rec, 1, res, ARBEL_UAR_RES_NONE );
        MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );

        /* Free memory */
        free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
        free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
        free ( arbel_qp );

        /* Mark queue number as free */
        arbel_free_qpn ( ibdev, qp );
        ib_qp_set_drvdata ( qp, NULL );
}

/***************************************************************************
 *
 * Work request operations
 *
 ***************************************************************************
 */

/**
 * Ring doorbell register in UAR
 *
 * @v arbel		Arbel device
 * @v db_reg		Doorbell register structure
 * @v offset		Address of doorbell
 */
static void arbel_ring_doorbell ( struct arbel *arbel,
                                  union arbelprm_doorbell_register *db_reg,
                                  unsigned int offset ) {

        DBGC2 ( arbel, "Arbel %p ringing doorbell %08x:%08x at %lx\n",
                arbel, ntohl ( db_reg->dword[0] ), ntohl ( db_reg->dword[1] ),
                virt_to_phys ( arbel->uar + offset ) );

        barrier();
        writel ( db_reg->dword[0], ( arbel->uar + offset + 0 ) );
        barrier();
        writel ( db_reg->dword[1], ( arbel->uar + offset + 4 ) );
}
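
/* The barrier() calls above matter: the first ensures that the WQE
 * and doorbell-record updates are globally visible before the
 * doorbell register is touched, and the second keeps the two 32-bit
 * halves of the 64-bit doorbell value in order, since they are
 * written to the UAR as separate dwords.
 */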

/** GID used for GID-less send work queue entries */
static const union ib_gid arbel_no_gid = {
        .bytes = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0 },
};

/**
 * Construct UD send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v dest		Destination address vector
 * @v iobuf		I/O buffer
 * @v wqe		Send work queue entry
 * @ret nds		Work queue entry size
 */
static size_t arbel_fill_ud_send_wqe ( struct ib_device *ibdev,
                                       struct ib_queue_pair *qp __unused,
                                       struct ib_address_vector *dest,
                                       struct io_buffer *iobuf,
                                       union arbel_send_wqe *wqe ) {
        struct arbel *arbel = ib_get_drvdata ( ibdev );
        const union ib_gid *gid;

        /* Construct this work queue entry */
        MLX_FILL_1 ( &wqe->ud.ctrl, 0, always1, 1 );
        MLX_FILL_2 ( &wqe->ud.ud, 0,
                     ud_address_vector.pd, ARBEL_GLOBAL_PD,
                     ud_address_vector.port_number, ibdev->port );
        MLX_FILL_2 ( &wqe->ud.ud, 1,
                     ud_address_vector.rlid, dest->lid,
                     ud_address_vector.g, dest->gid_present );
        MLX_FILL_2 ( &wqe->ud.ud, 2,
                     ud_address_vector.max_stat_rate, arbel_rate ( dest ),
                     ud_address_vector.msg, 3 );
        MLX_FILL_1 ( &wqe->ud.ud, 3, ud_address_vector.sl, dest->sl );
        gid = ( dest->gid_present ? &dest->gid : &arbel_no_gid );
        memcpy ( &wqe->ud.ud.u.dwords[4], gid, sizeof ( *gid ) );
        MLX_FILL_1 ( &wqe->ud.ud, 8, destination_qp, dest->qpn );
        MLX_FILL_1 ( &wqe->ud.ud, 9, q_key, dest->qkey );
        MLX_FILL_1 ( &wqe->ud.data[0], 0, byte_count, iob_len ( iobuf ) );
        MLX_FILL_1 ( &wqe->ud.data[0], 1, l_key, arbel->lkey );
        MLX_FILL_H ( &wqe->ud.data[0], 2,
                     local_address_h, virt_to_bus ( iobuf->data ) );
        MLX_FILL_1 ( &wqe->ud.data[0], 3,
                     local_address_l, virt_to_bus ( iobuf->data ) );

        return ( offsetof ( typeof ( wqe->ud ), data[1] ) >> 4 );
}

/**
 * Construct MLX send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v dest		Destination address vector
 * @v iobuf		I/O buffer
 * @v wqe		Send work queue entry
 * @ret nds		Work queue entry size
 */
static size_t arbel_fill_mlx_send_wqe ( struct ib_device *ibdev,
                                        struct ib_queue_pair *qp,
                                        struct ib_address_vector *dest,
                                        struct io_buffer *iobuf,
                                        union arbel_send_wqe *wqe ) {
        struct arbel *arbel = ib_get_drvdata ( ibdev );
        struct io_buffer headers;

        /* Construct IB headers */
        iob_populate ( &headers, &wqe->mlx.headers, 0,
                       sizeof ( wqe->mlx.headers ) );
        iob_reserve ( &headers, sizeof ( wqe->mlx.headers ) );
        ib_push ( ibdev, &headers, qp, iob_len ( iobuf ), dest );

        /* Construct this work queue entry */
        MLX_FILL_5 ( &wqe->mlx.ctrl, 0,
                     c, 1 /* generate completion */,
                     icrc, 0 /* generate ICRC */,
                     max_statrate, arbel_rate ( dest ),
                     slr, 0,
                     v15, ( ( qp->ext_qpn == IB_QPN_SMI ) ? 1 : 0 ) );
        MLX_FILL_1 ( &wqe->mlx.ctrl, 1, rlid, dest->lid );
        MLX_FILL_1 ( &wqe->mlx.data[0], 0,
                     byte_count, iob_len ( &headers ) );
        MLX_FILL_1 ( &wqe->mlx.data[0], 1, l_key, arbel->lkey );
        MLX_FILL_H ( &wqe->mlx.data[0], 2,
                     local_address_h, virt_to_bus ( headers.data ) );
        MLX_FILL_1 ( &wqe->mlx.data[0], 3,
                     local_address_l, virt_to_bus ( headers.data ) );
        MLX_FILL_1 ( &wqe->mlx.data[1], 0,
                     byte_count, ( iob_len ( iobuf ) + 4 /* ICRC */ ) );
        MLX_FILL_1 ( &wqe->mlx.data[1], 1, l_key, arbel->lkey );
        MLX_FILL_H ( &wqe->mlx.data[1], 2,
                     local_address_h, virt_to_bus ( iobuf->data ) );
        MLX_FILL_1 ( &wqe->mlx.data[1], 3,
                     local_address_l, virt_to_bus ( iobuf->data ) );

        return ( offsetof ( typeof ( wqe->mlx ), data[2] ) >> 4 );
}

/**
 * Construct RC send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v dest		Destination address vector
 * @v iobuf		I/O buffer
 * @v wqe		Send work queue entry
 * @ret nds		Work queue entry size
 */
static size_t arbel_fill_rc_send_wqe ( struct ib_device *ibdev,
				       struct ib_queue_pair *qp __unused,
				       struct ib_address_vector *dest __unused,
				       struct io_buffer *iobuf,
				       union arbel_send_wqe *wqe ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );

	/* Construct this work queue entry */
	MLX_FILL_1 ( &wqe->rc.ctrl, 0, always1, 1 );
	MLX_FILL_1 ( &wqe->rc.data[0], 0, byte_count, iob_len ( iobuf ) );
	MLX_FILL_1 ( &wqe->rc.data[0], 1, l_key, arbel->lkey );
	MLX_FILL_H ( &wqe->rc.data[0], 2,
		     local_address_h, virt_to_bus ( iobuf->data ) );
	MLX_FILL_1 ( &wqe->rc.data[0], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );

	return ( offsetof ( typeof ( wqe->rc ), data[1] ) >> 4 );
}

/** Work queue entry constructors */
static size_t
( * arbel_fill_send_wqe[] ) ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp,
			      struct ib_address_vector *dest,
			      struct io_buffer *iobuf,
			      union arbel_send_wqe *wqe ) = {
	[IB_QPT_SMI] = arbel_fill_mlx_send_wqe,
	[IB_QPT_GSI] = arbel_fill_mlx_send_wqe,
	[IB_QPT_UD] = arbel_fill_ud_send_wqe,
	[IB_QPT_RC] = arbel_fill_rc_send_wqe,
};

/**
 * Post send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v dest		Destination address vector
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int arbel_post_send ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp,
			     struct ib_address_vector *dest,
			     struct io_buffer *iobuf ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
	struct ib_work_queue *wq = &qp->send;
	struct arbel_send_work_queue *arbel_send_wq = &arbel_qp->send;
	union arbel_send_wqe *prev_wqe;
	union arbel_send_wqe *wqe;
	struct arbelprm_qp_db_record *qp_db_rec;
	union arbelprm_doorbell_register db_reg;
	unsigned long wqe_idx_mask;
	size_t nds;

	/* Allocate work queue entry */
	wqe_idx_mask = ( wq->num_wqes - 1 );
	if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
		DBGC ( arbel, "Arbel %p QPN %#lx send queue full\n",
		       arbel, qp->qpn );
		return -ENOBUFS;
	}
	wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
	prev_wqe = &arbel_send_wq->wqe[ ( wq->next_idx - 1 ) & wqe_idx_mask ];
	wqe = &arbel_send_wq->wqe[wq->next_idx & wqe_idx_mask];

	/* Construct work queue entry */
	memset ( ( ( ( void * ) wqe ) + sizeof ( wqe->next ) ), 0,
		 ( sizeof ( *wqe ) - sizeof ( wqe->next ) ) );
	assert ( qp->type < ( sizeof ( arbel_fill_send_wqe ) /
			      sizeof ( arbel_fill_send_wqe[0] ) ) );
	assert ( arbel_fill_send_wqe[qp->type] != NULL );
	nds = arbel_fill_send_wqe[qp->type] ( ibdev, qp, dest, iobuf, wqe );
	DBGCP ( arbel, "Arbel %p QPN %#lx posting send WQE %#lx:\n",
		arbel, qp->qpn, ( wq->next_idx & wqe_idx_mask ) );
	DBGCP_HDA ( arbel, virt_to_phys ( wqe ), wqe, sizeof ( *wqe ) );

	/* Update previous work queue entry's "next" field */
	MLX_SET ( &prev_wqe->next, nopcode, ARBEL_OPCODE_SEND );
	MLX_FILL_3 ( &prev_wqe->next, 1,
		     nds, nds,
		     f, 0,
		     always1, 1 );

	/* Update doorbell record */
	barrier();
	qp_db_rec = &arbel->db_rec[arbel_send_wq->doorbell_idx].qp;
	MLX_FILL_1 ( qp_db_rec, 0,
		     counter, ( ( wq->next_idx + 1 ) & 0xffff ) );

	/* Ring doorbell register */
	MLX_FILL_4 ( &db_reg.send, 0,
		     nopcode, ARBEL_OPCODE_SEND,
		     f, 0,
		     wqe_counter, ( wq->next_idx & 0xffff ),
		     wqe_cnt, 1 );
	MLX_FILL_2 ( &db_reg.send, 1,
		     nds, nds,
		     qpn, qp->qpn );
	arbel_ring_doorbell ( arbel, &db_reg, ARBEL_DB_POST_SND_OFFSET );

	/* Update work queue's index */
	wq->next_idx++;

	return 0;
}
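
/* Both work queues are power-of-two rings, so "index modulo queue size"
 * reduces to a bitwise AND with ( num_wqes - 1 ), and next_idx can be
 * left free-running.  A minimal sketch of the wraparound behaviour
 * (assumes only standard C):
 */
#if 0
#include <assert.h>

static void example_ring_index ( void ) {
	unsigned long next_idx = 7;	/* free-running producer counter */
	unsigned long mask = ( 8 - 1 );	/* queue of 8 entries */

	assert ( ( next_idx & mask ) == 7 );
	next_idx++;
	assert ( ( next_idx & mask ) == 0 );	/* wrapped back to slot 0 */
}
#endif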

/**
 * Post receive work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int arbel_post_recv ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp,
			     struct io_buffer *iobuf ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
	struct ib_work_queue *wq = &qp->recv;
	struct arbel_recv_work_queue *arbel_recv_wq = &arbel_qp->recv;
	struct arbelprm_recv_wqe *wqe;
	union arbelprm_doorbell_record *db_rec;
	unsigned int wqe_idx_mask;

	/* Allocate work queue entry */
	wqe_idx_mask = ( wq->num_wqes - 1 );
	if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
		DBGC ( arbel, "Arbel %p QPN %#lx receive queue full\n",
		       arbel, qp->qpn );
		return -ENOBUFS;
	}
	wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
	wqe = &arbel_recv_wq->wqe[wq->next_idx & wqe_idx_mask].recv;

	/* Construct work queue entry */
	MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_tailroom ( iobuf ) );
	MLX_FILL_1 ( &wqe->data[0], 1, l_key, arbel->lkey );
	MLX_FILL_H ( &wqe->data[0], 2,
		     local_address_h, virt_to_bus ( iobuf->data ) );
	MLX_FILL_1 ( &wqe->data[0], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );

	/* Update doorbell record */
	barrier();
	db_rec = &arbel->db_rec[arbel_recv_wq->doorbell_idx];
	MLX_FILL_1 ( &db_rec->qp, 0,
		     counter, ( ( wq->next_idx + 1 ) & 0xffff ) );

	/* Update work queue's index */
	wq->next_idx++;

	return 0;
}

/**
 * Handle completion
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @v cqe		Hardware completion queue entry
 * @ret rc		Return status code
 */
static int arbel_complete ( struct ib_device *ibdev,
			    struct ib_completion_queue *cq,
			    union arbelprm_completion_entry *cqe ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq;
	struct ib_queue_pair *qp;
	struct arbel_queue_pair *arbel_qp;
	struct arbel_send_work_queue *arbel_send_wq;
	struct arbel_recv_work_queue *arbel_recv_wq;
	struct arbelprm_recv_wqe *recv_wqe;
	struct io_buffer *iobuf;
	struct ib_address_vector recv_source;
	struct ib_global_route_header *grh;
	struct ib_address_vector *source;
	unsigned int opcode;
	unsigned long qpn;
	int is_send;
	unsigned long wqe_adr;
	unsigned long wqe_idx;
	size_t len;
	int rc = 0;

	/* Parse completion */
	qpn = MLX_GET ( &cqe->normal, my_qpn );
	is_send = MLX_GET ( &cqe->normal, s );
	wqe_adr = ( MLX_GET ( &cqe->normal, wqe_adr ) << 6 );
	opcode = MLX_GET ( &cqe->normal, opcode );
	if ( opcode >= ARBEL_OPCODE_RECV_ERROR ) {
		/* "s" field is not valid for error opcodes */
		is_send = ( opcode == ARBEL_OPCODE_SEND_ERROR );
		DBGC ( arbel, "Arbel %p CQN %#lx %s QPN %#lx syndrome %#x "
		       "vendor %#x\n", arbel, cq->cqn,
		       ( is_send ? "send" : "recv" ), qpn,
		       MLX_GET ( &cqe->error, syndrome ),
		       MLX_GET ( &cqe->error, vendor_code ) );
		DBGC_HDA ( arbel, virt_to_phys ( cqe ), cqe, sizeof ( *cqe ) );
		rc = -EIO;
		/* Don't return immediately; propagate error to completer */
	}

	/* Identify work queue */
	wq = ib_find_wq ( cq, qpn, is_send );
	if ( ! wq ) {
		DBGC ( arbel, "Arbel %p CQN %#lx unknown %s QPN %#lx\n",
		       arbel, cq->cqn, ( is_send ? "send" : "recv" ), qpn );
		return -EIO;
	}
	qp = wq->qp;
	arbel_qp = ib_qp_get_drvdata ( qp );
	arbel_send_wq = &arbel_qp->send;
	arbel_recv_wq = &arbel_qp->recv;

	/* Identify work queue entry index */
	if ( is_send ) {
		wqe_idx = ( ( wqe_adr - virt_to_bus ( arbel_send_wq->wqe ) ) /
			    sizeof ( arbel_send_wq->wqe[0] ) );
		assert ( wqe_idx < qp->send.num_wqes );
	} else {
		wqe_idx = ( ( wqe_adr - virt_to_bus ( arbel_recv_wq->wqe ) ) /
			    sizeof ( arbel_recv_wq->wqe[0] ) );
		assert ( wqe_idx < qp->recv.num_wqes );
	}
	DBGCP ( arbel, "Arbel %p CQN %#lx QPN %#lx %s WQE %#lx completed:\n",
		arbel, cq->cqn, qp->qpn, ( is_send ? "send" : "recv" ),
		wqe_idx );
	DBGCP_HDA ( arbel, virt_to_phys ( cqe ), cqe, sizeof ( *cqe ) );

	/* Identify I/O buffer */
	iobuf = wq->iobufs[wqe_idx];
	if ( ! iobuf ) {
		DBGC ( arbel, "Arbel %p CQN %#lx QPN %#lx empty %s WQE %#lx\n",
		       arbel, cq->cqn, qp->qpn, ( is_send ? "send" : "recv" ),
		       wqe_idx );
		return -EIO;
	}
	wq->iobufs[wqe_idx] = NULL;

	if ( is_send ) {
		/* Hand off to completion handler */
		ib_complete_send ( ibdev, qp, iobuf, rc );
	} else {
		/* Set received length */
		len = MLX_GET ( &cqe->normal, byte_cnt );
		recv_wqe = &arbel_recv_wq->wqe[wqe_idx].recv;
		assert ( MLX_GET ( &recv_wqe->data[0], local_address_l ) ==
			 virt_to_bus ( iobuf->data ) );
		assert ( MLX_GET ( &recv_wqe->data[0], byte_count ) ==
			 iob_tailroom ( iobuf ) );
		MLX_FILL_1 ( &recv_wqe->data[0], 0, byte_count, 0 );
		MLX_FILL_1 ( &recv_wqe->data[0], 1,
			     l_key, ARBEL_INVALID_LKEY );
		assert ( len <= iob_tailroom ( iobuf ) );
		iob_put ( iobuf, len );
		switch ( qp->type ) {
		case IB_QPT_SMI:
		case IB_QPT_GSI:
		case IB_QPT_UD:
			assert ( iob_len ( iobuf ) >= sizeof ( *grh ) );
			grh = iobuf->data;
			iob_pull ( iobuf, sizeof ( *grh ) );
			/* Construct address vector */
			source = &recv_source;
			memset ( source, 0, sizeof ( *source ) );
			source->qpn = MLX_GET ( &cqe->normal, rqpn );
			source->lid = MLX_GET ( &cqe->normal, rlid );
			source->sl = MLX_GET ( &cqe->normal, sl );
			source->gid_present = MLX_GET ( &cqe->normal, g );
			memcpy ( &source->gid, &grh->sgid,
				 sizeof ( source->gid ) );
			break;
		case IB_QPT_RC:
			source = &qp->av;
			break;
		default:
			assert ( 0 );
			return -EINVAL;
		}
		/* Hand off to completion handler */
		ib_complete_recv ( ibdev, qp, source, iobuf, rc );
	}

	return rc;
}
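
/* The completion entry reports only the bus address of the completed
 * WQE; the driver recovers the ring index by subtracting the ring's
 * base address and dividing by the entry size.  A worked example with
 * made-up numbers (illustrative only):
 */
#if 0
#include <assert.h>

static void example_wqe_index ( void ) {
	unsigned long base = 0x100000;		/* ring base bus address */
	unsigned long entry_size = 64;		/* bytes per WQE */
	unsigned long wqe_adr = 0x100180;	/* reported by hardware */

	assert ( ( ( wqe_adr - base ) / entry_size ) == 6 );
}
#endif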

/**
 * Poll completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 */
static void arbel_poll_cq ( struct ib_device *ibdev,
			    struct ib_completion_queue *cq ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_completion_queue *arbel_cq = ib_cq_get_drvdata ( cq );
	struct arbelprm_cq_ci_db_record *ci_db_rec;
	union arbelprm_completion_entry *cqe;
	unsigned int cqe_idx_mask;
	int rc;

	while ( 1 ) {
		/* Look for completion entry */
		cqe_idx_mask = ( cq->num_cqes - 1 );
		cqe = &arbel_cq->cqe[cq->next_idx & cqe_idx_mask];
		if ( MLX_GET ( &cqe->normal, owner ) != 0 ) {
			/* Entry still owned by hardware; end of poll */
			break;
		}

		/* Handle completion */
		if ( ( rc = arbel_complete ( ibdev, cq, cqe ) ) != 0 ) {
			DBGC ( arbel, "Arbel %p CQN %#lx failed to complete: "
			       "%s\n", arbel, cq->cqn, strerror ( rc ) );
			DBGC_HD ( arbel, cqe, sizeof ( *cqe ) );
		}

		/* Return ownership to hardware */
		MLX_FILL_1 ( &cqe->normal, 7, owner, 1 );
		barrier();

		/* Update completion queue's index */
		cq->next_idx++;

		/* Update doorbell record */
		ci_db_rec = &arbel->db_rec[arbel_cq->ci_doorbell_idx].cq_ci;
		MLX_FILL_1 ( ci_db_rec, 0,
			     counter, ( cq->next_idx & 0xffffffffUL ) );
	}
}
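
/* CQE ownership alternates between hardware and software: the device
 * clears the owner bit when it fills an entry, and the driver sets it
 * again once the entry has been consumed, so polling needs no interrupt
 * and no head/tail register read.  The sketch below shows the same
 * handshake on a plain array (illustrative only):
 */
#if 0
struct example_cqe {
	int owner;	/* 1 = owned by hardware, 0 = ready for software */
	int payload;
};

static void example_poll ( struct example_cqe *ring, unsigned int size,
			   unsigned long *next_idx ) {
	struct example_cqe *cqe;

	while ( 1 ) {
		cqe = &ring[ *next_idx & ( size - 1 ) ];
		if ( cqe->owner )
			break;		/* still owned by hardware */
		/* ... consume cqe->payload ... */
		cqe->owner = 1;		/* return ownership */
		(*next_idx)++;
	}
}
#endif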

/***************************************************************************
 *
 * Event queues
 *
 ***************************************************************************
 */

/**
 * Create event queue
 *
 * @v arbel		Arbel device
 * @ret rc		Return status code
 */
static int arbel_create_eq ( struct arbel *arbel ) {
	struct arbel_event_queue *arbel_eq = &arbel->eq;
	struct arbelprm_eqc eqctx;
	struct arbelprm_event_mask mask;
	unsigned int i;
	int rc;

	/* Select event queue number */
	arbel_eq->eqn = arbel->limits.reserved_eqs;

	/* Calculate doorbell address */
	arbel_eq->doorbell = ( arbel->eq_ci_doorbells +
			       ARBEL_DB_EQ_OFFSET ( arbel_eq->eqn ) );

	/* Allocate event queue itself */
	arbel_eq->eqe_size =
		( ARBEL_NUM_EQES * sizeof ( arbel_eq->eqe[0] ) );
	arbel_eq->eqe = malloc_dma ( arbel_eq->eqe_size,
				     sizeof ( arbel_eq->eqe[0] ) );
	if ( ! arbel_eq->eqe ) {
		rc = -ENOMEM;
		goto err_eqe;
	}
	memset ( arbel_eq->eqe, 0, arbel_eq->eqe_size );
	for ( i = 0 ; i < ARBEL_NUM_EQES ; i++ ) {
		MLX_FILL_1 ( &arbel_eq->eqe[i].generic, 7, owner, 1 );
	}
	barrier();

	/* Hand queue over to hardware */
	memset ( &eqctx, 0, sizeof ( eqctx ) );
	MLX_FILL_1 ( &eqctx, 0, st, 0xa /* "Fired" */ );
	MLX_FILL_H ( &eqctx, 1,
		     start_address_h, virt_to_phys ( arbel_eq->eqe ) );
	MLX_FILL_1 ( &eqctx, 2,
		     start_address_l, virt_to_phys ( arbel_eq->eqe ) );
	MLX_FILL_1 ( &eqctx, 3, log_eq_size, fls ( ARBEL_NUM_EQES - 1 ) );
	MLX_FILL_1 ( &eqctx, 6, pd, ARBEL_GLOBAL_PD );
	MLX_FILL_1 ( &eqctx, 7, lkey, arbel->lkey );
	if ( ( rc = arbel_cmd_sw2hw_eq ( arbel, arbel_eq->eqn,
					 &eqctx ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p EQN %#lx SW2HW_EQ failed: %s\n",
		       arbel, arbel_eq->eqn, strerror ( rc ) );
		goto err_sw2hw_eq;
	}

	/* Map events to this event queue */
	memset ( &mask, 0xff, sizeof ( mask ) );
	if ( ( rc = arbel_cmd_map_eq ( arbel,
				       ( ARBEL_MAP_EQ | arbel_eq->eqn ),
				       &mask ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p EQN %#lx MAP_EQ failed: %s\n",
		       arbel, arbel_eq->eqn, strerror ( rc ) );
		goto err_map_eq;
	}

	DBGC ( arbel, "Arbel %p EQN %#lx ring [%08lx,%08lx), doorbell %08lx\n",
	       arbel, arbel_eq->eqn, virt_to_phys ( arbel_eq->eqe ),
	       ( virt_to_phys ( arbel_eq->eqe ) + arbel_eq->eqe_size ),
	       virt_to_phys ( arbel_eq->doorbell ) );
	return 0;

 err_map_eq:
	arbel_cmd_hw2sw_eq ( arbel, arbel_eq->eqn, &eqctx );
 err_sw2hw_eq:
	free_dma ( arbel_eq->eqe, arbel_eq->eqe_size );
 err_eqe:
	memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
	return rc;
}
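
/* Queue sizes are communicated to the hardware as log2 values.  For a
 * power-of-two N, fls ( N - 1 ) yields log2(N), since fls() returns the
 * 1-based position of the most significant set bit.  A quick sanity
 * check (assumes an fls() with the usual semantics, as used throughout
 * this file):
 */
#if 0
#include <assert.h>

static void example_log2_size ( void ) {
	/* fls ( 8 - 1 ) = fls ( 0b111 ) = 3, and 2^3 == 8 */
	assert ( fls ( 8 - 1 ) == 3 );
	/* fls ( 1 - 1 ) = fls ( 0 ) = 0: a single-entry queue */
	assert ( fls ( 1 - 1 ) == 0 );
}
#endif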

/**
 * Destroy event queue
 *
 * @v arbel		Arbel device
 */
static void arbel_destroy_eq ( struct arbel *arbel ) {
	struct arbel_event_queue *arbel_eq = &arbel->eq;
	struct arbelprm_eqc eqctx;
	struct arbelprm_event_mask mask;
	int rc;

	/* Unmap events from event queue */
	memset ( &mask, 0, sizeof ( mask ) );
	MLX_FILL_1 ( &mask, 1, port_state_change, 1 );
	if ( ( rc = arbel_cmd_map_eq ( arbel,
				       ( ARBEL_UNMAP_EQ | arbel_eq->eqn ),
				       &mask ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p EQN %#lx FATAL MAP_EQ failed to "
		       "unmap: %s\n", arbel, arbel_eq->eqn, strerror ( rc ) );
		/* Continue; HCA may die but system should survive */
	}

	/* Take ownership back from hardware */
	if ( ( rc = arbel_cmd_hw2sw_eq ( arbel, arbel_eq->eqn,
					 &eqctx ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p EQN %#lx FATAL HW2SW_EQ failed: %s\n",
		       arbel, arbel_eq->eqn, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}

	/* Free memory */
	free_dma ( arbel_eq->eqe, arbel_eq->eqe_size );
	memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
}

/**
 * Handle port state event
 *
 * @v arbel		Arbel device
 * @v eqe		Port state change event queue entry
 */
static void arbel_event_port_state_change ( struct arbel *arbel,
					    union arbelprm_event_entry *eqe ) {
	unsigned int port;
	int link_up;

	/* Get port and link status */
	port = ( MLX_GET ( &eqe->port_state_change, data.p ) - 1 );
	link_up = ( MLX_GET ( &eqe->generic, event_sub_type ) & 0x04 );
	DBGC ( arbel, "Arbel %p port %d link %s\n", arbel, ( port + 1 ),
	       ( link_up ? "up" : "down" ) );

	/* Sanity check */
	if ( port >= ARBEL_NUM_PORTS ) {
		DBGC ( arbel, "Arbel %p port %d does not exist!\n",
		       arbel, ( port + 1 ) );
		return;
	}

	/* Update MAD parameters */
	ib_smc_update ( arbel->ibdev[port], arbel_mad );
}

/**
 * Poll event queue
 *
 * @v ibdev		Infiniband device
 */
static void arbel_poll_eq ( struct ib_device *ibdev ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_event_queue *arbel_eq = &arbel->eq;
	union arbelprm_event_entry *eqe;
	union arbelprm_eq_doorbell_register db_reg;
	unsigned int eqe_idx_mask;
	unsigned int event_type;

	/* No event is generated upon reaching INIT, so we must poll
	 * separately for link state changes while we remain DOWN.
	 */
	if ( ib_is_open ( ibdev ) &&
	     ( ibdev->port_state == IB_PORT_STATE_DOWN ) ) {
		ib_smc_update ( ibdev, arbel_mad );
	}

	/* Poll event queue */
	while ( 1 ) {
		/* Look for event entry */
		eqe_idx_mask = ( ARBEL_NUM_EQES - 1 );
		eqe = &arbel_eq->eqe[arbel_eq->next_idx & eqe_idx_mask];
		if ( MLX_GET ( &eqe->generic, owner ) != 0 ) {
			/* Entry still owned by hardware; end of poll */
			break;
		}
		DBGCP ( arbel, "Arbel %p EQN %#lx event:\n",
			arbel, arbel_eq->eqn );
		DBGCP_HDA ( arbel, virt_to_phys ( eqe ),
			    eqe, sizeof ( *eqe ) );

		/* Handle event */
		event_type = MLX_GET ( &eqe->generic, event_type );
		switch ( event_type ) {
		case ARBEL_EV_PORT_STATE_CHANGE:
			arbel_event_port_state_change ( arbel, eqe );
			break;
		default:
			DBGC ( arbel, "Arbel %p EQN %#lx unrecognised event "
			       "type %#x:\n",
			       arbel, arbel_eq->eqn, event_type );
			DBGC_HDA ( arbel, virt_to_phys ( eqe ),
				   eqe, sizeof ( *eqe ) );
			break;
		}

		/* Return ownership to hardware */
		MLX_FILL_1 ( &eqe->generic, 7, owner, 1 );
		barrier();

		/* Update event queue's index */
		arbel_eq->next_idx++;

		/* Ring doorbell */
		MLX_FILL_1 ( &db_reg.ci, 0, ci, arbel_eq->next_idx );
		writel ( db_reg.dword[0], arbel_eq->doorbell );
	}
}

/***************************************************************************
 *
 * Firmware control
 *
 ***************************************************************************
 */

/**
 * Map virtual to physical address for firmware usage
 *
 * @v arbel		Arbel device
 * @v map		Mapping function
 * @v va		Virtual address
 * @v pa		Physical address
 * @v len		Length of region
 * @ret rc		Return status code
 */
static int arbel_map_vpm ( struct arbel *arbel,
			   int ( *map ) ( struct arbel *arbel,
			     const struct arbelprm_virtual_physical_mapping* ),
			   uint64_t va, physaddr_t pa, size_t len ) {
	struct arbelprm_virtual_physical_mapping mapping;
	physaddr_t start;
	physaddr_t low;
	physaddr_t high;
	physaddr_t end;
	size_t size;
	int rc;

	/* Sanity checks */
	assert ( ( va & ( ARBEL_PAGE_SIZE - 1 ) ) == 0 );
	assert ( ( pa & ( ARBEL_PAGE_SIZE - 1 ) ) == 0 );
	assert ( ( len & ( ARBEL_PAGE_SIZE - 1 ) ) == 0 );

	/* Calculate starting points */
	start = pa;
	end = ( start + len );
	size = ( 1UL << ( fls ( start ^ end ) - 1 ) );
	low = high = ( end & ~( size - 1 ) );
	assert ( start < low );
	assert ( high <= end );

	/* These mappings tend to generate huge volumes of
	 * uninteresting debug data, which basically makes it
	 * impossible to use debugging otherwise.
	 */
	DBG_DISABLE ( DBGLVL_LOG | DBGLVL_EXTRA );

	/* Map blocks in descending order of size */
	while ( size >= ARBEL_PAGE_SIZE ) {

		/* Find the next candidate block */
		if ( ( low - size ) >= start ) {
			low -= size;
			pa = low;
		} else if ( ( high + size ) <= end ) {
			pa = high;
			high += size;
		} else {
			size >>= 1;
			continue;
		}
		assert ( ( va & ( size - 1 ) ) == 0 );
		assert ( ( pa & ( size - 1 ) ) == 0 );

		/* Map this block */
		memset ( &mapping, 0, sizeof ( mapping ) );
		MLX_FILL_1 ( &mapping, 0, va_h, ( va >> 32 ) );
		MLX_FILL_1 ( &mapping, 1, va_l, ( va >> 12 ) );
		MLX_FILL_H ( &mapping, 2, pa_h, pa );
		MLX_FILL_2 ( &mapping, 3,
			     log2size, ( ( fls ( size ) - 1 ) - 12 ),
			     pa_l, ( pa >> 12 ) );
		if ( ( rc = map ( arbel, &mapping ) ) != 0 ) {
			DBG_ENABLE ( DBGLVL_LOG | DBGLVL_EXTRA );
			DBGC ( arbel, "Arbel %p could not map %08llx+%zx to "
			       "%08lx: %s\n",
			       arbel, va, size, pa, strerror ( rc ) );
			return rc;
		}
		va += size;
	}
	assert ( low == start );
	assert ( high == end );

	DBG_ENABLE ( DBGLVL_LOG | DBGLVL_EXTRA );
	return 0;
}
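
/* arbel_map_vpm() has to cover an arbitrary page-aligned [start,end)
 * range using naturally-aligned power-of-two blocks, in as few mapping
 * commands as possible.  It picks the largest aligned boundary inside
 * the range and grows outwards from it, halving the block size whenever
 * neither side fits.  A standalone demonstration of the same walk
 * (illustrative only; fls() assumed to have the usual semantics):
 */
#if 0
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 0x1000UL

static void example_map_blocks ( unsigned long start, unsigned long end ) {
	unsigned long size = ( 1UL << ( fls ( start ^ end ) - 1 ) );
	unsigned long low, high;

	low = high = ( end & ~( size - 1 ) );
	while ( size >= EXAMPLE_PAGE_SIZE ) {
		if ( ( low - size ) >= start ) {
			low -= size;
			printf ( "map %#lx+%#lx\n", low, size );
		} else if ( ( high + size ) <= end ) {
			printf ( "map %#lx+%#lx\n", high, size );
			high += size;
		} else {
			size >>= 1;
		}
	}
	/* e.g. [0x3000,0xd000) maps as 0x4000+0x4000, 0x8000+0x4000,
	 * 0x3000+0x1000 and 0xc000+0x1000: four commands, every block
	 * naturally aligned.
	 */
}
#endif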

/**
 * Start firmware running
 *
 * @v arbel		Arbel device
 * @ret rc		Return status code
 */
static int arbel_start_firmware ( struct arbel *arbel ) {
	struct arbelprm_query_fw fw;
	struct arbelprm_access_lam lam;
	unsigned int fw_pages;
	size_t fw_len;
	physaddr_t fw_base;
	uint64_t eq_set_ci_base_addr;
	int rc;

	/* Get firmware parameters */
	if ( ( rc = arbel_cmd_query_fw ( arbel, &fw ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not query firmware: %s\n",
		       arbel, strerror ( rc ) );
		goto err_query_fw;
	}
	DBGC ( arbel, "Arbel %p firmware version %d.%d.%d\n", arbel,
	       MLX_GET ( &fw, fw_rev_major ), MLX_GET ( &fw, fw_rev_minor ),
	       MLX_GET ( &fw, fw_rev_subminor ) );
	fw_pages = MLX_GET ( &fw, fw_pages );
	DBGC ( arbel, "Arbel %p requires %d kB for firmware\n",
	       arbel, ( fw_pages * 4 ) );
	eq_set_ci_base_addr =
		( ( (uint64_t) MLX_GET ( &fw, eq_set_ci_base_addr_h ) << 32 ) |
		  ( (uint64_t) MLX_GET ( &fw, eq_set_ci_base_addr_l ) ) );
	arbel->eq_ci_doorbells = ioremap ( eq_set_ci_base_addr, 0x200 );

	/* Enable locally-attached memory.  Ignore failure; there may
	 * be no attached memory.
	 */
	arbel_cmd_enable_lam ( arbel, &lam );

	/* Allocate firmware pages and map firmware area */
	fw_len = ( fw_pages * ARBEL_PAGE_SIZE );
	if ( ! arbel->firmware_area ) {
		arbel->firmware_len = fw_len;
		arbel->firmware_area = umalloc ( arbel->firmware_len );
		if ( ! arbel->firmware_area ) {
			rc = -ENOMEM;
			goto err_alloc_fa;
		}
	} else {
		assert ( arbel->firmware_len == fw_len );
	}
	fw_base = user_to_phys ( arbel->firmware_area, 0 );
	DBGC ( arbel, "Arbel %p firmware area at [%08lx,%08lx)\n",
	       arbel, fw_base, ( fw_base + fw_len ) );
	if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_fa,
				    0, fw_base, fw_len ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not map firmware: %s\n",
		       arbel, strerror ( rc ) );
		goto err_map_fa;
	}

	/* Start firmware */
	if ( ( rc = arbel_cmd_run_fw ( arbel ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not run firmware: %s\n",
		       arbel, strerror ( rc ) );
		goto err_run_fw;
	}

	DBGC ( arbel, "Arbel %p firmware started\n", arbel );
	return 0;

 err_run_fw:
	arbel_cmd_unmap_fa ( arbel );
 err_map_fa:
 err_alloc_fa:
 err_query_fw:
	return rc;
}

/**
 * Stop firmware running
 *
 * @v arbel		Arbel device
 */
static void arbel_stop_firmware ( struct arbel *arbel ) {
	int rc;

	if ( ( rc = arbel_cmd_unmap_fa ( arbel ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p FATAL could not stop firmware: %s\n",
		       arbel, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		arbel->firmware_area = UNULL;
		return;
	}
}

/***************************************************************************
 *
 * Infinihost Context Memory management
 *
 ***************************************************************************
 */

/**
 * Get device limits
 *
 * @v arbel		Arbel device
 * @ret rc		Return status code
 */
static int arbel_get_limits ( struct arbel *arbel ) {
	struct arbelprm_query_dev_lim dev_lim;
	int rc;

	if ( ( rc = arbel_cmd_query_dev_lim ( arbel, &dev_lim ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not get device limits: %s\n",
		       arbel, strerror ( rc ) );
		return rc;
	}

	arbel->limits.reserved_qps =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_qps ) );
	arbel->limits.qpc_entry_size = MLX_GET ( &dev_lim, qpc_entry_sz );
	arbel->limits.eqpc_entry_size = MLX_GET ( &dev_lim, eqpc_entry_sz );
	arbel->limits.reserved_srqs =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_srqs ) );
	arbel->limits.srqc_entry_size = MLX_GET ( &dev_lim, srq_entry_sz );
	arbel->limits.reserved_ees =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_ees ) );
	arbel->limits.eec_entry_size = MLX_GET ( &dev_lim, eec_entry_sz );
	arbel->limits.eeec_entry_size = MLX_GET ( &dev_lim, eeec_entry_sz );
	arbel->limits.reserved_cqs =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_cqs ) );
	arbel->limits.cqc_entry_size = MLX_GET ( &dev_lim, cqc_entry_sz );
	arbel->limits.reserved_mtts =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_mtts ) );
	arbel->limits.mtt_entry_size = MLX_GET ( &dev_lim, mtt_entry_sz );
	arbel->limits.reserved_mrws =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_mrws ) );
	arbel->limits.mpt_entry_size = MLX_GET ( &dev_lim, mpt_entry_sz );
	arbel->limits.reserved_rdbs =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_rdbs ) );
	arbel->limits.reserved_eqs = MLX_GET ( &dev_lim, num_rsvd_eqs );
	arbel->limits.eqc_entry_size = MLX_GET ( &dev_lim, eqc_entry_sz );
	arbel->limits.reserved_uars = MLX_GET ( &dev_lim, num_rsvd_uars );
	arbel->limits.uar_scratch_entry_size =
		MLX_GET ( &dev_lim, uar_scratch_entry_sz );

	DBGC ( arbel, "Arbel %p reserves %d x %#zx QPC, %d x %#zx EQPC, "
	       "%d x %#zx SRQC\n", arbel,
	       arbel->limits.reserved_qps, arbel->limits.qpc_entry_size,
	       arbel->limits.reserved_qps, arbel->limits.eqpc_entry_size,
	       arbel->limits.reserved_srqs, arbel->limits.srqc_entry_size );
	DBGC ( arbel, "Arbel %p reserves %d x %#zx EEC, %d x %#zx EEEC, "
	       "%d x %#zx CQC\n", arbel,
	       arbel->limits.reserved_ees, arbel->limits.eec_entry_size,
	       arbel->limits.reserved_ees, arbel->limits.eeec_entry_size,
	       arbel->limits.reserved_cqs, arbel->limits.cqc_entry_size );
	DBGC ( arbel, "Arbel %p reserves %d x %#zx EQC, %d x %#zx MTT, "
	       "%d x %#zx MPT\n", arbel,
	       arbel->limits.reserved_eqs, arbel->limits.eqc_entry_size,
	       arbel->limits.reserved_mtts, arbel->limits.mtt_entry_size,
	       arbel->limits.reserved_mrws, arbel->limits.mpt_entry_size );
	DBGC ( arbel, "Arbel %p reserves %d x %#zx RDB, %d x %#zx UAR, "
	       "%d x %#zx UAR scratchpad\n", arbel,
	       arbel->limits.reserved_rdbs, ARBEL_RDB_ENTRY_SIZE,
	       arbel->limits.reserved_uars, ARBEL_PAGE_SIZE,
	       arbel->limits.reserved_uars,
	       arbel->limits.uar_scratch_entry_size );

	return 0;
}

/**
 * Align ICM table
 *
 * @v icm_offset	Current ICM offset
 * @v len		ICM table length
 * @ret icm_offset	ICM offset
 */
static size_t icm_align ( size_t icm_offset, size_t len ) {

	/* Round up to a multiple of the table size */
	assert ( len == ( 1UL << ( fls ( len ) - 1 ) ) );
	return ( ( icm_offset + len - 1 ) & ~( len - 1 ) );
}
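
/* Because each ICM table length is a power of two, rounding up to "a
 * multiple of the table size" is a simple mask operation.  For example,
 * icm_align ( 0x12345, 0x1000 ) yields 0x13000, and an already-aligned
 * offset is returned unchanged:
 */
#if 0
#include <assert.h>

static void example_icm_align ( void ) {
	assert ( icm_align ( 0x12345, 0x1000 ) == 0x13000 );
	assert ( icm_align ( 0x13000, 0x1000 ) == 0x13000 );
}
#endif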

/**
 * Allocate ICM
 *
 * @v arbel		Arbel device
 * @v init_hca		INIT_HCA structure to fill in
 * @ret rc		Return status code
 */
static int arbel_alloc_icm ( struct arbel *arbel,
			     struct arbelprm_init_hca *init_hca ) {
	struct arbelprm_scalar_parameter icm_size;
	struct arbelprm_scalar_parameter icm_aux_size;
	struct arbelprm_scalar_parameter unmap_icm;
	union arbelprm_doorbell_record *db_rec;
	size_t icm_offset = 0;
	unsigned int log_num_uars, log_num_qps, log_num_srqs, log_num_ees;
	unsigned int log_num_cqs, log_num_mtts, log_num_mpts, log_num_rdbs;
	unsigned int log_num_eqs, log_num_mcs;
	size_t icm_len, icm_aux_len;
	size_t len;
	physaddr_t icm_phys;
	int rc;

	/* Calculate number of each object type within ICM */
	log_num_qps = fls ( arbel->limits.reserved_qps +
			    ARBEL_RSVD_SPECIAL_QPS + ARBEL_MAX_QPS - 1 );
	log_num_srqs = fls ( arbel->limits.reserved_srqs - 1 );
	log_num_ees = fls ( arbel->limits.reserved_ees - 1 );
	log_num_cqs = fls ( arbel->limits.reserved_cqs + ARBEL_MAX_CQS - 1 );
	log_num_eqs = fls ( arbel->limits.reserved_eqs + ARBEL_MAX_EQS - 1 );
	log_num_mtts = fls ( arbel->limits.reserved_mtts - 1 );
	log_num_mpts = fls ( arbel->limits.reserved_mrws + 1 - 1 );
	log_num_rdbs = fls ( arbel->limits.reserved_rdbs +
			     ARBEL_RSVD_SPECIAL_QPS + ARBEL_MAX_QPS - 1 );
	log_num_uars = fls ( arbel->limits.reserved_uars +
			     1 /* single UAR used */ - 1 );
	log_num_mcs = ARBEL_LOG_MULTICAST_HASH_SIZE;

	/* Queue pair contexts */
	len = ( ( 1 << log_num_qps ) * arbel->limits.qpc_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_2 ( init_hca, 13,
		     qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_l,
		     ( icm_offset >> 7 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp,
		     log_num_qps );
	DBGC ( arbel, "Arbel %p ICM QPC is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_qps ), arbel->limits.qpc_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Extended queue pair contexts */
	len = ( ( 1 << log_num_qps ) * arbel->limits.eqpc_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 25,
		     qpc_eec_cqc_eqc_rdb_parameters.eqpc_base_addr_l,
		     icm_offset );
	DBGC ( arbel, "Arbel %p ICM EQPC is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_qps ), arbel->limits.eqpc_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Completion queue contexts */
	len = ( ( 1 << log_num_cqs ) * arbel->limits.cqc_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_2 ( init_hca, 21,
		     qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_l,
		     ( icm_offset >> 6 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq,
		     log_num_cqs );
	DBGC ( arbel, "Arbel %p ICM CQC is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_cqs ), arbel->limits.cqc_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Event queue contexts */
	len = ( ( 1 << log_num_eqs ) * arbel->limits.eqc_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_2 ( init_hca, 33,
		     qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_l,
		     ( icm_offset >> 6 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_eq,
		     log_num_eqs );
	DBGC ( arbel, "Arbel %p ICM EQC is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_eqs ), arbel->limits.eqc_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* End-to-end contexts */
	len = ( ( 1 << log_num_ees ) * arbel->limits.eec_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_2 ( init_hca, 17,
		     qpc_eec_cqc_eqc_rdb_parameters.eec_base_addr_l,
		     ( icm_offset >> 7 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_ee,
		     log_num_ees );
	DBGC ( arbel, "Arbel %p ICM EEC is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_ees ), arbel->limits.eec_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Shared receive queue contexts */
	len = ( ( 1 << log_num_srqs ) * arbel->limits.srqc_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_2 ( init_hca, 19,
		     qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_l,
		     ( icm_offset >> 5 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_srq,
		     log_num_srqs );
	DBGC ( arbel, "Arbel %p ICM SRQC is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_srqs ), arbel->limits.srqc_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Memory protection table */
	len = ( ( 1 << log_num_mpts ) * arbel->limits.mpt_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 61,
		     tpt_parameters.mpt_base_adr_l, icm_offset );
	MLX_FILL_1 ( init_hca, 62,
		     tpt_parameters.log_mpt_sz, log_num_mpts );
	DBGC ( arbel, "Arbel %p ICM MPT is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_mpts ), arbel->limits.mpt_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Remote read data base table */
	len = ( ( 1 << log_num_rdbs ) * ARBEL_RDB_ENTRY_SIZE );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 37,
		     qpc_eec_cqc_eqc_rdb_parameters.rdb_base_addr_l,
		     icm_offset );
	DBGC ( arbel, "Arbel %p ICM RDB is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_rdbs ), ARBEL_RDB_ENTRY_SIZE,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Extended end-to-end contexts */
	len = ( ( 1 << log_num_ees ) * arbel->limits.eeec_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 29,
		     qpc_eec_cqc_eqc_rdb_parameters.eeec_base_addr_l,
		     icm_offset );
	DBGC ( arbel, "Arbel %p ICM EEEC is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_ees ), arbel->limits.eeec_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Multicast table */
	len = ( ( 1 << log_num_mcs ) * sizeof ( struct arbelprm_mgm_entry ) );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 49,
		     multicast_parameters.mc_base_addr_l, icm_offset );
	MLX_FILL_1 ( init_hca, 52,
		     multicast_parameters.log_mc_table_entry_sz,
		     fls ( sizeof ( struct arbelprm_mgm_entry ) - 1 ) );
	MLX_FILL_1 ( init_hca, 53,
		     multicast_parameters.mc_table_hash_sz,
		     ( 1 << log_num_mcs ) );
	MLX_FILL_1 ( init_hca, 54,
		     multicast_parameters.log_mc_table_sz,
		     log_num_mcs /* Only one entry per hash */ );
	DBGC ( arbel, "Arbel %p ICM MC is %d x %#zx at [%zx,%zx)\n", arbel,
	       ( 1 << log_num_mcs ), sizeof ( struct arbelprm_mgm_entry ),
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Memory translation table */
	len = ( ( 1 << log_num_mtts ) * arbel->limits.mtt_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 65,
		     tpt_parameters.mtt_base_addr_l, icm_offset );
	DBGC ( arbel, "Arbel %p ICM MTT is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_mtts ), arbel->limits.mtt_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* User access region scratchpads */
	len = ( ( 1 << log_num_uars ) * arbel->limits.uar_scratch_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 77,
		     uar_parameters.uar_scratch_base_addr_l, icm_offset );
	DBGC ( arbel, "Arbel %p UAR scratchpad is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_uars ),
	       arbel->limits.uar_scratch_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Record amount of ICM to be allocated */
	icm_offset = icm_align ( icm_offset, ARBEL_PAGE_SIZE );
	icm_len = icm_offset;

	/* User access region contexts
	 *
	 * The reserved UAR(s) do not need to be backed by physical
	 * memory, and our UAR is allocated separately; neither are
	 * part of the umalloc()ed ICM block, but both contribute to
	 * the total length of ICM virtual address space.
	 */
	len = ( ( 1 << log_num_uars ) * ARBEL_PAGE_SIZE );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 74, uar_parameters.log_max_uars, log_num_uars );
	MLX_FILL_1 ( init_hca, 79,
		     uar_parameters.uar_context_base_addr_l, icm_offset );
	arbel->db_rec_offset =
		( icm_offset +
		  ( arbel->limits.reserved_uars * ARBEL_PAGE_SIZE ) );
	DBGC ( arbel, "Arbel %p UAR is %d x %#zx at [%zx,%zx), doorbells "
	       "[%zx,%zx)\n", arbel, ( 1 << log_num_uars ), ARBEL_PAGE_SIZE,
	       icm_offset, ( icm_offset + len ), arbel->db_rec_offset,
	       ( arbel->db_rec_offset + ARBEL_PAGE_SIZE ) );
	icm_offset += len;

	/* Get ICM auxiliary area size */
	memset ( &icm_size, 0, sizeof ( icm_size ) );
	MLX_FILL_1 ( &icm_size, 1, value, icm_len );
	if ( ( rc = arbel_cmd_set_icm_size ( arbel, &icm_size,
					     &icm_aux_size ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not set ICM size: %s\n",
		       arbel, strerror ( rc ) );
		goto err_set_icm_size;
	}
	icm_aux_len = ( MLX_GET ( &icm_aux_size, value ) * ARBEL_PAGE_SIZE );

	/* Allocate ICM data and auxiliary area */
	DBGC ( arbel, "Arbel %p requires %zd kB ICM and %zd kB AUX ICM\n",
	       arbel, ( icm_len / 1024 ), ( icm_aux_len / 1024 ) );
	if ( ! arbel->icm ) {
		arbel->icm_len = icm_len;
		arbel->icm_aux_len = icm_aux_len;
		arbel->icm = umalloc ( arbel->icm_len + arbel->icm_aux_len );
		if ( ! arbel->icm ) {
			rc = -ENOMEM;
			goto err_alloc_icm;
		}
	} else {
		assert ( arbel->icm_len == icm_len );
		assert ( arbel->icm_aux_len == icm_aux_len );
	}
	icm_phys = user_to_phys ( arbel->icm, 0 );

	/* Allocate doorbell UAR */
	arbel->db_rec = malloc_dma ( ARBEL_PAGE_SIZE, ARBEL_PAGE_SIZE );
	if ( ! arbel->db_rec ) {
		rc = -ENOMEM;
		goto err_alloc_doorbell;
	}

	/* Map ICM auxiliary area */
	DBGC ( arbel, "Arbel %p ICM AUX at [%08lx,%08lx)\n",
	       arbel, icm_phys, ( icm_phys + arbel->icm_aux_len ) );
	if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_icm_aux,
				    0, icm_phys, arbel->icm_aux_len ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not map AUX ICM: %s\n",
		       arbel, strerror ( rc ) );
		goto err_map_icm_aux;
	}
	icm_phys += arbel->icm_aux_len;

	/* Map ICM area */
	DBGC ( arbel, "Arbel %p ICM at [%08lx,%08lx)\n",
	       arbel, icm_phys, ( icm_phys + arbel->icm_len ) );
	if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_icm,
				    0, icm_phys, arbel->icm_len ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not map ICM: %s\n",
		       arbel, strerror ( rc ) );
		goto err_map_icm;
	}
	icm_phys += arbel->icm_len;

	/* Map doorbell UAR */
	DBGC ( arbel, "Arbel %p UAR at [%08lx,%08lx)\n",
	       arbel, virt_to_phys ( arbel->db_rec ),
	       ( virt_to_phys ( arbel->db_rec ) + ARBEL_PAGE_SIZE ) );
	if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_icm,
				    arbel->db_rec_offset,
				    virt_to_phys ( arbel->db_rec ),
				    ARBEL_PAGE_SIZE ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not map doorbell UAR: %s\n",
		       arbel, strerror ( rc ) );
		goto err_map_doorbell;
	}

	/* Initialise doorbell records */
	memset ( arbel->db_rec, 0, ARBEL_PAGE_SIZE );
	db_rec = &arbel->db_rec[ARBEL_GROUP_SEPARATOR_DOORBELL];
	MLX_FILL_1 ( &db_rec->qp, 1, res, ARBEL_UAR_RES_GROUP_SEP );

	return 0;

	memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
	MLX_FILL_1 ( &unmap_icm, 1, value, arbel->db_rec_offset );
	arbel_cmd_unmap_icm ( arbel, 1, &unmap_icm );
 err_map_doorbell:
	memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
	arbel_cmd_unmap_icm ( arbel, ( arbel->icm_len / ARBEL_PAGE_SIZE ),
			      &unmap_icm );
 err_map_icm:
	arbel_cmd_unmap_icm_aux ( arbel );
 err_map_icm_aux:
	free_dma ( arbel->db_rec, ARBEL_PAGE_SIZE );
	arbel->db_rec = NULL;
 err_alloc_doorbell:
 err_alloc_icm:
 err_set_icm_size:
	return rc;
}
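
/* Every ICM table above follows the same three-step pattern: align the
 * running offset to the table length (a power of two), record the base
 * address in the INIT_HCA structure, then advance the offset past the
 * table.  In outline (pseudo-fields only; the real MLX_FILL_x dword
 * offsets and field names vary per table, as seen above):
 */
#if 0
	len = ( num_entries * entry_size );	/* power of two */
	icm_offset = icm_align ( icm_offset, len );
	/* ... MLX_FILL_x ( init_hca, <table dword>, <base field>,
	 *                  icm_offset ) ... */
	icm_offset += len;
#endif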

/**
 * Free ICM
 *
 * @v arbel		Arbel device
 */
static void arbel_free_icm ( struct arbel *arbel ) {
	struct arbelprm_scalar_parameter unmap_icm;

	memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
	MLX_FILL_1 ( &unmap_icm, 1, value, arbel->db_rec_offset );
	arbel_cmd_unmap_icm ( arbel, 1, &unmap_icm );
	memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
	arbel_cmd_unmap_icm ( arbel, ( arbel->icm_len / ARBEL_PAGE_SIZE ),
			      &unmap_icm );
	arbel_cmd_unmap_icm_aux ( arbel );
	free_dma ( arbel->db_rec, ARBEL_PAGE_SIZE );
	arbel->db_rec = NULL;
}

/***************************************************************************
 *
 * Initialisation and teardown
 *
 ***************************************************************************
 */

/**
 * Reset device
 *
 * @v arbel		Arbel device
 */
static void arbel_reset ( struct arbel *arbel ) {
	struct pci_device *pci = arbel->pci;
	struct pci_config_backup backup;
	static const uint8_t backup_exclude[] =
		PCI_CONFIG_BACKUP_EXCLUDE ( 0x58, 0x5c );
	uint16_t vendor;
	unsigned int i;

	/* Perform device reset and preserve PCI configuration */
	pci_backup ( pci, &backup, backup_exclude );
	writel ( ARBEL_RESET_MAGIC,
		 ( arbel->config + ARBEL_RESET_OFFSET ) );
	for ( i = 0 ; i < ARBEL_RESET_WAIT_TIME_MS ; i++ ) {
		mdelay ( 1 );
		pci_read_config_word ( pci, PCI_VENDOR_ID, &vendor );
		if ( vendor != 0xffff )
			break;
	}
	pci_restore ( pci, &backup, backup_exclude );
}

/**
 * Set up memory protection table
 *
 * @v arbel		Arbel device
 * @ret rc		Return status code
 */
static int arbel_setup_mpt ( struct arbel *arbel ) {
	struct arbelprm_mpt mpt;
	uint32_t key;
	int rc;

	/* Derive key */
	key = ( arbel->limits.reserved_mrws | ARBEL_MKEY_PREFIX );
	arbel->lkey = ( ( key << 8 ) | ( key >> 24 ) );

	/* Initialise memory protection table */
	memset ( &mpt, 0, sizeof ( mpt ) );
	MLX_FILL_7 ( &mpt, 0,
		     a, 1,
		     rw, 1,
		     rr, 1,
		     lw, 1,
		     lr, 1,
		     pa, 1,
		     r_w, 1 );
	MLX_FILL_1 ( &mpt, 2, mem_key, key );
	MLX_FILL_2 ( &mpt, 3,
		     pd, ARBEL_GLOBAL_PD,
		     rae, 1 );
	MLX_FILL_1 ( &mpt, 6, reg_wnd_len_h, 0xffffffffUL );
	MLX_FILL_1 ( &mpt, 7, reg_wnd_len_l, 0xffffffffUL );
	if ( ( rc = arbel_cmd_sw2hw_mpt ( arbel, arbel->limits.reserved_mrws,
					  &mpt ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not set up MPT: %s\n",
		       arbel, strerror ( rc ) );
		return rc;
	}

	return 0;
}
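
/* The lkey used by WQEs is the MPT key rotated left by one byte, so the
 * most significant byte wraps around to the bottom.  A concrete example
 * of the rotation (pure C; the key value is made up):
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void example_lkey_rotation ( void ) {
	uint32_t key = 0x77001020;	/* hypothetical key value */

	assert ( ( ( key << 8 ) | ( key >> 24 ) ) == 0x00102077 );
}
#endif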

/**
 * Configure special queue pairs
 *
 * @v arbel		Arbel device
 * @ret rc		Return status code
 */
static int arbel_configure_special_qps ( struct arbel *arbel ) {
	unsigned int smi_qpn_base;
	unsigned int gsi_qpn_base;
	int rc;

	/* Special QP block must be aligned on an even number */
	arbel->special_qpn_base = ( ( arbel->limits.reserved_qps + 1 ) & ~1 );
	arbel->qpn_base = ( arbel->special_qpn_base +
			    ARBEL_NUM_SPECIAL_QPS );
	DBGC ( arbel, "Arbel %p special QPs at [%lx,%lx]\n", arbel,
	       arbel->special_qpn_base, ( arbel->qpn_base - 1 ) );
	smi_qpn_base = arbel->special_qpn_base;
	gsi_qpn_base = ( smi_qpn_base + 2 );

	/* Issue commands to configure special QPs */
	if ( ( rc = arbel_cmd_conf_special_qp ( arbel, 0,
						smi_qpn_base ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not configure SMI QPs: %s\n",
		       arbel, strerror ( rc ) );
		return rc;
	}
	if ( ( rc = arbel_cmd_conf_special_qp ( arbel, 1,
						gsi_qpn_base ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not configure GSI QPs: %s\n",
		       arbel, strerror ( rc ) );
		return rc;
	}

	return 0;
}
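
/* ( x + 1 ) & ~1 rounds x up to the next even number, leaving even
 * values unchanged; this is how the special QPN block is aligned above.
 * For instance (pure C):
 */
#if 0
#include <assert.h>

static void example_even_align ( void ) {
	assert ( ( ( 5 + 1 ) & ~1 ) == 6 );
	assert ( ( ( 6 + 1 ) & ~1 ) == 6 );
}
#endif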

/**
 * Start Arbel device
 *
 * @v arbel		Arbel device
 * @v running		Firmware is already running
 * @ret rc		Return status code
 */
static int arbel_start ( struct arbel *arbel, int running ) {
	struct arbelprm_init_hca init_hca;
	unsigned int i;
	int rc;

	/* Start firmware if not already running */
	if ( ! running ) {
		if ( ( rc = arbel_start_firmware ( arbel ) ) != 0 )
			goto err_start_firmware;
	}

	/* Allocate ICM */
	memset ( &init_hca, 0, sizeof ( init_hca ) );
	if ( ( rc = arbel_alloc_icm ( arbel, &init_hca ) ) != 0 )
		goto err_alloc_icm;

	/* Initialise HCA */
	if ( ( rc = arbel_cmd_init_hca ( arbel, &init_hca ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not initialise HCA: %s\n",
		       arbel, strerror ( rc ) );
		goto err_init_hca;
	}

	/* Set up memory protection */
	if ( ( rc = arbel_setup_mpt ( arbel ) ) != 0 )
		goto err_setup_mpt;
	for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ )
		arbel->ibdev[i]->rdma_key = arbel->lkey;

	/* Set up event queue */
	if ( ( rc = arbel_create_eq ( arbel ) ) != 0 )
		goto err_create_eq;

	/* Configure special QPs */
	if ( ( rc = arbel_configure_special_qps ( arbel ) ) != 0 )
		goto err_conf_special_qps;

	return 0;

 err_conf_special_qps:
	arbel_destroy_eq ( arbel );
 err_create_eq:
 err_setup_mpt:
	arbel_cmd_close_hca ( arbel );
 err_init_hca:
	arbel_free_icm ( arbel );
 err_alloc_icm:
	arbel_stop_firmware ( arbel );
 err_start_firmware:
	return rc;
}

/**
 * Stop Arbel device
 *
 * @v arbel		Arbel device
 */
static void arbel_stop ( struct arbel *arbel ) {
	arbel_destroy_eq ( arbel );
	arbel_cmd_close_hca ( arbel );
	arbel_free_icm ( arbel );
	arbel_stop_firmware ( arbel );
	arbel_reset ( arbel );
}

/**
 * Open Arbel device
 *
 * @v arbel		Arbel device
 * @ret rc		Return status code
 */
static int arbel_open ( struct arbel *arbel ) {
	int rc;

	/* Start device if applicable */
	if ( arbel->open_count == 0 ) {
		if ( ( rc = arbel_start ( arbel, 0 ) ) != 0 )
			return rc;
	}

	/* Increment open counter */
	arbel->open_count++;

	return 0;
}

/**
 * Close Arbel device
 *
 * @v arbel		Arbel device
 */
static void arbel_close ( struct arbel *arbel ) {

	/* Decrement open counter */
	assert ( arbel->open_count != 0 );
	arbel->open_count--;

	/* Stop device if applicable */
	if ( arbel->open_count == 0 )
		arbel_stop ( arbel );
}

/***************************************************************************
 *
 * Infiniband link-layer operations
 *
 ***************************************************************************
 */

/**
 * Initialise Infiniband link
 *
 * @v ibdev		Infiniband device
 * @ret rc		Return status code
 */
static int arbel_ib_open ( struct ib_device *ibdev ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbelprm_init_ib init_ib;
	int rc;

	/* Open hardware */
	if ( ( rc = arbel_open ( arbel ) ) != 0 )
		goto err_open;

	/* Initialise IB */
	memset ( &init_ib, 0, sizeof ( init_ib ) );
	MLX_FILL_3 ( &init_ib, 0,
		     mtu_cap, ARBEL_MTU_2048,
		     port_width_cap, 3,
		     vl_cap, 1 );
	MLX_FILL_1 ( &init_ib, 1, max_gid, 1 );
	MLX_FILL_1 ( &init_ib, 2, max_pkey, 64 );
	if ( ( rc = arbel_cmd_init_ib ( arbel, ibdev->port,
					&init_ib ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p port %d could not initialise IB: %s\n",
		       arbel, ibdev->port, strerror ( rc ) );
		goto err_init_ib;
	}

	/* Update MAD parameters */
	ib_smc_update ( ibdev, arbel_mad );

	return 0;

 err_init_ib:
	arbel_close ( arbel );
 err_open:
	return rc;
}

/**
 * Close Infiniband link
 *
 * @v ibdev		Infiniband device
 */
static void arbel_ib_close ( struct ib_device *ibdev ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	int rc;

	/* Close IB */
	if ( ( rc = arbel_cmd_close_ib ( arbel, ibdev->port ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p port %d could not close IB: %s\n",
		       arbel, ibdev->port, strerror ( rc ) );
		/* Nothing we can do about this */
	}

	/* Close hardware */
	arbel_close ( arbel );
}

/**
 * Inform embedded subnet management agent of a received MAD
 *
 * @v ibdev		Infiniband device
 * @v mad		MAD
 * @ret rc		Return status code
 */
static int arbel_inform_sma ( struct ib_device *ibdev, union ib_mad *mad ) {
	int rc;

	/* Send the MAD to the embedded SMA */
	if ( ( rc = arbel_mad ( ibdev, mad ) ) != 0 )
		return rc;

	/* Update parameters held in software */
	ib_smc_update ( ibdev, arbel_mad );

	return 0;
}
  2521. /***************************************************************************
  2522. *
  2523. * Multicast group operations
  2524. *
  2525. ***************************************************************************
  2526. */
  2527. /**
  2528. * Attach to multicast group
  2529. *
  2530. * @v ibdev Infiniband device
  2531. * @v qp Queue pair
  2532. * @v gid Multicast GID
  2533. * @ret rc Return status code
  2534. */
  2535. static int arbel_mcast_attach ( struct ib_device *ibdev,
  2536. struct ib_queue_pair *qp,
  2537. union ib_gid *gid ) {
  2538. struct arbel *arbel = ib_get_drvdata ( ibdev );
  2539. struct arbelprm_mgm_hash hash;
  2540. struct arbelprm_mgm_entry mgm;
  2541. unsigned int index;
  2542. int rc;
  2543. /* Generate hash table index */
  2544. if ( ( rc = arbel_cmd_mgid_hash ( arbel, gid, &hash ) ) != 0 ) {
  2545. DBGC ( arbel, "Arbel %p could not hash GID: %s\n",
  2546. arbel, strerror ( rc ) );
  2547. return rc;
  2548. }
  2549. index = MLX_GET ( &hash, hash );
  2550. /* Check for existing hash table entry */
  2551. if ( ( rc = arbel_cmd_read_mgm ( arbel, index, &mgm ) ) != 0 ) {
  2552. DBGC ( arbel, "Arbel %p could not read MGM %#x: %s\n",
  2553. arbel, index, strerror ( rc ) );
  2554. return rc;
  2555. }
  2556. if ( MLX_GET ( &mgm, mgmqp_0.qi ) != 0 ) {
  2557. /* FIXME: this implementation allows only a single QP
  2558. * per multicast group, and doesn't handle hash
  2559. * collisions. Sufficient for IPoIB but may need to
  2560. * be extended in future.
  2561. */
  2562. DBGC ( arbel, "Arbel %p MGID index %#x already in use\n",
  2563. arbel, index );
  2564. return -EBUSY;
  2565. }
  2566. /* Update hash table entry */
  2567. MLX_FILL_2 ( &mgm, 8,
  2568. mgmqp_0.qpn_i, qp->qpn,
  2569. mgmqp_0.qi, 1 );
  2570. memcpy ( &mgm.u.dwords[4], gid, sizeof ( *gid ) );
  2571. if ( ( rc = arbel_cmd_write_mgm ( arbel, index, &mgm ) ) != 0 ) {
  2572. DBGC ( arbel, "Arbel %p could not write MGM %#x: %s\n",
  2573. arbel, index, strerror ( rc ) );
  2574. return rc;
  2575. }
  2576. return 0;
  2577. }
  2578. /**
  2579. * Detach from multicast group
  2580. *
  2581. * @v ibdev Infiniband device
  2582. * @v qp Queue pair
  2583. * @v gid Multicast GID
  2584. */
static void arbel_mcast_detach ( struct ib_device *ibdev,
				 struct ib_queue_pair *qp __unused,
				 union ib_gid *gid ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbelprm_mgm_hash hash;
	struct arbelprm_mgm_entry mgm;
	unsigned int index;
	int rc;

	/* Generate hash table index */
	if ( ( rc = arbel_cmd_mgid_hash ( arbel, gid, &hash ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not hash GID: %s\n",
		       arbel, strerror ( rc ) );
		return;
	}
	index = MLX_GET ( &hash, hash );

	/* Clear hash table entry */
	memset ( &mgm, 0, sizeof ( mgm ) );
	if ( ( rc = arbel_cmd_write_mgm ( arbel, index, &mgm ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not write MGM %#x: %s\n",
		       arbel, index, strerror ( rc ) );
		return;
	}
}
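
/* Note that detaching zeroes the whole MGM entry rather than removing
 * only this QP: this mirrors the single-QP-per-group limitation noted
 * in arbel_mcast_attach() above, so group membership is dropped for
 * the device as a whole.
 */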

/** Arbel Infiniband operations */
static struct ib_device_operations arbel_ib_operations = {
	.create_cq	= arbel_create_cq,
	.destroy_cq	= arbel_destroy_cq,
	.create_qp	= arbel_create_qp,
	.modify_qp	= arbel_modify_qp,
	.destroy_qp	= arbel_destroy_qp,
	.post_send	= arbel_post_send,
	.post_recv	= arbel_post_recv,
	.poll_cq	= arbel_poll_cq,
	.poll_eq	= arbel_poll_eq,
	.open		= arbel_ib_open,
	.close		= arbel_ib_close,
	.mcast_attach	= arbel_mcast_attach,
	.mcast_detach	= arbel_mcast_detach,
	.set_port_info	= arbel_inform_sma,
	.set_pkey_table	= arbel_inform_sma,
};
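
/* The Infiniband core never calls this driver directly; every entry
 * point is dispatched through the table above via ibdev->op.  For
 * example (a sketch, assuming the standard iPXE wrapper functions):
 *
 *	ib_post_send ( ibdev, qp, dest, iobuf );  // -> arbel_post_send()
 *	ib_poll_cq ( ibdev, cq );                 // -> arbel_poll_cq()
 */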

/***************************************************************************
 *
 * PCI interface
 *
 ***************************************************************************
 */

/**
 * Allocate Arbel device
 *
 * @ret arbel		Arbel device
 */
static struct arbel * arbel_alloc ( void ) {
	struct arbel *arbel;

	/* Allocate Arbel device */
	arbel = zalloc ( sizeof ( *arbel ) );
	if ( ! arbel )
		goto err_arbel;

	/* Allocate space for mailboxes */
	arbel->mailbox_in = malloc_dma ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
	if ( ! arbel->mailbox_in )
		goto err_mailbox_in;
	arbel->mailbox_out = malloc_dma ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
	if ( ! arbel->mailbox_out )
		goto err_mailbox_out;

	return arbel;

	free_dma ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
 err_mailbox_out:
	free_dma ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
 err_mailbox_in:
	free ( arbel );
 err_arbel:
	return NULL;
}
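
/* Note: the free_dma() call immediately after "return arbel;" above is
 * unreachable by design; iPXE drivers conventionally spell out the full
 * unwind ladder so that every allocation has a visually matching free.
 * The mailboxes come from malloc_dma() since the firmware command
 * interface transfers their contents by DMA, hence also the
 * ARBEL_MBOX_ALIGN alignment constraint.
 */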

/**
 * Free Arbel device
 *
 * @v arbel		Arbel device
 */
static void arbel_free ( struct arbel *arbel ) {

	ufree ( arbel->icm );
	ufree ( arbel->firmware_area );
	free_dma ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
	free_dma ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
	free ( arbel );
}

/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int arbel_probe ( struct pci_device *pci ) {
	struct arbel *arbel;
	struct ib_device *ibdev;
	int i;
	int rc;

	/* Allocate Arbel device */
	arbel = arbel_alloc();
	if ( ! arbel ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	pci_set_drvdata ( pci, arbel );
	arbel->pci = pci;

	/* Allocate Infiniband devices */
	for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ ) {
		ibdev = alloc_ibdev ( 0 );
		if ( ! ibdev ) {
			rc = -ENOMEM;
			goto err_alloc_ibdev;
		}
		arbel->ibdev[i] = ibdev;
		ibdev->op = &arbel_ib_operations;
		ibdev->dev = &pci->dev;
		ibdev->port = ( ARBEL_PORT_BASE + i );
		ib_set_drvdata ( ibdev, arbel );
	}

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Get PCI BARs */
	arbel->config = ioremap ( pci_bar_start ( pci, ARBEL_PCI_CONFIG_BAR ),
				  ARBEL_PCI_CONFIG_BAR_SIZE );
	arbel->uar = ioremap ( ( pci_bar_start ( pci, ARBEL_PCI_UAR_BAR ) +
				 ARBEL_PCI_UAR_IDX * ARBEL_PCI_UAR_SIZE ),
			       ARBEL_PCI_UAR_SIZE );
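
	/* The UAR mapping above covers a single User Access Region page:
	 * the doorbell page for UAR index ARBEL_PCI_UAR_IDX, located
	 * ARBEL_PCI_UAR_IDX * ARBEL_PCI_UAR_SIZE bytes into the UAR BAR.
	 */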

	/* Reset device */
	arbel_reset ( arbel );

	/* Start firmware */
	if ( ( rc = arbel_start_firmware ( arbel ) ) != 0 )
		goto err_start_firmware;

	/* Get device limits */
	if ( ( rc = arbel_get_limits ( arbel ) ) != 0 )
		goto err_get_limits;

	/* Start device */
	if ( ( rc = arbel_start ( arbel, 1 ) ) != 0 )
		goto err_start;

	/* Initialise parameters using SMC */
	for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ )
		ib_smc_init ( arbel->ibdev[i], arbel_mad );

	/* Register Infiniband devices */
	for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ ) {
		if ( ( rc = register_ibdev ( arbel->ibdev[i] ) ) != 0 ) {
			DBGC ( arbel, "Arbel %p port %d could not register IB "
			       "device: %s\n", arbel,
			       arbel->ibdev[i]->port, strerror ( rc ) );
			goto err_register_ibdev;
		}
	}

	/* Leave device quiescent until opened */
	if ( arbel->open_count == 0 )
		arbel_stop ( arbel );

	return 0;
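
	/* Error unwind: each failure above jumps to a label chosen so
	 * that only the resources already set up are torn down, in
	 * reverse order.  Assigning i = ARBEL_NUM_PORTS before falling
	 * through to a label lets the same "for ( i-- ; i >= 0 ; i-- )"
	 * loop unwind a full set of ports.
	 */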

	i = ARBEL_NUM_PORTS;
 err_register_ibdev:
	for ( i-- ; i >= 0 ; i-- )
		unregister_ibdev ( arbel->ibdev[i] );
	arbel_stop ( arbel );
 err_start:
 err_get_limits:
	arbel_stop_firmware ( arbel );
 err_start_firmware:
	i = ARBEL_NUM_PORTS;
 err_alloc_ibdev:
	for ( i-- ; i >= 0 ; i-- )
		ibdev_put ( arbel->ibdev[i] );
	arbel_free ( arbel );
 err_alloc:
	return rc;
}

/**
 * Remove PCI device
 *
 * @v pci		PCI device
 */
static void arbel_remove ( struct pci_device *pci ) {
	struct arbel *arbel = pci_get_drvdata ( pci );
	int i;

	for ( i = ( ARBEL_NUM_PORTS - 1 ) ; i >= 0 ; i-- )
		unregister_ibdev ( arbel->ibdev[i] );
	for ( i = ( ARBEL_NUM_PORTS - 1 ) ; i >= 0 ; i-- )
		ibdev_put ( arbel->ibdev[i] );
	arbel_free ( arbel );
}

static struct pci_device_id arbel_nics[] = {
	PCI_ROM ( 0x15b3, 0x6282, "mt25218", "MT25218 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6274, "mt25204", "MT25204 HCA driver", 0 ),
};
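
/* PCI vendor ID 0x15b3 is Mellanox Technologies; 0x6282 and 0x6274 are
 * the MT25218 and MT25204 Arbel-family HCAs supported by this driver.
 */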

struct pci_driver arbel_driver __pci_driver = {
	.ids = arbel_nics,
	.id_count = ( sizeof ( arbel_nics ) / sizeof ( arbel_nics[0] ) ),
	.probe = arbel_probe,
	.remove = arbel_remove,
};
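
/* The __pci_driver attribute places this structure in iPXE's linker
 * table of PCI drivers, so no explicit registration call is needed:
 * the PCI core walks the table during bus enumeration and invokes
 * arbel_probe() for any device whose IDs match an entry in arbel_nics.
 */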