
/*
 * Copyright (C) 2007 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * Based in part upon the original driver by Mellanox Technologies
 * Ltd.  Portions may be Copyright (c) Mellanox Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <errno.h>
#include <byteswap.h>
#include <ipxe/io.h>
#include <ipxe/pci.h>
#include <ipxe/pcibackup.h>
#include <ipxe/malloc.h>
#include <ipxe/umalloc.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>
#include <ipxe/infiniband.h>
#include <ipxe/ib_smc.h>
#include "arbel.h"

/**
 * @file
 *
 * Mellanox Arbel Infiniband HCA
 *
 */

/***************************************************************************
 *
 * Queue number allocation
 *
 ***************************************************************************
 */

/**
 * Allocate offset within usage bitmask
 *
 * @v bits		Usage bitmask
 * @v bits_len		Length of usage bitmask
 * @ret bit		First free bit within bitmask, or negative error
 */
static int arbel_bitmask_alloc ( arbel_bitmask_t *bits,
				 unsigned int bits_len ) {
	unsigned int bit = 0;
	arbel_bitmask_t mask = 1;
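
	/* Scan for the first clear bit, one bit at a time.  Rotating
	 * the single-bit mask left wraps it back to bit 0 after the
	 * last bit of each word, at which point we advance to the
	 * next word of the bitmask.
	 */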
	while ( bit < bits_len ) {
		if ( ( mask & *bits ) == 0 ) {
			*bits |= mask;
			return bit;
		}
		bit++;
		mask = ( mask << 1 ) | ( mask >> ( 8 * sizeof ( mask ) - 1 ) );
		if ( mask == 1 )
			bits++;
	}
	return -ENFILE;
}

/**
 * Free offset within usage bitmask
 *
 * @v bits		Usage bitmask
 * @v bit		Bit within bitmask
 */
static void arbel_bitmask_free ( arbel_bitmask_t *bits, int bit ) {
	arbel_bitmask_t mask;

	mask = ( 1 << ( bit % ( 8 * sizeof ( mask ) ) ) );
	bits += ( bit / ( 8 * sizeof ( mask ) ) );
	*bits &= ~mask;
}

/***************************************************************************
 *
 * HCA commands
 *
 ***************************************************************************
 */

/**
 * Wait for Arbel command completion
 *
 * @v arbel		Arbel device
 * @v hcr		HCA command registers
 * @ret rc		Return status code
 */
static int arbel_cmd_wait ( struct arbel *arbel,
			    struct arbelprm_hca_command_register *hcr ) {
	unsigned int wait;
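
	/* The "go" bit is set while the HCA is executing a command;
	 * poll (for up to ARBEL_HCR_MAX_WAIT_MS milliseconds) until
	 * the hardware clears it and the command interface is free.
	 */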
	for ( wait = ARBEL_HCR_MAX_WAIT_MS ; wait ; wait-- ) {
		hcr->u.dwords[6] =
			readl ( arbel->config + ARBEL_HCR_REG ( 6 ) );
		if ( MLX_GET ( hcr, go ) == 0 )
			return 0;
		mdelay ( 1 );
	}
	return -EBUSY;
}

/**
 * Issue HCA command
 *
 * @v arbel		Arbel device
 * @v command		Command opcode, flags and input/output lengths
 * @v op_mod		Opcode modifier (0 if no modifier applicable)
 * @v in		Input parameters
 * @v in_mod		Input modifier (0 if no modifier applicable)
 * @v out		Output parameters
 * @ret rc		Return status code
 */
static int arbel_cmd ( struct arbel *arbel, unsigned long command,
		       unsigned int op_mod, const void *in,
		       unsigned int in_mod, void *out ) {
	struct arbelprm_hca_command_register hcr;
	unsigned int opcode = ARBEL_HCR_OPCODE ( command );
	size_t in_len = ARBEL_HCR_IN_LEN ( command );
	size_t out_len = ARBEL_HCR_OUT_LEN ( command );
	void *in_buffer;
	void *out_buffer;
	unsigned int status;
	unsigned int i;
	int rc;

	assert ( in_len <= ARBEL_MBOX_SIZE );
	assert ( out_len <= ARBEL_MBOX_SIZE );

	DBGC2 ( arbel, "Arbel %p command %02x in %zx%s out %zx%s\n",
		arbel, opcode, in_len,
		( ( command & ARBEL_HCR_IN_MBOX ) ? "(mbox)" : "" ), out_len,
		( ( command & ARBEL_HCR_OUT_MBOX ) ? "(mbox)" : "" ) );

	/* Check that HCR is free */
	if ( ( rc = arbel_cmd_wait ( arbel, &hcr ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p command interface locked\n", arbel );
		return rc;
	}

	/* Prepare HCR */
	memset ( &hcr, 0, sizeof ( hcr ) );
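
	/* Small parameter blocks travel inline in the HCR dwords
	 * themselves; commands flagged with ARBEL_HCR_IN_MBOX or
	 * ARBEL_HCR_OUT_MBOX instead use the preallocated DMA
	 * mailboxes, whose bus addresses are written into the HCR.
	 */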
	in_buffer = &hcr.u.dwords[0];
	if ( in_len && ( command & ARBEL_HCR_IN_MBOX ) ) {
		in_buffer = arbel->mailbox_in;
		MLX_FILL_H ( &hcr, 0, in_param_h, virt_to_bus ( in_buffer ) );
		MLX_FILL_1 ( &hcr, 1, in_param_l, virt_to_bus ( in_buffer ) );
	}
	memcpy ( in_buffer, in, in_len );
	MLX_FILL_1 ( &hcr, 2, input_modifier, in_mod );
	out_buffer = &hcr.u.dwords[3];
	if ( out_len && ( command & ARBEL_HCR_OUT_MBOX ) ) {
		out_buffer = arbel->mailbox_out;
		MLX_FILL_H ( &hcr, 3, out_param_h,
			     virt_to_bus ( out_buffer ) );
		MLX_FILL_1 ( &hcr, 4, out_param_l,
			     virt_to_bus ( out_buffer ) );
	}
	MLX_FILL_3 ( &hcr, 6,
		     opcode, opcode,
		     opcode_modifier, op_mod,
		     go, 1 );
	DBGC ( arbel, "Arbel %p issuing command %04x\n", arbel, opcode );
	DBGC2_HDA ( arbel, virt_to_phys ( arbel->config + ARBEL_HCR_BASE ),
		    &hcr, sizeof ( hcr ) );
	if ( in_len && ( command & ARBEL_HCR_IN_MBOX ) ) {
		DBGC2 ( arbel, "Input mailbox:\n" );
		DBGC2_HDA ( arbel, virt_to_phys ( in_buffer ), in_buffer,
			    ( ( in_len < 512 ) ? in_len : 512 ) );
	}

	/* Issue command */
	for ( i = 0 ; i < ( sizeof ( hcr ) / sizeof ( hcr.u.dwords[0] ) ) ;
	      i++ ) {
		writel ( hcr.u.dwords[i],
			 arbel->config + ARBEL_HCR_REG ( i ) );
		barrier();
	}

	/* Wait for command completion */
	if ( ( rc = arbel_cmd_wait ( arbel, &hcr ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p timed out waiting for command:\n",
		       arbel );
		DBGC_HD ( arbel, &hcr, sizeof ( hcr ) );
		return rc;
	}

	/* Check command status */
	status = MLX_GET ( &hcr, status );
	if ( status != 0 ) {
		DBGC ( arbel, "Arbel %p command failed with status %02x:\n",
		       arbel, status );
		DBGC_HD ( arbel, &hcr, sizeof ( hcr ) );
		return -EIO;
	}

	/* Read output parameters, if any */
	hcr.u.dwords[3] = readl ( arbel->config + ARBEL_HCR_REG ( 3 ) );
	hcr.u.dwords[4] = readl ( arbel->config + ARBEL_HCR_REG ( 4 ) );
	memcpy ( out, out_buffer, out_len );
	if ( out_len ) {
		DBGC2 ( arbel, "Output%s:\n",
			( command & ARBEL_HCR_OUT_MBOX ) ? " mailbox" : "" );
		DBGC2_HDA ( arbel, virt_to_phys ( out_buffer ), out_buffer,
			    ( ( out_len < 512 ) ? out_len : 512 ) );
	}

	return 0;
}
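
/* Thin wrappers around arbel_cmd() for each firmware command.  The
 * ARBEL_HCR_*_CMD() macros encode the opcode together with the
 * presence and size of the input and output parameter blocks.
 */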
static inline int
arbel_cmd_query_dev_lim ( struct arbel *arbel,
			  struct arbelprm_query_dev_lim *dev_lim ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_DEV_LIM,
					       1, sizeof ( *dev_lim ) ),
			   0, NULL, 0, dev_lim );
}

static inline int
arbel_cmd_query_fw ( struct arbel *arbel, struct arbelprm_query_fw *fw ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_FW,
					       1, sizeof ( *fw ) ),
			   0, NULL, 0, fw );
}

static inline int
arbel_cmd_init_hca ( struct arbel *arbel,
		     const struct arbelprm_init_hca *init_hca ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT_HCA,
					      1, sizeof ( *init_hca ) ),
			   0, init_hca, 0, NULL );
}

static inline int
arbel_cmd_close_hca ( struct arbel *arbel ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_CLOSE_HCA ),
			   0, NULL, 0, NULL );
}

static inline int
arbel_cmd_init_ib ( struct arbel *arbel, unsigned int port,
		    const struct arbelprm_init_ib *init_ib ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT_IB,
					      1, sizeof ( *init_ib ) ),
			   0, init_ib, port, NULL );
}

static inline int
arbel_cmd_close_ib ( struct arbel *arbel, unsigned int port ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_CLOSE_IB ),
			   0, NULL, port, NULL );
}

static inline int
arbel_cmd_sw2hw_mpt ( struct arbel *arbel, unsigned int index,
		      const struct arbelprm_mpt *mpt ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_MPT,
					      1, sizeof ( *mpt ) ),
			   0, mpt, index, NULL );
}

static inline int
arbel_cmd_map_eq ( struct arbel *arbel, unsigned long index_map,
		   const struct arbelprm_event_mask *mask ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_EQ,
					      0, sizeof ( *mask ) ),
			   0, mask, index_map, NULL );
}

static inline int
arbel_cmd_sw2hw_eq ( struct arbel *arbel, unsigned int index,
		     const struct arbelprm_eqc *eqctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_EQ,
					      1, sizeof ( *eqctx ) ),
			   0, eqctx, index, NULL );
}

static inline int
arbel_cmd_hw2sw_eq ( struct arbel *arbel, unsigned int index,
		     struct arbelprm_eqc *eqctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_HW2SW_EQ,
					       1, sizeof ( *eqctx ) ),
			   1, NULL, index, eqctx );
}

static inline int
arbel_cmd_sw2hw_cq ( struct arbel *arbel, unsigned long cqn,
		     const struct arbelprm_completion_queue_context *cqctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_CQ,
					      1, sizeof ( *cqctx ) ),
			   0, cqctx, cqn, NULL );
}

static inline int
arbel_cmd_hw2sw_cq ( struct arbel *arbel, unsigned long cqn,
		     struct arbelprm_completion_queue_context *cqctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_HW2SW_CQ,
					       1, sizeof ( *cqctx ) ),
			   0, NULL, cqn, cqctx );
}

static inline int
arbel_cmd_query_cq ( struct arbel *arbel, unsigned long cqn,
		     struct arbelprm_completion_queue_context *cqctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_CQ,
					       1, sizeof ( *cqctx ) ),
			   0, NULL, cqn, cqctx );
}

static inline int
arbel_cmd_rst2init_qpee ( struct arbel *arbel, unsigned long qpn,
			  const struct arbelprm_qp_ee_state_transitions *ctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_RST2INIT_QPEE,
					      1, sizeof ( *ctx ) ),
			   0, ctx, qpn, NULL );
}

static inline int
arbel_cmd_init2rtr_qpee ( struct arbel *arbel, unsigned long qpn,
			  const struct arbelprm_qp_ee_state_transitions *ctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT2RTR_QPEE,
					      1, sizeof ( *ctx ) ),
			   0, ctx, qpn, NULL );
}

static inline int
arbel_cmd_rtr2rts_qpee ( struct arbel *arbel, unsigned long qpn,
			 const struct arbelprm_qp_ee_state_transitions *ctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_RTR2RTS_QPEE,
					      1, sizeof ( *ctx ) ),
			   0, ctx, qpn, NULL );
}

static inline int
arbel_cmd_rts2rts_qpee ( struct arbel *arbel, unsigned long qpn,
			 const struct arbelprm_qp_ee_state_transitions *ctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_RTS2RTS_QPEE,
					      1, sizeof ( *ctx ) ),
			   0, ctx, qpn, NULL );
}

static inline int
arbel_cmd_2rst_qpee ( struct arbel *arbel, unsigned long qpn ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_2RST_QPEE ),
			   0x03, NULL, qpn, NULL );
}

static inline int
arbel_cmd_query_qpee ( struct arbel *arbel, unsigned long qpn,
		       struct arbelprm_qp_ee_state_transitions *ctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_QPEE,
					       1, sizeof ( *ctx ) ),
			   0, NULL, qpn, ctx );
}

static inline int
arbel_cmd_conf_special_qp ( struct arbel *arbel, unsigned int qp_type,
			    unsigned long base_qpn ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_CONF_SPECIAL_QP ),
			   qp_type, NULL, base_qpn, NULL );
}

static inline int
arbel_cmd_mad_ifc ( struct arbel *arbel, unsigned int port,
		    union arbelprm_mad *mad ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_MAD_IFC,
						 1, sizeof ( *mad ),
						 1, sizeof ( *mad ) ),
			   0x03, mad, port, mad );
}

static inline int
arbel_cmd_read_mgm ( struct arbel *arbel, unsigned int index,
		     struct arbelprm_mgm_entry *mgm ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_READ_MGM,
					       1, sizeof ( *mgm ) ),
			   0, NULL, index, mgm );
}

static inline int
arbel_cmd_write_mgm ( struct arbel *arbel, unsigned int index,
		      const struct arbelprm_mgm_entry *mgm ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_WRITE_MGM,
					      1, sizeof ( *mgm ) ),
			   0, mgm, index, NULL );
}

static inline int
arbel_cmd_mgid_hash ( struct arbel *arbel, const union ib_gid *gid,
		      struct arbelprm_mgm_hash *hash ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_MGID_HASH,
						 1, sizeof ( *gid ),
						 0, sizeof ( *hash ) ),
			   0, gid, 0, hash );
}

static inline int
arbel_cmd_run_fw ( struct arbel *arbel ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_RUN_FW ),
			   0, NULL, 0, NULL );
}

static inline int
arbel_cmd_disable_lam ( struct arbel *arbel ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_DISABLE_LAM ),
			   0, NULL, 0, NULL );
}

static inline int
arbel_cmd_enable_lam ( struct arbel *arbel, struct arbelprm_access_lam *lam ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_ENABLE_LAM,
					       1, sizeof ( *lam ) ),
			   1, NULL, 0, lam );
}

static inline int
arbel_cmd_unmap_icm ( struct arbel *arbel, unsigned int page_count,
		      const struct arbelprm_scalar_parameter *offset ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_UNMAP_ICM, 0,
					      sizeof ( *offset ) ),
			   0, offset, page_count, NULL );
}

static inline int
arbel_cmd_map_icm ( struct arbel *arbel,
		    const struct arbelprm_virtual_physical_mapping *map ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_ICM,
					      1, sizeof ( *map ) ),
			   0, map, 1, NULL );
}

static inline int
arbel_cmd_unmap_icm_aux ( struct arbel *arbel ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_UNMAP_ICM_AUX ),
			   0, NULL, 0, NULL );
}

static inline int
arbel_cmd_map_icm_aux ( struct arbel *arbel,
			const struct arbelprm_virtual_physical_mapping *map ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_ICM_AUX,
					      1, sizeof ( *map ) ),
			   0, map, 1, NULL );
}

static inline int
arbel_cmd_set_icm_size ( struct arbel *arbel,
			 const struct arbelprm_scalar_parameter *icm_size,
			 struct arbelprm_scalar_parameter *icm_aux_size ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_SET_ICM_SIZE,
						 0, sizeof ( *icm_size ),
						 0, sizeof ( *icm_aux_size ) ),
			   0, icm_size, 0, icm_aux_size );
}

static inline int
arbel_cmd_unmap_fa ( struct arbel *arbel ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_UNMAP_FA ),
			   0, NULL, 0, NULL );
}

static inline int
arbel_cmd_map_fa ( struct arbel *arbel,
		   const struct arbelprm_virtual_physical_mapping *map ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_FA,
					      1, sizeof ( *map ) ),
			   0, map, 1, NULL );
}

/***************************************************************************
 *
 * MAD operations
 *
 ***************************************************************************
 */

/**
 * Issue management datagram
 *
 * @v ibdev		Infiniband device
 * @v mad		Management datagram
 * @ret rc		Return status code
 */
static int arbel_mad ( struct ib_device *ibdev, union ib_mad *mad ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	union arbelprm_mad mad_ifc;
	int rc;

	linker_assert ( sizeof ( *mad ) == sizeof ( mad_ifc.mad ),
			mad_size_mismatch );

	/* Copy in request packet */
	memcpy ( &mad_ifc.mad, mad, sizeof ( mad_ifc.mad ) );

	/* Issue MAD */
	if ( ( rc = arbel_cmd_mad_ifc ( arbel, ibdev->port,
					&mad_ifc ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p port %d could not issue MAD IFC: %s\n",
		       arbel, ibdev->port, strerror ( rc ) );
		return rc;
	}

	/* Copy out reply packet */
	memcpy ( mad, &mad_ifc.mad, sizeof ( *mad ) );

	if ( mad->hdr.status != 0 ) {
		DBGC ( arbel, "Arbel %p port %d MAD IFC status %04x\n",
		       arbel, ibdev->port, ntohs ( mad->hdr.status ) );
		return -EIO;
	}
	return 0;
}

/***************************************************************************
 *
 * Completion queue operations
 *
 ***************************************************************************
 */

/**
 * Dump completion queue context (for debugging only)
 *
 * @v arbel		Arbel device
 * @v cq		Completion queue
 * @ret rc		Return status code
 */
static __attribute__ (( unused )) int
arbel_dump_cqctx ( struct arbel *arbel, struct ib_completion_queue *cq ) {
	struct arbelprm_completion_queue_context cqctx;
	int rc;

	memset ( &cqctx, 0, sizeof ( cqctx ) );
	if ( ( rc = arbel_cmd_query_cq ( arbel, cq->cqn, &cqctx ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p CQN %#lx QUERY_CQ failed: %s\n",
		       arbel, cq->cqn, strerror ( rc ) );
		return rc;
	}
	DBGC ( arbel, "Arbel %p CQN %#lx context:\n", arbel, cq->cqn );
	DBGC_HDA ( arbel, 0, &cqctx, sizeof ( cqctx ) );

	return 0;
}

/**
 * Create completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @ret rc		Return status code
 */
static int arbel_create_cq ( struct ib_device *ibdev,
			     struct ib_completion_queue *cq ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_completion_queue *arbel_cq;
	struct arbelprm_completion_queue_context cqctx;
	struct arbelprm_cq_ci_db_record *ci_db_rec;
	struct arbelprm_cq_arm_db_record *arm_db_rec;
	int cqn_offset;
	unsigned int i;
	int rc;

	/* Find a free completion queue number */
	cqn_offset = arbel_bitmask_alloc ( arbel->cq_inuse, ARBEL_MAX_CQS );
	if ( cqn_offset < 0 ) {
		DBGC ( arbel, "Arbel %p out of completion queues\n", arbel );
		rc = cqn_offset;
		goto err_cqn_offset;
	}
	cq->cqn = ( arbel->limits.reserved_cqs + cqn_offset );

	/* Allocate control structures */
	arbel_cq = zalloc ( sizeof ( *arbel_cq ) );
	if ( ! arbel_cq ) {
		rc = -ENOMEM;
		goto err_arbel_cq;
	}
	arbel_cq->ci_doorbell_idx = arbel_cq_ci_doorbell_idx ( arbel, cq );
	arbel_cq->arm_doorbell_idx = arbel_cq_arm_doorbell_idx ( arbel, cq );

	/* Allocate completion queue itself */
	arbel_cq->cqe_size = ( cq->num_cqes * sizeof ( arbel_cq->cqe[0] ) );
	arbel_cq->cqe = malloc_dma ( arbel_cq->cqe_size,
				     sizeof ( arbel_cq->cqe[0] ) );
	if ( ! arbel_cq->cqe ) {
		rc = -ENOMEM;
		goto err_cqe;
	}
	memset ( arbel_cq->cqe, 0, arbel_cq->cqe_size );
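
	/* Mark every CQE as initially owned by the hardware (owner
	 * bit set), so that software polling does not mistake a
	 * never-written entry for a valid completion.
	 */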
	for ( i = 0 ; i < cq->num_cqes ; i++ ) {
		MLX_FILL_1 ( &arbel_cq->cqe[i].normal, 7, owner, 1 );
	}
	barrier();

	/* Initialise doorbell records */
	ci_db_rec = &arbel->db_rec[arbel_cq->ci_doorbell_idx].cq_ci;
	MLX_FILL_1 ( ci_db_rec, 0, counter, 0 );
	MLX_FILL_2 ( ci_db_rec, 1,
		     res, ARBEL_UAR_RES_CQ_CI,
		     cq_number, cq->cqn );
	arm_db_rec = &arbel->db_rec[arbel_cq->arm_doorbell_idx].cq_arm;
	MLX_FILL_1 ( arm_db_rec, 0, counter, 0 );
	MLX_FILL_2 ( arm_db_rec, 1,
		     res, ARBEL_UAR_RES_CQ_ARM,
		     cq_number, cq->cqn );

	/* Hand queue over to hardware */
	memset ( &cqctx, 0, sizeof ( cqctx ) );
	MLX_FILL_1 ( &cqctx, 0, st, 0xa /* "Event fired" */ );
	MLX_FILL_H ( &cqctx, 1, start_address_h,
		     virt_to_bus ( arbel_cq->cqe ) );
	MLX_FILL_1 ( &cqctx, 2, start_address_l,
		     virt_to_bus ( arbel_cq->cqe ) );
	MLX_FILL_2 ( &cqctx, 3,
		     usr_page, arbel->limits.reserved_uars,
		     log_cq_size, fls ( cq->num_cqes - 1 ) );
	MLX_FILL_1 ( &cqctx, 5, c_eqn, arbel->eq.eqn );
	MLX_FILL_1 ( &cqctx, 6, pd, ARBEL_GLOBAL_PD );
	MLX_FILL_1 ( &cqctx, 7, l_key, arbel->lkey );
	MLX_FILL_1 ( &cqctx, 12, cqn, cq->cqn );
	MLX_FILL_1 ( &cqctx, 13,
		     cq_ci_db_record, arbel_cq->ci_doorbell_idx );
	MLX_FILL_1 ( &cqctx, 14,
		     cq_state_db_record, arbel_cq->arm_doorbell_idx );
	if ( ( rc = arbel_cmd_sw2hw_cq ( arbel, cq->cqn, &cqctx ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p CQN %#lx SW2HW_CQ failed: %s\n",
		       arbel, cq->cqn, strerror ( rc ) );
		goto err_sw2hw_cq;
	}

	DBGC ( arbel, "Arbel %p CQN %#lx ring [%08lx,%08lx), doorbell %08lx\n",
	       arbel, cq->cqn, virt_to_phys ( arbel_cq->cqe ),
	       ( virt_to_phys ( arbel_cq->cqe ) + arbel_cq->cqe_size ),
	       virt_to_phys ( ci_db_rec ) );
	ib_cq_set_drvdata ( cq, arbel_cq );
	return 0;

 err_sw2hw_cq:
	MLX_FILL_1 ( ci_db_rec, 1, res, ARBEL_UAR_RES_NONE );
	MLX_FILL_1 ( arm_db_rec, 1, res, ARBEL_UAR_RES_NONE );
	free_dma ( arbel_cq->cqe, arbel_cq->cqe_size );
 err_cqe:
	free ( arbel_cq );
 err_arbel_cq:
	arbel_bitmask_free ( arbel->cq_inuse, cqn_offset );
 err_cqn_offset:
	return rc;
}

/**
 * Destroy completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 */
static void arbel_destroy_cq ( struct ib_device *ibdev,
			       struct ib_completion_queue *cq ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_completion_queue *arbel_cq = ib_cq_get_drvdata ( cq );
	struct arbelprm_completion_queue_context cqctx;
	struct arbelprm_cq_ci_db_record *ci_db_rec;
	struct arbelprm_cq_arm_db_record *arm_db_rec;
	int cqn_offset;
	int rc;

	/* Take ownership back from hardware */
	if ( ( rc = arbel_cmd_hw2sw_cq ( arbel, cq->cqn, &cqctx ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p CQN %#lx FATAL HW2SW_CQ failed: "
		       "%s\n", arbel, cq->cqn, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}

	/* Clear doorbell records */
	ci_db_rec = &arbel->db_rec[arbel_cq->ci_doorbell_idx].cq_ci;
	arm_db_rec = &arbel->db_rec[arbel_cq->arm_doorbell_idx].cq_arm;
	MLX_FILL_1 ( ci_db_rec, 1, res, ARBEL_UAR_RES_NONE );
	MLX_FILL_1 ( arm_db_rec, 1, res, ARBEL_UAR_RES_NONE );

	/* Free memory */
	free_dma ( arbel_cq->cqe, arbel_cq->cqe_size );
	free ( arbel_cq );

	/* Mark queue number as free */
	cqn_offset = ( cq->cqn - arbel->limits.reserved_cqs );
	arbel_bitmask_free ( arbel->cq_inuse, cqn_offset );

	ib_cq_set_drvdata ( cq, NULL );
}

/***************************************************************************
 *
 * Queue pair operations
 *
 ***************************************************************************
 */

/**
 * Assign queue pair number
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int arbel_alloc_qpn ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	unsigned int port_offset;
	int qpn_offset;

	/* Calculate queue pair number */
	port_offset = ( ibdev->port - ARBEL_PORT_BASE );

	switch ( qp->type ) {
	case IB_QPT_SMI:
		qp->qpn = ( arbel->special_qpn_base + port_offset );
		return 0;
	case IB_QPT_GSI:
		qp->qpn = ( arbel->special_qpn_base + 2 + port_offset );
		return 0;
	case IB_QPT_UD:
	case IB_QPT_RC:
		/* Find a free queue pair number */
		qpn_offset = arbel_bitmask_alloc ( arbel->qp_inuse,
						   ARBEL_MAX_QPS );
		if ( qpn_offset < 0 ) {
			DBGC ( arbel, "Arbel %p out of queue pairs\n",
			       arbel );
			return qpn_offset;
		}
		qp->qpn = ( ( random() & ARBEL_QPN_RANDOM_MASK ) |
			    ( arbel->qpn_base + qpn_offset ) );
		return 0;
	default:
		DBGC ( arbel, "Arbel %p unsupported QP type %d\n",
		       arbel, qp->type );
		return -ENOTSUP;
	}
}

/**
 * Free queue pair number
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void arbel_free_qpn ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	int qpn_offset;

	qpn_offset = ( ( qp->qpn & ~ARBEL_QPN_RANDOM_MASK ) - arbel->qpn_base );
	if ( qpn_offset >= 0 )
		arbel_bitmask_free ( arbel->qp_inuse, qpn_offset );
}

/**
 * Calculate transmission rate
 *
 * @v av		Address vector
 * @ret arbel_rate	Arbel rate
 */
static unsigned int arbel_rate ( struct ib_address_vector *av ) {
	return ( ( ( av->rate >= IB_RATE_2_5 ) && ( av->rate <= IB_RATE_120 ) )
		 ? ( av->rate + 5 ) : 0 );
}
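
/* Note: the IB_RATE_* enumeration values map onto the device's
 * static rate encoding by adding 5; out-of-range rates fall back to
 * 0, which (by the usual Mellanox convention) presumably disables
 * static rate limiting.
 */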
/** Queue pair transport service type map */
static uint8_t arbel_qp_st[] = {
	[IB_QPT_SMI] = ARBEL_ST_MLX,
	[IB_QPT_GSI] = ARBEL_ST_MLX,
	[IB_QPT_UD] = ARBEL_ST_UD,
	[IB_QPT_RC] = ARBEL_ST_RC,
};

/**
 * Dump queue pair context (for debugging only)
 *
 * @v arbel		Arbel device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static __attribute__ (( unused )) int
arbel_dump_qpctx ( struct arbel *arbel, struct ib_queue_pair *qp ) {
	struct arbelprm_qp_ee_state_transitions qpctx;
	int rc;

	memset ( &qpctx, 0, sizeof ( qpctx ) );
	if ( ( rc = arbel_cmd_query_qpee ( arbel, qp->qpn, &qpctx ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p QPN %#lx QUERY_QPEE failed: %s\n",
		       arbel, qp->qpn, strerror ( rc ) );
		return rc;
	}
	DBGC ( arbel, "Arbel %p QPN %#lx context:\n", arbel, qp->qpn );
	DBGC_HDA ( arbel, 0, &qpctx.u.dwords[2], ( sizeof ( qpctx ) - 8 ) );

	return 0;
}

/**
 * Create send work queue
 *
 * @v arbel_send_wq	Send work queue
 * @v num_wqes		Number of work queue entries
 * @ret rc		Return status code
 */
static int arbel_create_send_wq ( struct arbel_send_work_queue *arbel_send_wq,
				  unsigned int num_wqes ) {
	union arbel_send_wqe *wqe;
	union arbel_send_wqe *next_wqe;
	unsigned int wqe_idx_mask;
	unsigned int i;

	/* Allocate work queue */
	arbel_send_wq->wqe_size = ( num_wqes *
				    sizeof ( arbel_send_wq->wqe[0] ) );
	arbel_send_wq->wqe = malloc_dma ( arbel_send_wq->wqe_size,
					  sizeof ( arbel_send_wq->wqe[0] ) );
	if ( ! arbel_send_wq->wqe )
		return -ENOMEM;
	memset ( arbel_send_wq->wqe, 0, arbel_send_wq->wqe_size );

	/* Link work queue entries */
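	/* Each WQE's "next" segment holds bits 31:6 of the bus
	 * address of the following entry (WQEs are 64-byte aligned,
	 * so the low six bits are implicit); the final entry wraps
	 * back to the first, forming a ring.
	 */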
	wqe_idx_mask = ( num_wqes - 1 );
	for ( i = 0 ; i < num_wqes ; i++ ) {
		wqe = &arbel_send_wq->wqe[i];
		next_wqe = &arbel_send_wq->wqe[ ( i + 1 ) & wqe_idx_mask ];
		MLX_FILL_1 ( &wqe->next, 0, nda_31_6,
			     ( virt_to_bus ( next_wqe ) >> 6 ) );
		MLX_FILL_1 ( &wqe->next, 1, always1, 1 );
	}

	return 0;
}

/**
 * Create receive work queue
 *
 * @v arbel_recv_wq	Receive work queue
 * @v num_wqes		Number of work queue entries
 * @v type		Queue pair type
 * @ret rc		Return status code
 */
static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq,
				  unsigned int num_wqes,
				  enum ib_queue_pair_type type ) {
	struct arbelprm_recv_wqe *wqe;
	struct arbelprm_recv_wqe *next_wqe;
	unsigned int wqe_idx_mask;
	size_t nds;
	unsigned int i;
	unsigned int j;
	int rc;

	/* Allocate work queue */
	arbel_recv_wq->wqe_size = ( num_wqes *
				    sizeof ( arbel_recv_wq->wqe[0] ) );
	arbel_recv_wq->wqe = malloc_dma ( arbel_recv_wq->wqe_size,
					  sizeof ( arbel_recv_wq->wqe[0] ) );
	if ( ! arbel_recv_wq->wqe ) {
		rc = -ENOMEM;
		goto err_alloc_wqe;
	}
	memset ( arbel_recv_wq->wqe, 0, arbel_recv_wq->wqe_size );
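
	/* Datagram (SMI/GSI/UD) receives may include a Global Route
	 * Header; allocate one GRH buffer per WQE so that any GRH can
	 * be scattered separately from the payload data.
	 */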
	/* Allocate GRH entries, if needed */
	if ( ( type == IB_QPT_SMI ) || ( type == IB_QPT_GSI ) ||
	     ( type == IB_QPT_UD ) ) {
		arbel_recv_wq->grh_size = ( num_wqes *
					    sizeof ( arbel_recv_wq->grh[0] ) );
		arbel_recv_wq->grh = malloc_dma ( arbel_recv_wq->grh_size,
						  sizeof ( void * ) );
		if ( ! arbel_recv_wq->grh ) {
			rc = -ENOMEM;
			goto err_alloc_grh;
		}
	}

	/* Link work queue entries */
	wqe_idx_mask = ( num_wqes - 1 );
	nds = ( ( offsetof ( typeof ( *wqe ), data ) +
		  sizeof ( wqe->data[0] ) ) >> 4 );
	for ( i = 0 ; i < num_wqes ; i++ ) {
		wqe = &arbel_recv_wq->wqe[i].recv;
		next_wqe = &arbel_recv_wq->wqe[( i + 1 ) & wqe_idx_mask].recv;
		MLX_FILL_1 ( &wqe->next, 0, nda_31_6,
			     ( virt_to_bus ( next_wqe ) >> 6 ) );
		MLX_FILL_1 ( &wqe->next, 1, nds, nds );
		for ( j = 0 ; ( ( ( void * ) &wqe->data[j] ) <
				( ( void * ) ( wqe + 1 ) ) ) ; j++ ) {
			MLX_FILL_1 ( &wqe->data[j], 1,
				     l_key, ARBEL_INVALID_LKEY );
		}
	}

	return 0;

	free_dma ( arbel_recv_wq->grh, arbel_recv_wq->grh_size );
 err_alloc_grh:
	free_dma ( arbel_recv_wq->wqe, arbel_recv_wq->wqe_size );
 err_alloc_wqe:
	return rc;
}

/**
 * Create queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int arbel_create_qp ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_queue_pair *arbel_qp;
	struct arbelprm_qp_ee_state_transitions qpctx;
	struct arbelprm_qp_db_record *send_db_rec;
	struct arbelprm_qp_db_record *recv_db_rec;
	physaddr_t send_wqe_base_adr;
	physaddr_t recv_wqe_base_adr;
	physaddr_t wqe_base_adr;
	int rc;

	/* Warn about dysfunctional code
	 *
	 * Arbel seems to crash the system as soon as the first send
	 * WQE completes on an RC queue pair.  (NOPs complete
	 * successfully, so this is a problem specific to the work
	 * queue rather than the completion queue.)  The cause of this
	 * problem has remained unknown for over a year.  Patches to
	 * fix this are welcome.
	 */
	if ( qp->type == IB_QPT_RC )
		DBG ( "*** WARNING: Arbel RC support is non-functional ***\n" );

	/* Calculate queue pair number */
	if ( ( rc = arbel_alloc_qpn ( ibdev, qp ) ) != 0 )
		goto err_alloc_qpn;

	/* Allocate control structures */
	arbel_qp = zalloc ( sizeof ( *arbel_qp ) );
	if ( ! arbel_qp ) {
		rc = -ENOMEM;
		goto err_arbel_qp;
	}
	arbel_qp->send.doorbell_idx = arbel_send_doorbell_idx ( arbel, qp );
	arbel_qp->recv.doorbell_idx = arbel_recv_doorbell_idx ( arbel, qp );

	/* Create send and receive work queues */
	if ( ( rc = arbel_create_send_wq ( &arbel_qp->send,
					   qp->send.num_wqes ) ) != 0 )
		goto err_create_send_wq;
	if ( ( rc = arbel_create_recv_wq ( &arbel_qp->recv, qp->recv.num_wqes,
					   qp->type ) ) != 0 )
		goto err_create_recv_wq;

	/* Send and receive work queue entries must be within the same 4GB */
	send_wqe_base_adr = virt_to_bus ( arbel_qp->send.wqe );
	recv_wqe_base_adr = virt_to_bus ( arbel_qp->recv.wqe );
	if ( ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) &&
	     ( ( ( ( uint64_t ) send_wqe_base_adr ) >> 32 ) !=
	       ( ( ( uint64_t ) recv_wqe_base_adr ) >> 32 ) ) ) {
		DBGC ( arbel, "Arbel %p QPN %#lx cannot support send %08lx "
		       "recv %08lx\n", arbel, qp->qpn,
		       send_wqe_base_adr, recv_wqe_base_adr );
		rc = -ENOTSUP;
		goto err_unsupported_address_split;
	}
	wqe_base_adr = send_wqe_base_adr;
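
	/* The QP context has a single wqe_base_adr_h field: the send
	 * and receive rings share their upper 32 address bits, with
	 * only the low 32 bits (snd/rcv_wqe_base_adr_l) stored per
	 * queue.  Hence the same-4GB restriction checked above.
	 */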
	/* Initialise doorbell records */
	send_db_rec = &arbel->db_rec[arbel_qp->send.doorbell_idx].qp;
	MLX_FILL_1 ( send_db_rec, 0, counter, 0 );
	MLX_FILL_2 ( send_db_rec, 1,
		     res, ARBEL_UAR_RES_SQ,
		     qp_number, qp->qpn );
	recv_db_rec = &arbel->db_rec[arbel_qp->recv.doorbell_idx].qp;
	MLX_FILL_1 ( recv_db_rec, 0, counter, 0 );
	MLX_FILL_2 ( recv_db_rec, 1,
		     res, ARBEL_UAR_RES_RQ,
		     qp_number, qp->qpn );

	/* Transition queue to INIT state */
	memset ( &qpctx, 0, sizeof ( qpctx ) );
	MLX_FILL_3 ( &qpctx, 2,
		     qpc_eec_data.de, 1,
		     qpc_eec_data.pm_state, ARBEL_PM_STATE_MIGRATED,
		     qpc_eec_data.st, arbel_qp_st[qp->type] );
	MLX_FILL_4 ( &qpctx, 4,
		     qpc_eec_data.log_rq_size, fls ( qp->recv.num_wqes - 1 ),
		     qpc_eec_data.log_rq_stride,
		     ( fls ( sizeof ( arbel_qp->recv.wqe[0] ) - 1 ) - 4 ),
		     qpc_eec_data.log_sq_size, fls ( qp->send.num_wqes - 1 ),
		     qpc_eec_data.log_sq_stride,
		     ( fls ( sizeof ( arbel_qp->send.wqe[0] ) - 1 ) - 4 ) );
	MLX_FILL_1 ( &qpctx, 5,
		     qpc_eec_data.usr_page, arbel->limits.reserved_uars );
	MLX_FILL_1 ( &qpctx, 10, qpc_eec_data.primary_address_path.port_number,
		     ibdev->port );
	MLX_FILL_1 ( &qpctx, 27, qpc_eec_data.pd, ARBEL_GLOBAL_PD );
	MLX_FILL_H ( &qpctx, 28, qpc_eec_data.wqe_base_adr_h, wqe_base_adr );
	MLX_FILL_1 ( &qpctx, 29, qpc_eec_data.wqe_lkey, arbel->lkey );
	MLX_FILL_1 ( &qpctx, 30, qpc_eec_data.ssc, 1 );
	MLX_FILL_1 ( &qpctx, 33, qpc_eec_data.cqn_snd, qp->send.cq->cqn );
	MLX_FILL_1 ( &qpctx, 34, qpc_eec_data.snd_wqe_base_adr_l,
		     ( send_wqe_base_adr >> 6 ) );
	MLX_FILL_1 ( &qpctx, 35, qpc_eec_data.snd_db_record_index,
		     arbel_qp->send.doorbell_idx );
	MLX_FILL_4 ( &qpctx, 38,
		     qpc_eec_data.rre, 1,
		     qpc_eec_data.rwe, 1,
		     qpc_eec_data.rae, 1,
		     qpc_eec_data.rsc, 1 );
	MLX_FILL_1 ( &qpctx, 41, qpc_eec_data.cqn_rcv, qp->recv.cq->cqn );
	MLX_FILL_1 ( &qpctx, 42, qpc_eec_data.rcv_wqe_base_adr_l,
		     ( recv_wqe_base_adr >> 6 ) );
	MLX_FILL_1 ( &qpctx, 43, qpc_eec_data.rcv_db_record_index,
		     arbel_qp->recv.doorbell_idx );
	if ( ( rc = arbel_cmd_rst2init_qpee ( arbel, qp->qpn, &qpctx ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p QPN %#lx RST2INIT_QPEE failed: %s\n",
		       arbel, qp->qpn, strerror ( rc ) );
		goto err_rst2init_qpee;
	}
	arbel_qp->state = ARBEL_QP_ST_INIT;

	DBGC ( arbel, "Arbel %p QPN %#lx send ring [%08lx,%08lx), doorbell "
	       "%08lx\n", arbel, qp->qpn, virt_to_phys ( arbel_qp->send.wqe ),
	       ( virt_to_phys ( arbel_qp->send.wqe ) +
		 arbel_qp->send.wqe_size ),
	       virt_to_phys ( send_db_rec ) );
	DBGC ( arbel, "Arbel %p QPN %#lx receive ring [%08lx,%08lx), doorbell "
	       "%08lx\n", arbel, qp->qpn, virt_to_phys ( arbel_qp->recv.wqe ),
	       ( virt_to_phys ( arbel_qp->recv.wqe ) +
		 arbel_qp->recv.wqe_size ),
	       virt_to_phys ( recv_db_rec ) );
	DBGC ( arbel, "Arbel %p QPN %#lx send CQN %#lx receive CQN %#lx\n",
	       arbel, qp->qpn, qp->send.cq->cqn, qp->recv.cq->cqn );
	ib_qp_set_drvdata ( qp, arbel_qp );
	return 0;

	arbel_cmd_2rst_qpee ( arbel, qp->qpn );
 err_rst2init_qpee:
	MLX_FILL_1 ( send_db_rec, 1, res, ARBEL_UAR_RES_NONE );
	MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );
 err_unsupported_address_split:
	free_dma ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
	free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
 err_create_recv_wq:
	free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
 err_create_send_wq:
	free ( arbel_qp );
 err_arbel_qp:
	arbel_free_qpn ( ibdev, qp );
 err_alloc_qpn:
	return rc;
}

/**
 * Modify queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int arbel_modify_qp ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
	struct arbelprm_qp_ee_state_transitions qpctx;
	int rc;

	/* Transition queue to RTR state, if applicable */
	if ( arbel_qp->state < ARBEL_QP_ST_RTR ) {
		memset ( &qpctx, 0, sizeof ( qpctx ) );
		MLX_FILL_2 ( &qpctx, 4,
			     qpc_eec_data.mtu, ARBEL_MTU_2048,
			     qpc_eec_data.msg_max, 31 );
		MLX_FILL_1 ( &qpctx, 7,
			     qpc_eec_data.remote_qpn_een, qp->av.qpn );
		MLX_FILL_2 ( &qpctx, 11,
			     qpc_eec_data.primary_address_path.rnr_retry,
			     ARBEL_RETRY_MAX,
			     qpc_eec_data.primary_address_path.rlid,
			     qp->av.lid );
		MLX_FILL_2 ( &qpctx, 12,
			     qpc_eec_data.primary_address_path.ack_timeout,
			     14 /* 4.096us * 2^(14) = 67ms */,
			     qpc_eec_data.primary_address_path.max_stat_rate,
			     arbel_rate ( &qp->av ) );
		memcpy ( &qpctx.u.dwords[14], &qp->av.gid,
			 sizeof ( qp->av.gid ) );
		MLX_FILL_1 ( &qpctx, 30,
			     qpc_eec_data.retry_count, ARBEL_RETRY_MAX );
		MLX_FILL_1 ( &qpctx, 39,
			     qpc_eec_data.next_rcv_psn, qp->recv.psn );
		MLX_FILL_1 ( &qpctx, 40,
			     qpc_eec_data.ra_buff_indx,
			     ( arbel->limits.reserved_rdbs +
			       ( ( qp->qpn & ~ARBEL_QPN_RANDOM_MASK ) -
				 arbel->special_qpn_base ) ) );
		if ( ( rc = arbel_cmd_init2rtr_qpee ( arbel, qp->qpn,
						      &qpctx ) ) != 0 ) {
			DBGC ( arbel, "Arbel %p QPN %#lx INIT2RTR_QPEE failed:"
			       " %s\n", arbel, qp->qpn, strerror ( rc ) );
			return rc;
		}
		arbel_qp->state = ARBEL_QP_ST_RTR;
	}

	/* Transition queue to RTS state, if applicable */
	if ( arbel_qp->state < ARBEL_QP_ST_RTS ) {
		memset ( &qpctx, 0, sizeof ( qpctx ) );
		MLX_FILL_1 ( &qpctx, 11,
			     qpc_eec_data.primary_address_path.rnr_retry,
			     ARBEL_RETRY_MAX );
		MLX_FILL_1 ( &qpctx, 12,
			     qpc_eec_data.primary_address_path.ack_timeout,
			     14 /* 4.096us * 2^(14) = 67ms */ );
		MLX_FILL_2 ( &qpctx, 30,
			     qpc_eec_data.retry_count, ARBEL_RETRY_MAX,
			     qpc_eec_data.sic, 1 );
		MLX_FILL_1 ( &qpctx, 32,
			     qpc_eec_data.next_send_psn, qp->send.psn );
		if ( ( rc = arbel_cmd_rtr2rts_qpee ( arbel, qp->qpn,
						     &qpctx ) ) != 0 ) {
			DBGC ( arbel, "Arbel %p QPN %#lx RTR2RTS_QPEE failed: "
			       "%s\n", arbel, qp->qpn, strerror ( rc ) );
			return rc;
		}
		arbel_qp->state = ARBEL_QP_ST_RTS;
	}

	/* Update parameters in RTS state */
	memset ( &qpctx, 0, sizeof ( qpctx ) );
	MLX_FILL_1 ( &qpctx, 0, opt_param_mask, ARBEL_QPEE_OPT_PARAM_QKEY );
	MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
	if ( ( rc = arbel_cmd_rts2rts_qpee ( arbel, qp->qpn, &qpctx ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p QPN %#lx RTS2RTS_QPEE failed: %s\n",
		       arbel, qp->qpn, strerror ( rc ) );
		return rc;
	}

	return 0;
}

/**
 * Destroy queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void arbel_destroy_qp ( struct ib_device *ibdev,
			       struct ib_queue_pair *qp ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
	struct arbelprm_qp_db_record *send_db_rec;
	struct arbelprm_qp_db_record *recv_db_rec;
	int rc;

	/* Take ownership back from hardware */
	if ( ( rc = arbel_cmd_2rst_qpee ( arbel, qp->qpn ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p QPN %#lx FATAL 2RST_QPEE failed: "
		       "%s\n", arbel, qp->qpn, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}

	/* Clear doorbell records */
	send_db_rec = &arbel->db_rec[arbel_qp->send.doorbell_idx].qp;
	recv_db_rec = &arbel->db_rec[arbel_qp->recv.doorbell_idx].qp;
	MLX_FILL_1 ( send_db_rec, 1, res, ARBEL_UAR_RES_NONE );
	MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );

	/* Free memory */
	free_dma ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
	free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
	free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
	free ( arbel_qp );

	/* Mark queue number as free */
	arbel_free_qpn ( ibdev, qp );

	ib_qp_set_drvdata ( qp, NULL );
}

/***************************************************************************
 *
 * Work request operations
 *
 ***************************************************************************
 */

/**
 * Ring doorbell register in UAR
 *
 * @v arbel		Arbel device
 * @v db_reg		Doorbell register structure
 * @v offset		Address of doorbell
 */
static void arbel_ring_doorbell ( struct arbel *arbel,
				  union arbelprm_doorbell_register *db_reg,
				  unsigned int offset ) {

	DBGC2 ( arbel, "Arbel %p ringing doorbell %08x:%08x at %lx\n",
		arbel, ntohl ( db_reg->dword[0] ), ntohl ( db_reg->dword[1] ),
		virt_to_phys ( arbel->uar + offset ) );
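
	/* Write the doorbell in two 32-bit halves; the barriers keep
	 * the compiler (and CPU write buffering) from merging or
	 * reordering the writes, since the hardware expects the
	 * halves to arrive in order.
	 */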
	barrier();
	writel ( db_reg->dword[0], ( arbel->uar + offset + 0 ) );
	barrier();
	writel ( db_reg->dword[1], ( arbel->uar + offset + 4 ) );
}

/** GID used for GID-less send work queue entries */
static const union ib_gid arbel_no_gid = {
	.bytes = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0 },
};

/**
 * Construct UD send work queue entry
 *
 * @v ibdev	Infiniband device
 * @v qp	Queue pair
 * @v dest	Destination address vector
 * @v iobuf	I/O buffer
 * @v wqe	Send work queue entry
 * @ret nds	Work queue entry size
 */
static size_t arbel_fill_ud_send_wqe ( struct ib_device *ibdev,
				       struct ib_queue_pair *qp __unused,
				       struct ib_address_vector *dest,
				       struct io_buffer *iobuf,
				       union arbel_send_wqe *wqe ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	const union ib_gid *gid;

	/* Construct this work queue entry */
	MLX_FILL_1 ( &wqe->ud.ctrl, 0, always1, 1 );
	MLX_FILL_2 ( &wqe->ud.ud, 0,
		     ud_address_vector.pd, ARBEL_GLOBAL_PD,
		     ud_address_vector.port_number, ibdev->port );
	MLX_FILL_2 ( &wqe->ud.ud, 1,
		     ud_address_vector.rlid, dest->lid,
		     ud_address_vector.g, dest->gid_present );
	MLX_FILL_2 ( &wqe->ud.ud, 2,
		     ud_address_vector.max_stat_rate, arbel_rate ( dest ),
		     ud_address_vector.msg, 3 );
	MLX_FILL_1 ( &wqe->ud.ud, 3, ud_address_vector.sl, dest->sl );
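	/* The address-vector segment must always contain a GID; if the
	 * destination has none, fall back to the fixed placeholder GID
	 * defined above.
	 */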
	gid = ( dest->gid_present ? &dest->gid : &arbel_no_gid );
	memcpy ( &wqe->ud.ud.u.dwords[4], gid, sizeof ( *gid ) );
	MLX_FILL_1 ( &wqe->ud.ud, 8, destination_qp, dest->qpn );
	MLX_FILL_1 ( &wqe->ud.ud, 9, q_key, dest->qkey );
	MLX_FILL_1 ( &wqe->ud.data[0], 0, byte_count, iob_len ( iobuf ) );
	MLX_FILL_1 ( &wqe->ud.data[0], 1, l_key, arbel->lkey );
	MLX_FILL_H ( &wqe->ud.data[0], 2,
		     local_address_h, virt_to_bus ( iobuf->data ) );
	MLX_FILL_1 ( &wqe->ud.data[0], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );
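	/* The WQE size (nds) is expressed in units of 16 bytes: the
	 * byte offset of the first unused data segment, shifted right
	 * by four.
	 */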
	return ( offsetof ( typeof ( wqe->ud ), data[1] ) >> 4 );
}

/**
 * Construct MLX send work queue entry
 *
 * @v ibdev	Infiniband device
 * @v qp	Queue pair
 * @v dest	Destination address vector
 * @v iobuf	I/O buffer
 * @v wqe	Send work queue entry
 * @ret nds	Work queue entry size
 */
static size_t arbel_fill_mlx_send_wqe ( struct ib_device *ibdev,
					struct ib_queue_pair *qp,
					struct ib_address_vector *dest,
					struct io_buffer *iobuf,
					union arbel_send_wqe *wqe ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct io_buffer headers;

	/* Construct IB headers */
	iob_populate ( &headers, &wqe->mlx.headers, 0,
		       sizeof ( wqe->mlx.headers ) );
	iob_reserve ( &headers, sizeof ( wqe->mlx.headers ) );
	ib_push ( ibdev, &headers, qp, iob_len ( iobuf ), dest );
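	/* MLX is the raw transport used for the special (SMI/GSI)
	 * queue pairs: software constructs the IB headers itself (via
	 * ib_push()) and the hardware transmits them as-is, adding
	 * only the checksums.
	 */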
	/* Construct this work queue entry */
	MLX_FILL_5 ( &wqe->mlx.ctrl, 0,
		     c, 1 /* generate completion */,
		     icrc, 0 /* generate ICRC */,
		     max_statrate, arbel_rate ( dest ),
		     slr, 0,
		     v15, ( ( qp->ext_qpn == IB_QPN_SMI ) ? 1 : 0 ) );
	MLX_FILL_1 ( &wqe->mlx.ctrl, 1, rlid, dest->lid );
	MLX_FILL_1 ( &wqe->mlx.data[0], 0,
		     byte_count, iob_len ( &headers ) );
	MLX_FILL_1 ( &wqe->mlx.data[0], 1, l_key, arbel->lkey );
	MLX_FILL_H ( &wqe->mlx.data[0], 2,
		     local_address_h, virt_to_bus ( headers.data ) );
	MLX_FILL_1 ( &wqe->mlx.data[0], 3,
		     local_address_l, virt_to_bus ( headers.data ) );
	MLX_FILL_1 ( &wqe->mlx.data[1], 0,
		     byte_count, ( iob_len ( iobuf ) + 4 /* ICRC */ ) );
	MLX_FILL_1 ( &wqe->mlx.data[1], 1, l_key, arbel->lkey );
	MLX_FILL_H ( &wqe->mlx.data[1], 2,
		     local_address_h, virt_to_bus ( iobuf->data ) );
	MLX_FILL_1 ( &wqe->mlx.data[1], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );

	return ( offsetof ( typeof ( wqe->mlx ), data[2] ) >> 4 );
}

/**
 * Construct RC send work queue entry
 *
 * @v ibdev	Infiniband device
 * @v qp	Queue pair
 * @v dest	Destination address vector
 * @v iobuf	I/O buffer
 * @v wqe	Send work queue entry
 * @ret nds	Work queue entry size
 */
static size_t arbel_fill_rc_send_wqe ( struct ib_device *ibdev,
				       struct ib_queue_pair *qp __unused,
				       struct ib_address_vector *dest __unused,
				       struct io_buffer *iobuf,
				       union arbel_send_wqe *wqe ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );

	/* Construct this work queue entry */
	MLX_FILL_1 ( &wqe->rc.ctrl, 0, always1, 1 );
	MLX_FILL_1 ( &wqe->rc.data[0], 0, byte_count, iob_len ( iobuf ) );
	MLX_FILL_1 ( &wqe->rc.data[0], 1, l_key, arbel->lkey );
	MLX_FILL_H ( &wqe->rc.data[0], 2,
		     local_address_h, virt_to_bus ( iobuf->data ) );
	MLX_FILL_1 ( &wqe->rc.data[0], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );

	return ( offsetof ( typeof ( wqe->rc ), data[1] ) >> 4 );
}

/** Work queue entry constructors */
static size_t
( * arbel_fill_send_wqe[] ) ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp,
			      struct ib_address_vector *dest,
			      struct io_buffer *iobuf,
			      union arbel_send_wqe *wqe ) = {
	[IB_QPT_SMI] = arbel_fill_mlx_send_wqe,
	[IB_QPT_GSI] = arbel_fill_mlx_send_wqe,
	[IB_QPT_UD] = arbel_fill_ud_send_wqe,
	[IB_QPT_RC] = arbel_fill_rc_send_wqe,
};

/**
 * Post send work queue entry
 *
 * @v ibdev	Infiniband device
 * @v qp	Queue pair
 * @v dest	Destination address vector
 * @v iobuf	I/O buffer
 * @ret rc	Return status code
 */
static int arbel_post_send ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp,
			     struct ib_address_vector *dest,
			     struct io_buffer *iobuf ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
	struct ib_work_queue *wq = &qp->send;
	struct arbel_send_work_queue *arbel_send_wq = &arbel_qp->send;
	union arbel_send_wqe *prev_wqe;
	union arbel_send_wqe *wqe;
	struct arbelprm_qp_db_record *qp_db_rec;
	union arbelprm_doorbell_register db_reg;
	unsigned long wqe_idx_mask;
	size_t nds;

	/* Allocate work queue entry */
	wqe_idx_mask = ( wq->num_wqes - 1 );
	if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
		DBGC ( arbel, "Arbel %p QPN %#lx send queue full\n",
		       arbel, qp->qpn );
		return -ENOBUFS;
	}
	wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
	prev_wqe = &arbel_send_wq->wqe[(wq->next_idx - 1) & wqe_idx_mask];
	wqe = &arbel_send_wq->wqe[wq->next_idx & wqe_idx_mask];

	/* Construct work queue entry */
	memset ( ( ( ( void * ) wqe ) + sizeof ( wqe->next ) ), 0,
		 ( sizeof ( *wqe ) - sizeof ( wqe->next ) ) );
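	/* Note that the leading "next" control segment is deliberately
	 * preserved: this entry's own "next" segment is rewritten only
	 * when the following entry is posted (when this entry becomes
	 * prev_wqe), so the hardware always sees a consistent chain.
	 */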
	assert ( qp->type < ( sizeof ( arbel_fill_send_wqe ) /
			      sizeof ( arbel_fill_send_wqe[0] ) ) );
	assert ( arbel_fill_send_wqe[qp->type] != NULL );
	nds = arbel_fill_send_wqe[qp->type] ( ibdev, qp, dest, iobuf, wqe );
	DBGCP ( arbel, "Arbel %p QPN %#lx posting send WQE %#lx:\n",
		arbel, qp->qpn, ( wq->next_idx & wqe_idx_mask ) );
	DBGCP_HDA ( arbel, virt_to_phys ( wqe ), wqe, sizeof ( *wqe ) );

	/* Update previous work queue entry's "next" field */
	MLX_SET ( &prev_wqe->next, nopcode, ARBEL_OPCODE_SEND );
	MLX_FILL_3 ( &prev_wqe->next, 1,
		     nds, nds,
		     f, 0,
		     always1, 1 );

	/* Update doorbell record */
	barrier();
	qp_db_rec = &arbel->db_rec[arbel_send_wq->doorbell_idx].qp;
	MLX_FILL_1 ( qp_db_rec, 0,
		     counter, ( ( wq->next_idx + 1 ) & 0xffff ) );
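	/* Arbel in MemFree mode keeps a shadow copy of each queue's
	 * producer counter in host memory; the hardware consults this
	 * doorbell record if it needs to recover the counter, so it
	 * must be updated (after the barrier) before the doorbell
	 * register itself is rung.
	 */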
	/* Ring doorbell register */
	MLX_FILL_4 ( &db_reg.send, 0,
		     nopcode, ARBEL_OPCODE_SEND,
		     f, 0,
		     wqe_counter, ( wq->next_idx & 0xffff ),
		     wqe_cnt, 1 );
	MLX_FILL_2 ( &db_reg.send, 1,
		     nds, nds,
		     qpn, qp->qpn );
	arbel_ring_doorbell ( arbel, &db_reg, ARBEL_DB_POST_SND_OFFSET );

	/* Update work queue's index */
	wq->next_idx++;

	return 0;
}

/**
 * Post receive work queue entry
 *
 * @v ibdev	Infiniband device
 * @v qp	Queue pair
 * @v iobuf	I/O buffer
 * @ret rc	Return status code
 */
static int arbel_post_recv ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp,
			     struct io_buffer *iobuf ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
	struct ib_work_queue *wq = &qp->recv;
	struct arbel_recv_work_queue *arbel_recv_wq = &arbel_qp->recv;
	struct arbelprm_recv_wqe *wqe;
	struct arbelprm_wqe_segment_data_ptr *data;
	struct ib_global_route_header *grh;
	union arbelprm_doorbell_record *db_rec;
	unsigned int wqe_idx_mask;

	/* Allocate work queue entry */
	wqe_idx_mask = ( wq->num_wqes - 1 );
	if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
		DBGC ( arbel, "Arbel %p QPN %#lx receive queue full\n",
		       arbel, qp->qpn );
		return -ENOBUFS;
	}
	wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
	wqe = &arbel_recv_wq->wqe[wq->next_idx & wqe_idx_mask].recv;

	/* Construct work queue entry */
	data = &wqe->data[0];
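	/* For UD queue pairs the hardware scatters the 40-byte GRH
	 * ahead of the payload, so a dedicated GRH buffer gets its own
	 * scatter entry before the I/O buffer itself.
	 */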
	if ( arbel_recv_wq->grh ) {
		grh = &arbel_recv_wq->grh[wq->next_idx & wqe_idx_mask];
		MLX_FILL_1 ( data, 0, byte_count, sizeof ( *grh ) );
		MLX_FILL_1 ( data, 1, l_key, arbel->lkey );
		MLX_FILL_H ( data, 2, local_address_h, virt_to_bus ( grh ) );
		MLX_FILL_1 ( data, 3, local_address_l, virt_to_bus ( grh ) );
		data++;
	}
	MLX_FILL_1 ( data, 0, byte_count, iob_tailroom ( iobuf ) );
	MLX_FILL_1 ( data, 1, l_key, arbel->lkey );
	MLX_FILL_H ( data, 2, local_address_h, virt_to_bus ( iobuf->data ) );
	MLX_FILL_1 ( data, 3, local_address_l, virt_to_bus ( iobuf->data ) );

	/* Update doorbell record */
	barrier();
	db_rec = &arbel->db_rec[arbel_recv_wq->doorbell_idx];
	MLX_FILL_1 ( &db_rec->qp, 0,
		     counter, ( ( wq->next_idx + 1 ) & 0xffff ) );

	/* Update work queue's index */
	wq->next_idx++;

	return 0;
}

/**
 * Handle completion
 *
 * @v ibdev	Infiniband device
 * @v cq	Completion queue
 * @v cqe	Hardware completion queue entry
 * @ret rc	Return status code
 */
static int arbel_complete ( struct ib_device *ibdev,
			    struct ib_completion_queue *cq,
			    union arbelprm_completion_entry *cqe ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq;
	struct ib_queue_pair *qp;
	struct arbel_queue_pair *arbel_qp;
	struct arbel_send_work_queue *arbel_send_wq;
	struct arbel_recv_work_queue *arbel_recv_wq;
	struct arbelprm_recv_wqe *recv_wqe;
	struct io_buffer *iobuf;
	struct ib_address_vector recv_dest;
	struct ib_address_vector recv_source;
	struct ib_global_route_header *grh;
	struct ib_address_vector *source;
	unsigned int opcode;
	unsigned long qpn;
	int is_send;
	unsigned long wqe_adr;
	unsigned long wqe_idx;
	size_t len;
	int rc = 0;

	/* Parse completion */
	qpn = MLX_GET ( &cqe->normal, my_qpn );
	is_send = MLX_GET ( &cqe->normal, s );
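	/* The CQE reports the WQE's address in units of 64 bytes; the
	 * shift recovers the bus address, which is converted into a
	 * ring index once the owning work queue has been identified.
	 */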
	wqe_adr = ( MLX_GET ( &cqe->normal, wqe_adr ) << 6 );
	opcode = MLX_GET ( &cqe->normal, opcode );
	if ( opcode >= ARBEL_OPCODE_RECV_ERROR ) {
		/* "s" field is not valid for error opcodes */
		is_send = ( opcode == ARBEL_OPCODE_SEND_ERROR );
		DBGC ( arbel, "Arbel %p CQN %#lx %s QPN %#lx syndrome %#x "
		       "vendor %#x\n", arbel, cq->cqn,
		       ( is_send ? "send" : "recv" ), qpn,
		       MLX_GET ( &cqe->error, syndrome ),
		       MLX_GET ( &cqe->error, vendor_code ) );
		DBGC_HDA ( arbel, virt_to_phys ( cqe ), cqe, sizeof ( *cqe ) );
		rc = -EIO;
		/* Don't return immediately; propagate error to completer */
	}

	/* Identify work queue */
	wq = ib_find_wq ( cq, qpn, is_send );
	if ( ! wq ) {
		DBGC ( arbel, "Arbel %p CQN %#lx unknown %s QPN %#lx\n",
		       arbel, cq->cqn, ( is_send ? "send" : "recv" ), qpn );
		return -EIO;
	}
	qp = wq->qp;
	arbel_qp = ib_qp_get_drvdata ( qp );
	arbel_send_wq = &arbel_qp->send;
	arbel_recv_wq = &arbel_qp->recv;

	/* Identify work queue entry index */
	if ( is_send ) {
		wqe_idx = ( ( wqe_adr - virt_to_bus ( arbel_send_wq->wqe ) ) /
			    sizeof ( arbel_send_wq->wqe[0] ) );
		assert ( wqe_idx < qp->send.num_wqes );
	} else {
		wqe_idx = ( ( wqe_adr - virt_to_bus ( arbel_recv_wq->wqe ) ) /
			    sizeof ( arbel_recv_wq->wqe[0] ) );
		assert ( wqe_idx < qp->recv.num_wqes );
	}
	DBGCP ( arbel, "Arbel %p CQN %#lx QPN %#lx %s WQE %#lx completed:\n",
		arbel, cq->cqn, qp->qpn, ( is_send ? "send" : "recv" ),
		wqe_idx );
	DBGCP_HDA ( arbel, virt_to_phys ( cqe ), cqe, sizeof ( *cqe ) );

	/* Identify I/O buffer */
	iobuf = wq->iobufs[wqe_idx];
	if ( ! iobuf ) {
		DBGC ( arbel, "Arbel %p CQN %#lx QPN %#lx empty %s WQE %#lx\n",
		       arbel, cq->cqn, qp->qpn, ( is_send ? "send" : "recv" ),
		       wqe_idx );
		return -EIO;
	}
	wq->iobufs[wqe_idx] = NULL;

	if ( is_send ) {
		/* Hand off to completion handler */
		ib_complete_send ( ibdev, qp, iobuf, rc );
	} else {
		/* Set received length */
		len = MLX_GET ( &cqe->normal, byte_cnt );
		recv_wqe = &arbel_recv_wq->wqe[wqe_idx].recv;
		assert ( MLX_GET ( &recv_wqe->data[0], local_address_l ) ==
			 virt_to_bus ( iobuf->data ) );
		assert ( MLX_GET ( &recv_wqe->data[0], byte_count ) ==
			 iob_tailroom ( iobuf ) );
		MLX_FILL_1 ( &recv_wqe->data[0], 0, byte_count, 0 );
		MLX_FILL_1 ( &recv_wqe->data[0], 1,
			     l_key, ARBEL_INVALID_LKEY );
		memset ( &recv_dest, 0, sizeof ( recv_dest ) );
		recv_dest.qpn = qpn;
		switch ( qp->type ) {
		case IB_QPT_SMI:
		case IB_QPT_GSI:
		case IB_QPT_UD:
			/* Locate corresponding GRH */
			assert ( arbel_recv_wq->grh != NULL );
			grh = &arbel_recv_wq->grh[wqe_idx];
			len -= sizeof ( *grh );
			/* Construct address vector */
			source = &recv_source;
			memset ( source, 0, sizeof ( *source ) );
			source->qpn = MLX_GET ( &cqe->normal, rqpn );
			source->lid = MLX_GET ( &cqe->normal, rlid );
			source->sl = MLX_GET ( &cqe->normal, sl );
			recv_dest.gid_present = source->gid_present =
				MLX_GET ( &cqe->normal, g );
			memcpy ( &recv_dest.gid, &grh->dgid,
				 sizeof ( recv_dest.gid ) );
			memcpy ( &source->gid, &grh->sgid,
				 sizeof ( source->gid ) );
			break;
		case IB_QPT_RC:
			source = &qp->av;
			break;
		default:
			assert ( 0 );
			return -EINVAL;
		}
		assert ( len <= iob_tailroom ( iobuf ) );
		iob_put ( iobuf, len );
		/* Hand off to completion handler */
		ib_complete_recv ( ibdev, qp, &recv_dest, source, iobuf, rc );
	}

	return rc;
}

/**
 * Poll completion queue
 *
 * @v ibdev	Infiniband device
 * @v cq	Completion queue
 */
static void arbel_poll_cq ( struct ib_device *ibdev,
			    struct ib_completion_queue *cq ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_completion_queue *arbel_cq = ib_cq_get_drvdata ( cq );
	struct arbelprm_cq_ci_db_record *ci_db_rec;
	union arbelprm_completion_entry *cqe;
	unsigned int cqe_idx_mask;
	int rc;
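	/* Completion queue entries use an ownership-bit protocol: the
	 * hardware clears the owner bit as it writes each entry, and
	 * software sets it again once the entry has been processed.
	 * The consumer-index doorbell record tells the hardware how
	 * far software has progressed.
	 */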
	while ( 1 ) {
		/* Look for completion entry */
		cqe_idx_mask = ( cq->num_cqes - 1 );
		cqe = &arbel_cq->cqe[cq->next_idx & cqe_idx_mask];
		if ( MLX_GET ( &cqe->normal, owner ) != 0 ) {
			/* Entry still owned by hardware; end of poll */
			break;
		}

		/* Handle completion */
		if ( ( rc = arbel_complete ( ibdev, cq, cqe ) ) != 0 ) {
			DBGC ( arbel, "Arbel %p CQN %#lx failed to complete: "
			       "%s\n", arbel, cq->cqn, strerror ( rc ) );
			DBGC_HD ( arbel, cqe, sizeof ( *cqe ) );
		}

		/* Return ownership to hardware */
		MLX_FILL_1 ( &cqe->normal, 7, owner, 1 );
		barrier();

		/* Update completion queue's index */
		cq->next_idx++;

		/* Update doorbell record */
		ci_db_rec = &arbel->db_rec[arbel_cq->ci_doorbell_idx].cq_ci;
		MLX_FILL_1 ( ci_db_rec, 0,
			     counter, ( cq->next_idx & 0xffffffffUL ) );
	}
}

/***************************************************************************
 *
 * Event queues
 *
 ***************************************************************************
 */

/**
 * Create event queue
 *
 * @v arbel	Arbel device
 * @ret rc	Return status code
 */
static int arbel_create_eq ( struct arbel *arbel ) {
	struct arbel_event_queue *arbel_eq = &arbel->eq;
	struct arbelprm_eqc eqctx;
	struct arbelprm_event_mask mask;
	unsigned int i;
	int rc;

	/* Select event queue number */
	arbel_eq->eqn = arbel->limits.reserved_eqs;

	/* Calculate doorbell address */
	arbel_eq->doorbell = ( arbel->eq_ci_doorbells +
			       ARBEL_DB_EQ_OFFSET ( arbel_eq->eqn ) );

	/* Allocate event queue itself */
	arbel_eq->eqe_size =
		( ARBEL_NUM_EQES * sizeof ( arbel_eq->eqe[0] ) );
	arbel_eq->eqe = malloc_dma ( arbel_eq->eqe_size,
				     sizeof ( arbel_eq->eqe[0] ) );
	if ( ! arbel_eq->eqe ) {
		rc = -ENOMEM;
		goto err_eqe;
	}
	memset ( arbel_eq->eqe, 0, arbel_eq->eqe_size );
	for ( i = 0 ; i < ARBEL_NUM_EQES ; i++ ) {
		MLX_FILL_1 ( &arbel_eq->eqe[i].generic, 7, owner, 1 );
	}
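	/* Every entry starts out owned by the hardware (owner bit
	 * set); the HCA clears the bit as it writes each event,
	 * exactly as for completion queue entries.
	 */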
	barrier();

	/* Hand queue over to hardware */
	memset ( &eqctx, 0, sizeof ( eqctx ) );
	MLX_FILL_1 ( &eqctx, 0, st, 0xa /* "Fired" */ );
	MLX_FILL_H ( &eqctx, 1,
		     start_address_h, virt_to_phys ( arbel_eq->eqe ) );
	MLX_FILL_1 ( &eqctx, 2,
		     start_address_l, virt_to_phys ( arbel_eq->eqe ) );
	MLX_FILL_1 ( &eqctx, 3, log_eq_size, fls ( ARBEL_NUM_EQES - 1 ) );
	MLX_FILL_1 ( &eqctx, 6, pd, ARBEL_GLOBAL_PD );
	MLX_FILL_1 ( &eqctx, 7, lkey, arbel->lkey );
	if ( ( rc = arbel_cmd_sw2hw_eq ( arbel, arbel_eq->eqn,
					 &eqctx ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p EQN %#lx SW2HW_EQ failed: %s\n",
		       arbel, arbel_eq->eqn, strerror ( rc ) );
		goto err_sw2hw_eq;
	}

	/* Map events to this event queue */
	memset ( &mask, 0xff, sizeof ( mask ) );
	if ( ( rc = arbel_cmd_map_eq ( arbel,
				       ( ARBEL_MAP_EQ | arbel_eq->eqn ),
				       &mask ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p EQN %#lx MAP_EQ failed: %s\n",
		       arbel, arbel_eq->eqn, strerror ( rc ) );
		goto err_map_eq;
	}

	DBGC ( arbel, "Arbel %p EQN %#lx ring [%08lx,%08lx), doorbell %08lx\n",
	       arbel, arbel_eq->eqn, virt_to_phys ( arbel_eq->eqe ),
	       ( virt_to_phys ( arbel_eq->eqe ) + arbel_eq->eqe_size ),
	       virt_to_phys ( arbel_eq->doorbell ) );
	return 0;

 err_map_eq:
	arbel_cmd_hw2sw_eq ( arbel, arbel_eq->eqn, &eqctx );
 err_sw2hw_eq:
	free_dma ( arbel_eq->eqe, arbel_eq->eqe_size );
 err_eqe:
	memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
	return rc;
}

/**
 * Destroy event queue
 *
 * @v arbel	Arbel device
 */
static void arbel_destroy_eq ( struct arbel *arbel ) {
	struct arbel_event_queue *arbel_eq = &arbel->eq;
	struct arbelprm_eqc eqctx;
	struct arbelprm_event_mask mask;
	int rc;

	/* Unmap events from event queue */
	memset ( &mask, 0, sizeof ( mask ) );
	MLX_FILL_1 ( &mask, 1, port_state_change, 1 );
	if ( ( rc = arbel_cmd_map_eq ( arbel,
				       ( ARBEL_UNMAP_EQ | arbel_eq->eqn ),
				       &mask ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p EQN %#lx FATAL MAP_EQ failed to "
		       "unmap: %s\n", arbel, arbel_eq->eqn, strerror ( rc ) );
		/* Continue; HCA may die but system should survive */
	}

	/* Take ownership back from hardware */
	if ( ( rc = arbel_cmd_hw2sw_eq ( arbel, arbel_eq->eqn,
					 &eqctx ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p EQN %#lx FATAL HW2SW_EQ failed: %s\n",
		       arbel, arbel_eq->eqn, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}

	/* Free memory */
	free_dma ( arbel_eq->eqe, arbel_eq->eqe_size );
	memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
}

/**
 * Handle port state event
 *
 * @v arbel	Arbel device
 * @v eqe	Port state change event queue entry
 */
static void arbel_event_port_state_change ( struct arbel *arbel,
					    union arbelprm_event_entry *eqe ) {
	unsigned int port;
	int link_up;

	/* Get port and link status */
	port = ( MLX_GET ( &eqe->port_state_change, data.p ) - 1 );
	link_up = ( MLX_GET ( &eqe->generic, event_sub_type ) & 0x04 );
	DBGC ( arbel, "Arbel %p port %d link %s\n", arbel, ( port + 1 ),
	       ( link_up ? "up" : "down" ) );

	/* Sanity check */
	if ( port >= ARBEL_NUM_PORTS ) {
		DBGC ( arbel, "Arbel %p port %d does not exist!\n",
		       arbel, ( port + 1 ) );
		return;
	}

	/* Update MAD parameters */
	ib_smc_update ( arbel->ibdev[port], arbel_mad );
}

/**
 * Poll event queue
 *
 * @v ibdev	Infiniband device
 */
static void arbel_poll_eq ( struct ib_device *ibdev ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_event_queue *arbel_eq = &arbel->eq;
	union arbelprm_event_entry *eqe;
	union arbelprm_eq_doorbell_register db_reg;
	unsigned int eqe_idx_mask;
	unsigned int event_type;

	/* No event is generated upon reaching INIT, so we must poll
	 * separately for link state changes while we remain DOWN.
	 */
	if ( ib_is_open ( ibdev ) &&
	     ( ibdev->port_state == IB_PORT_STATE_DOWN ) ) {
		ib_smc_update ( ibdev, arbel_mad );
	}

	/* Poll event queue */
	while ( 1 ) {
		/* Look for event entry */
		eqe_idx_mask = ( ARBEL_NUM_EQES - 1 );
		eqe = &arbel_eq->eqe[arbel_eq->next_idx & eqe_idx_mask];
		if ( MLX_GET ( &eqe->generic, owner ) != 0 ) {
			/* Entry still owned by hardware; end of poll */
			break;
		}
		DBGCP ( arbel, "Arbel %p EQN %#lx event:\n",
			arbel, arbel_eq->eqn );
		DBGCP_HDA ( arbel, virt_to_phys ( eqe ),
			    eqe, sizeof ( *eqe ) );

		/* Handle event */
		event_type = MLX_GET ( &eqe->generic, event_type );
		switch ( event_type ) {
		case ARBEL_EV_PORT_STATE_CHANGE:
			arbel_event_port_state_change ( arbel, eqe );
			break;
		default:
			DBGC ( arbel, "Arbel %p EQN %#lx unrecognised event "
			       "type %#x:\n",
			       arbel, arbel_eq->eqn, event_type );
			DBGC_HDA ( arbel, virt_to_phys ( eqe ),
				   eqe, sizeof ( *eqe ) );
			break;
		}

		/* Return ownership to hardware */
		MLX_FILL_1 ( &eqe->generic, 7, owner, 1 );
		barrier();

		/* Update event queue's index */
		arbel_eq->next_idx++;

		/* Ring doorbell */
		MLX_FILL_1 ( &db_reg.ci, 0, ci, arbel_eq->next_idx );
		writel ( db_reg.dword[0], arbel_eq->doorbell );
	}
}

/***************************************************************************
 *
 * Firmware control
 *
 ***************************************************************************
 */

/**
 * Map virtual to physical address for firmware usage
 *
 * @v arbel	Arbel device
 * @v map	Mapping function
 * @v va	Virtual address
 * @v pa	Physical address
 * @v len	Length of region
 * @ret rc	Return status code
 */
static int arbel_map_vpm ( struct arbel *arbel,
			   int ( *map ) ( struct arbel *arbel,
			     const struct arbelprm_virtual_physical_mapping* ),
			   uint64_t va, physaddr_t pa, size_t len ) {
	struct arbelprm_virtual_physical_mapping mapping;
	physaddr_t start;
	physaddr_t low;
	physaddr_t high;
	physaddr_t end;
	size_t size;
	int rc;

	/* Sanity checks */
	assert ( ( va & ( ARBEL_PAGE_SIZE - 1 ) ) == 0 );
	assert ( ( pa & ( ARBEL_PAGE_SIZE - 1 ) ) == 0 );
	assert ( ( len & ( ARBEL_PAGE_SIZE - 1 ) ) == 0 );

	/* Calculate starting points */
	start = pa;
	end = ( start + len );
	size = ( 1UL << ( fls ( start ^ end ) - 1 ) );
	low = high = ( end & ~( size - 1 ) );
	assert ( start < low );
	assert ( high <= end );

	/* These mappings tend to generate huge volumes of
	 * uninteresting debug data, which basically makes it
	 * impossible to use debugging otherwise.
	 */
	DBG_DISABLE ( DBGLVL_LOG | DBGLVL_EXTRA );
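	/* The firmware accepts only naturally-aligned, power-of-two
	 * sized blocks.  Cover [start,end) by beginning with the
	 * largest aligned block that fits and working outwards in both
	 * directions, halving the block size whenever neither side can
	 * accept another block of the current size.
	 */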
	/* Map blocks in descending order of size */
	while ( size >= ARBEL_PAGE_SIZE ) {

		/* Find the next candidate block */
		if ( ( low - size ) >= start ) {
			low -= size;
			pa = low;
		} else if ( ( high + size ) <= end ) {
			pa = high;
			high += size;
		} else {
			size >>= 1;
			continue;
		}
		assert ( ( va & ( size - 1 ) ) == 0 );
		assert ( ( pa & ( size - 1 ) ) == 0 );

		/* Map this block */
		memset ( &mapping, 0, sizeof ( mapping ) );
		MLX_FILL_1 ( &mapping, 0, va_h, ( va >> 32 ) );
		MLX_FILL_1 ( &mapping, 1, va_l, ( va >> 12 ) );
		MLX_FILL_H ( &mapping, 2, pa_h, pa );
		MLX_FILL_2 ( &mapping, 3,
			     log2size, ( ( fls ( size ) - 1 ) - 12 ),
			     pa_l, ( pa >> 12 ) );
		if ( ( rc = map ( arbel, &mapping ) ) != 0 ) {
			DBG_ENABLE ( DBGLVL_LOG | DBGLVL_EXTRA );
			DBGC ( arbel, "Arbel %p could not map %08llx+%zx to "
			       "%08lx: %s\n",
			       arbel, va, size, pa, strerror ( rc ) );
			return rc;
		}
		va += size;
	}
	assert ( low == start );
	assert ( high == end );
	DBG_ENABLE ( DBGLVL_LOG | DBGLVL_EXTRA );

	return 0;
}

/**
 * Start firmware running
 *
 * @v arbel	Arbel device
 * @ret rc	Return status code
 */
static int arbel_start_firmware ( struct arbel *arbel ) {
	struct arbelprm_query_fw fw;
	struct arbelprm_access_lam lam;
	unsigned int fw_pages;
	size_t fw_len;
	physaddr_t fw_base;
	uint64_t eq_set_ci_base_addr;
	int rc;

	/* Get firmware parameters */
	if ( ( rc = arbel_cmd_query_fw ( arbel, &fw ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not query firmware: %s\n",
		       arbel, strerror ( rc ) );
		goto err_query_fw;
	}
	DBGC ( arbel, "Arbel %p firmware version %d.%d.%d\n", arbel,
	       MLX_GET ( &fw, fw_rev_major ), MLX_GET ( &fw, fw_rev_minor ),
	       MLX_GET ( &fw, fw_rev_subminor ) );
	fw_pages = MLX_GET ( &fw, fw_pages );
	DBGC ( arbel, "Arbel %p requires %d kB for firmware\n",
	       arbel, ( fw_pages * 4 ) );
	eq_set_ci_base_addr =
		( ( (uint64_t) MLX_GET ( &fw, eq_set_ci_base_addr_h ) << 32 ) |
		  ( (uint64_t) MLX_GET ( &fw, eq_set_ci_base_addr_l ) ) );
	arbel->eq_ci_doorbells = ioremap ( eq_set_ci_base_addr, 0x200 );

	/* Enable locally-attached memory.  Ignore failure; there may
	 * be no attached memory.
	 */
	arbel_cmd_enable_lam ( arbel, &lam );

	/* Allocate firmware pages and map firmware area */
	fw_len = ( fw_pages * ARBEL_PAGE_SIZE );
	if ( ! arbel->firmware_area ) {
		arbel->firmware_len = fw_len;
		arbel->firmware_area = umalloc ( arbel->firmware_len );
		if ( ! arbel->firmware_area ) {
			rc = -ENOMEM;
			goto err_alloc_fa;
		}
	} else {
		assert ( arbel->firmware_len == fw_len );
	}
	fw_base = user_to_phys ( arbel->firmware_area, 0 );
	DBGC ( arbel, "Arbel %p firmware area at [%08lx,%08lx)\n",
	       arbel, fw_base, ( fw_base + fw_len ) );
	if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_fa,
				    0, fw_base, fw_len ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not map firmware: %s\n",
		       arbel, strerror ( rc ) );
		goto err_map_fa;
	}

	/* Start firmware */
	if ( ( rc = arbel_cmd_run_fw ( arbel ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not run firmware: %s\n",
		       arbel, strerror ( rc ) );
		goto err_run_fw;
	}

	DBGC ( arbel, "Arbel %p firmware started\n", arbel );
	return 0;

 err_run_fw:
	arbel_cmd_unmap_fa ( arbel );
 err_map_fa:
 err_alloc_fa:
 err_query_fw:
	return rc;
}

/**
 * Stop firmware running
 *
 * @v arbel	Arbel device
 */
static void arbel_stop_firmware ( struct arbel *arbel ) {
	int rc;

	if ( ( rc = arbel_cmd_unmap_fa ( arbel ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p FATAL could not stop firmware: %s\n",
		       arbel, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		arbel->firmware_area = UNULL;
		return;
	}
}

/***************************************************************************
 *
 * Infinihost Context Memory management
 *
 ***************************************************************************
 */

/**
 * Get device limits
 *
 * @v arbel	Arbel device
 * @ret rc	Return status code
 */
static int arbel_get_limits ( struct arbel *arbel ) {
	struct arbelprm_query_dev_lim dev_lim;
	int rc;

	if ( ( rc = arbel_cmd_query_dev_lim ( arbel, &dev_lim ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not get device limits: %s\n",
		       arbel, strerror ( rc ) );
		return rc;
	}

	arbel->limits.reserved_qps =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_qps ) );
	arbel->limits.qpc_entry_size = MLX_GET ( &dev_lim, qpc_entry_sz );
	arbel->limits.eqpc_entry_size = MLX_GET ( &dev_lim, eqpc_entry_sz );
	arbel->limits.reserved_srqs =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_srqs ) );
	arbel->limits.srqc_entry_size = MLX_GET ( &dev_lim, srq_entry_sz );
	arbel->limits.reserved_ees =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_ees ) );
	arbel->limits.eec_entry_size = MLX_GET ( &dev_lim, eec_entry_sz );
	arbel->limits.eeec_entry_size = MLX_GET ( &dev_lim, eeec_entry_sz );
	arbel->limits.reserved_cqs =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_cqs ) );
	arbel->limits.cqc_entry_size = MLX_GET ( &dev_lim, cqc_entry_sz );
	arbel->limits.reserved_mtts =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_mtts ) );
	arbel->limits.mtt_entry_size = MLX_GET ( &dev_lim, mtt_entry_sz );
	arbel->limits.reserved_mrws =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_mrws ) );
	arbel->limits.mpt_entry_size = MLX_GET ( &dev_lim, mpt_entry_sz );
	arbel->limits.reserved_rdbs =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_rdbs ) );
	arbel->limits.reserved_eqs = MLX_GET ( &dev_lim, num_rsvd_eqs );
	arbel->limits.eqc_entry_size = MLX_GET ( &dev_lim, eqc_entry_sz );
	arbel->limits.reserved_uars = MLX_GET ( &dev_lim, num_rsvd_uars );
	arbel->limits.uar_scratch_entry_size =
		MLX_GET ( &dev_lim, uar_scratch_entry_sz );

	DBGC ( arbel, "Arbel %p reserves %d x %#zx QPC, %d x %#zx EQPC, "
	       "%d x %#zx SRQC\n", arbel,
	       arbel->limits.reserved_qps, arbel->limits.qpc_entry_size,
	       arbel->limits.reserved_qps, arbel->limits.eqpc_entry_size,
	       arbel->limits.reserved_srqs, arbel->limits.srqc_entry_size );
	DBGC ( arbel, "Arbel %p reserves %d x %#zx EEC, %d x %#zx EEEC, "
	       "%d x %#zx CQC\n", arbel,
	       arbel->limits.reserved_ees, arbel->limits.eec_entry_size,
	       arbel->limits.reserved_ees, arbel->limits.eeec_entry_size,
	       arbel->limits.reserved_cqs, arbel->limits.cqc_entry_size );
	DBGC ( arbel, "Arbel %p reserves %d x %#zx EQC, %d x %#zx MTT, "
	       "%d x %#zx MPT\n", arbel,
	       arbel->limits.reserved_eqs, arbel->limits.eqc_entry_size,
	       arbel->limits.reserved_mtts, arbel->limits.mtt_entry_size,
	       arbel->limits.reserved_mrws, arbel->limits.mpt_entry_size );
	DBGC ( arbel, "Arbel %p reserves %d x %#zx RDB, %d x %#zx UAR, "
	       "%d x %#zx UAR scratchpad\n", arbel,
	       arbel->limits.reserved_rdbs, ARBEL_RDB_ENTRY_SIZE,
	       arbel->limits.reserved_uars, ARBEL_PAGE_SIZE,
	       arbel->limits.reserved_uars,
	       arbel->limits.uar_scratch_entry_size );

	return 0;
}

/**
 * Align ICM table
 *
 * @v icm_offset	Current ICM offset
 * @v len		ICM table length
 * @ret icm_offset	ICM offset
 */
static size_t icm_align ( size_t icm_offset, size_t len ) {

	/* Round up to a multiple of the table size */
	assert ( len == ( 1UL << ( fls ( len ) - 1 ) ) );
	return ( ( icm_offset + len - 1 ) & ~( len - 1 ) );
}

/**
 * Allocate ICM
 *
 * @v arbel	Arbel device
 * @v init_hca	INIT_HCA structure to fill in
 * @ret rc	Return status code
 */
static int arbel_alloc_icm ( struct arbel *arbel,
			     struct arbelprm_init_hca *init_hca ) {
	struct arbelprm_scalar_parameter icm_size;
	struct arbelprm_scalar_parameter icm_aux_size;
	struct arbelprm_scalar_parameter unmap_icm;
	union arbelprm_doorbell_record *db_rec;
	size_t icm_offset = 0;
	unsigned int log_num_uars, log_num_qps, log_num_srqs, log_num_ees;
	unsigned int log_num_cqs, log_num_mtts, log_num_mpts, log_num_rdbs;
	unsigned int log_num_eqs, log_num_mcs;
	size_t icm_len, icm_aux_len;
	size_t len;
	physaddr_t icm_phys;
	int rc;
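	/* ICM ("Infinihost Context Memory") is ordinary host memory
	 * lent to the HCA to hold its context tables (QPCs, CQCs,
	 * MTTs, etc.) in lieu of on-board memory.  Each table must be
	 * naturally aligned on its own power-of-two size, hence the
	 * icm_align() before every allocation below.
	 */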
	/* Calculate number of each object type within ICM */
	log_num_qps = fls ( arbel->limits.reserved_qps +
			    ARBEL_RSVD_SPECIAL_QPS + ARBEL_MAX_QPS - 1 );
	log_num_srqs = fls ( arbel->limits.reserved_srqs - 1 );
	log_num_ees = fls ( arbel->limits.reserved_ees - 1 );
	log_num_cqs = fls ( arbel->limits.reserved_cqs + ARBEL_MAX_CQS - 1 );
	log_num_eqs = fls ( arbel->limits.reserved_eqs + ARBEL_MAX_EQS - 1 );
	log_num_mtts = fls ( arbel->limits.reserved_mtts - 1 );
	log_num_mpts = fls ( arbel->limits.reserved_mrws + 1 - 1 );
	log_num_rdbs = fls ( arbel->limits.reserved_rdbs +
			     ARBEL_RSVD_SPECIAL_QPS + ARBEL_MAX_QPS - 1 );
	log_num_uars = fls ( arbel->limits.reserved_uars +
			     1 /* single UAR used */ - 1 );
	log_num_mcs = ARBEL_LOG_MULTICAST_HASH_SIZE;

	/* Queue pair contexts */
	len = ( ( 1 << log_num_qps ) * arbel->limits.qpc_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_2 ( init_hca, 13,
		     qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_l,
		     ( icm_offset >> 7 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp,
		     log_num_qps );
	DBGC ( arbel, "Arbel %p ICM QPC is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_qps ), arbel->limits.qpc_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Extended queue pair contexts */
	len = ( ( 1 << log_num_qps ) * arbel->limits.eqpc_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 25,
		     qpc_eec_cqc_eqc_rdb_parameters.eqpc_base_addr_l,
		     icm_offset );
	DBGC ( arbel, "Arbel %p ICM EQPC is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_qps ), arbel->limits.eqpc_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Completion queue contexts */
	len = ( ( 1 << log_num_cqs ) * arbel->limits.cqc_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_2 ( init_hca, 21,
		     qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_l,
		     ( icm_offset >> 6 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq,
		     log_num_cqs );
	DBGC ( arbel, "Arbel %p ICM CQC is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_cqs ), arbel->limits.cqc_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Event queue contexts */
	len = ( ( 1 << log_num_eqs ) * arbel->limits.eqc_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_2 ( init_hca, 33,
		     qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_l,
		     ( icm_offset >> 6 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_eq,
		     log_num_eqs );
	DBGC ( arbel, "Arbel %p ICM EQC is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_eqs ), arbel->limits.eqc_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* End-to-end contexts */
	len = ( ( 1 << log_num_ees ) * arbel->limits.eec_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_2 ( init_hca, 17,
		     qpc_eec_cqc_eqc_rdb_parameters.eec_base_addr_l,
		     ( icm_offset >> 7 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_ee,
		     log_num_ees );
	DBGC ( arbel, "Arbel %p ICM EEC is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_ees ), arbel->limits.eec_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Shared receive queue contexts */
	len = ( ( 1 << log_num_srqs ) * arbel->limits.srqc_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_2 ( init_hca, 19,
		     qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_l,
		     ( icm_offset >> 5 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_srq,
		     log_num_srqs );
	DBGC ( arbel, "Arbel %p ICM SRQC is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_srqs ), arbel->limits.srqc_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Memory protection table */
	len = ( ( 1 << log_num_mpts ) * arbel->limits.mpt_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 61,
		     tpt_parameters.mpt_base_adr_l, icm_offset );
	MLX_FILL_1 ( init_hca, 62,
		     tpt_parameters.log_mpt_sz, log_num_mpts );
	DBGC ( arbel, "Arbel %p ICM MPT is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_mpts ), arbel->limits.mpt_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Remote read data base table */
	len = ( ( 1 << log_num_rdbs ) * ARBEL_RDB_ENTRY_SIZE );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 37,
		     qpc_eec_cqc_eqc_rdb_parameters.rdb_base_addr_l,
		     icm_offset );
	DBGC ( arbel, "Arbel %p ICM RDB is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_rdbs ), ARBEL_RDB_ENTRY_SIZE,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Extended end-to-end contexts */
	len = ( ( 1 << log_num_ees ) * arbel->limits.eeec_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 29,
		     qpc_eec_cqc_eqc_rdb_parameters.eeec_base_addr_l,
		     icm_offset );
	DBGC ( arbel, "Arbel %p ICM EEEC is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_ees ), arbel->limits.eeec_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Multicast table */
	len = ( ( 1 << log_num_mcs ) * sizeof ( struct arbelprm_mgm_entry ) );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 49,
		     multicast_parameters.mc_base_addr_l, icm_offset );
	MLX_FILL_1 ( init_hca, 52,
		     multicast_parameters.log_mc_table_entry_sz,
		     fls ( sizeof ( struct arbelprm_mgm_entry ) - 1 ) );
	MLX_FILL_1 ( init_hca, 53,
		     multicast_parameters.mc_table_hash_sz,
		     ( 1 << log_num_mcs ) );
	MLX_FILL_1 ( init_hca, 54,
		     multicast_parameters.log_mc_table_sz,
		     log_num_mcs /* Only one entry per hash */ );
	DBGC ( arbel, "Arbel %p ICM MC is %d x %#zx at [%zx,%zx)\n", arbel,
	       ( 1 << log_num_mcs ), sizeof ( struct arbelprm_mgm_entry ),
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Memory translation table */
	len = ( ( 1 << log_num_mtts ) * arbel->limits.mtt_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 65,
		     tpt_parameters.mtt_base_addr_l, icm_offset );
	DBGC ( arbel, "Arbel %p ICM MTT is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_mtts ), arbel->limits.mtt_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* User access region scratchpads */
	len = ( ( 1 << log_num_uars ) * arbel->limits.uar_scratch_entry_size );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 77,
		     uar_parameters.uar_scratch_base_addr_l, icm_offset );
	DBGC ( arbel, "Arbel %p UAR scratchpad is %d x %#zx at [%zx,%zx)\n",
	       arbel, ( 1 << log_num_uars ),
	       arbel->limits.uar_scratch_entry_size,
	       icm_offset, ( icm_offset + len ) );
	icm_offset += len;

	/* Record amount of ICM to be allocated */
	icm_offset = icm_align ( icm_offset, ARBEL_PAGE_SIZE );
	icm_len = icm_offset;

	/* User access region contexts
	 *
	 * The reserved UAR(s) do not need to be backed by physical
	 * memory, and our UAR is allocated separately; neither are
	 * part of the umalloc()ed ICM block, but both contribute to
	 * the total length of ICM virtual address space.
	 */
	len = ( ( 1 << log_num_uars ) * ARBEL_PAGE_SIZE );
	icm_offset = icm_align ( icm_offset, len );
	MLX_FILL_1 ( init_hca, 74, uar_parameters.log_max_uars, log_num_uars );
	MLX_FILL_1 ( init_hca, 79,
		     uar_parameters.uar_context_base_addr_l, icm_offset );
	arbel->db_rec_offset =
		( icm_offset +
		  ( arbel->limits.reserved_uars * ARBEL_PAGE_SIZE ) );
	DBGC ( arbel, "Arbel %p UAR is %d x %#zx at [%zx,%zx), doorbells "
	       "[%zx,%zx)\n", arbel, ( 1 << log_num_uars ), ARBEL_PAGE_SIZE,
	       icm_offset, ( icm_offset + len ), arbel->db_rec_offset,
	       ( arbel->db_rec_offset + ARBEL_PAGE_SIZE ) );
	icm_offset += len;

	/* Get ICM auxiliary area size */
	memset ( &icm_size, 0, sizeof ( icm_size ) );
	MLX_FILL_1 ( &icm_size, 1, value, icm_len );
	if ( ( rc = arbel_cmd_set_icm_size ( arbel, &icm_size,
					     &icm_aux_size ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not set ICM size: %s\n",
		       arbel, strerror ( rc ) );
		goto err_set_icm_size;
	}
	icm_aux_len = ( MLX_GET ( &icm_aux_size, value ) * ARBEL_PAGE_SIZE );

	/* Allocate ICM data and auxiliary area */
	DBGC ( arbel, "Arbel %p requires %zd kB ICM and %zd kB AUX ICM\n",
	       arbel, ( icm_len / 1024 ), ( icm_aux_len / 1024 ) );
	if ( ! arbel->icm ) {
		arbel->icm_len = icm_len;
		arbel->icm_aux_len = icm_aux_len;
		arbel->icm = umalloc ( arbel->icm_len + arbel->icm_aux_len );
		if ( ! arbel->icm ) {
			rc = -ENOMEM;
			goto err_alloc_icm;
		}
	} else {
		assert ( arbel->icm_len == icm_len );
		assert ( arbel->icm_aux_len == icm_aux_len );
	}
	icm_phys = user_to_phys ( arbel->icm, 0 );

	/* Allocate doorbell UAR */
	arbel->db_rec = malloc_dma ( ARBEL_PAGE_SIZE, ARBEL_PAGE_SIZE );
	if ( ! arbel->db_rec ) {
		rc = -ENOMEM;
		goto err_alloc_doorbell;
	}

	/* Map ICM auxiliary area */
	DBGC ( arbel, "Arbel %p ICM AUX at [%08lx,%08lx)\n",
	       arbel, icm_phys, ( icm_phys + arbel->icm_aux_len ) );
	if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_icm_aux,
				    0, icm_phys, arbel->icm_aux_len ) ) != 0 ){
		DBGC ( arbel, "Arbel %p could not map AUX ICM: %s\n",
		       arbel, strerror ( rc ) );
		goto err_map_icm_aux;
	}
	icm_phys += arbel->icm_aux_len;

	/* Map ICM area */
	DBGC ( arbel, "Arbel %p ICM at [%08lx,%08lx)\n",
	       arbel, icm_phys, ( icm_phys + arbel->icm_len ) );
	if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_icm,
				    0, icm_phys, arbel->icm_len ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not map ICM: %s\n",
		       arbel, strerror ( rc ) );
		goto err_map_icm;
	}
	icm_phys += arbel->icm_len;

	/* Map doorbell UAR */
	DBGC ( arbel, "Arbel %p UAR at [%08lx,%08lx)\n",
	       arbel, virt_to_phys ( arbel->db_rec ),
	       ( virt_to_phys ( arbel->db_rec ) + ARBEL_PAGE_SIZE ) );
	if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_icm,
				    arbel->db_rec_offset,
				    virt_to_phys ( arbel->db_rec ),
				    ARBEL_PAGE_SIZE ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not map doorbell UAR: %s\n",
		       arbel, strerror ( rc ) );
		goto err_map_doorbell;
	}

	/* Initialise doorbell records */
	memset ( arbel->db_rec, 0, ARBEL_PAGE_SIZE );
	db_rec = &arbel->db_rec[ARBEL_GROUP_SEPARATOR_DOORBELL];
	MLX_FILL_1 ( &db_rec->qp, 1, res, ARBEL_UAR_RES_GROUP_SEP );

	return 0;
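	/* (Not reached on success: the statements below are the unwind
	 * step for the doorbell UAR mapping, and belong to the error
	 * path.)
	 */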
	memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
	MLX_FILL_1 ( &unmap_icm, 1, value, arbel->db_rec_offset );
	arbel_cmd_unmap_icm ( arbel, 1, &unmap_icm );
 err_map_doorbell:
	memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
	arbel_cmd_unmap_icm ( arbel, ( arbel->icm_len / ARBEL_PAGE_SIZE ),
			      &unmap_icm );
 err_map_icm:
	arbel_cmd_unmap_icm_aux ( arbel );
 err_map_icm_aux:
	free_dma ( arbel->db_rec, ARBEL_PAGE_SIZE );
	arbel->db_rec = NULL;
 err_alloc_doorbell:
 err_alloc_icm:
 err_set_icm_size:
	return rc;
}

/**
 * Free ICM
 *
 * @v arbel	Arbel device
 */
static void arbel_free_icm ( struct arbel *arbel ) {
	struct arbelprm_scalar_parameter unmap_icm;

	memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
	MLX_FILL_1 ( &unmap_icm, 1, value, arbel->db_rec_offset );
	arbel_cmd_unmap_icm ( arbel, 1, &unmap_icm );
	memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
	arbel_cmd_unmap_icm ( arbel, ( arbel->icm_len / ARBEL_PAGE_SIZE ),
			      &unmap_icm );
	arbel_cmd_unmap_icm_aux ( arbel );
	free_dma ( arbel->db_rec, ARBEL_PAGE_SIZE );
	arbel->db_rec = NULL;
}

/***************************************************************************
 *
 * Initialisation and teardown
 *
 ***************************************************************************
 */

/**
 * Reset device
 *
 * @v arbel	Arbel device
 */
static void arbel_reset ( struct arbel *arbel ) {
	struct pci_device *pci = arbel->pci;
	struct pci_config_backup backup;
	static const uint8_t backup_exclude[] =
		PCI_CONFIG_BACKUP_EXCLUDE ( 0x58, 0x5c );
	uint16_t vendor;
	unsigned int i;
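	/* The software reset wipes the device's PCI configuration
	 * space, so it is backed up beforehand and restored once the
	 * device responds to configuration reads again (the vendor ID
	 * reads as 0xffff while the reset is still in progress).
	 */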
	/* Perform device reset and preserve PCI configuration */
	pci_backup ( pci, &backup, backup_exclude );
	writel ( ARBEL_RESET_MAGIC,
		 ( arbel->config + ARBEL_RESET_OFFSET ) );
	for ( i = 0 ; i < ARBEL_RESET_WAIT_TIME_MS ; i++ ) {
		mdelay ( 1 );
		pci_read_config_word ( pci, PCI_VENDOR_ID, &vendor );
		if ( vendor != 0xffff )
			break;
	}
	pci_restore ( pci, &backup, backup_exclude );
}

/**
 * Set up memory protection table
 *
 * @v arbel	Arbel device
 * @ret rc	Return status code
 */
static int arbel_setup_mpt ( struct arbel *arbel ) {
	struct arbelprm_mpt mpt;
	uint32_t key;
	int rc;

	/* Derive key */
	key = ( arbel->limits.reserved_mrws | ARBEL_MKEY_PREFIX );
	arbel->lkey = ( ( key << 8 ) | ( key >> 24 ) );
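	/* The key programmed into the MPT and the L_Key quoted in work
	 * queue entries are byte-rotations of one another: the 8-bit
	 * key tag sits at opposite ends of the 32-bit value in the two
	 * formats.
	 */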
	/* Initialise memory protection table */
	memset ( &mpt, 0, sizeof ( mpt ) );
	MLX_FILL_7 ( &mpt, 0,
		     a, 1,
		     rw, 1,
		     rr, 1,
		     lw, 1,
		     lr, 1,
		     pa, 1,
		     r_w, 1 );
	MLX_FILL_1 ( &mpt, 2, mem_key, key );
	MLX_FILL_2 ( &mpt, 3,
		     pd, ARBEL_GLOBAL_PD,
		     rae, 1 );
	MLX_FILL_1 ( &mpt, 6, reg_wnd_len_h, 0xffffffffUL );
	MLX_FILL_1 ( &mpt, 7, reg_wnd_len_l, 0xffffffffUL );
	if ( ( rc = arbel_cmd_sw2hw_mpt ( arbel, arbel->limits.reserved_mrws,
					  &mpt ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not set up MPT: %s\n",
		       arbel, strerror ( rc ) );
		return rc;
	}

	return 0;
}

/**
 * Configure special queue pairs
 *
 * @v arbel	Arbel device
 * @ret rc	Return status code
 */
static int arbel_configure_special_qps ( struct arbel *arbel ) {
	unsigned int smi_qpn_base;
	unsigned int gsi_qpn_base;
	int rc;

	/* Special QP block must be aligned on an even number */
	arbel->special_qpn_base = ( ( arbel->limits.reserved_qps + 1 ) & ~1 );
	arbel->qpn_base = ( arbel->special_qpn_base +
			    ARBEL_NUM_SPECIAL_QPS );
	DBGC ( arbel, "Arbel %p special QPs at [%lx,%lx]\n", arbel,
	       arbel->special_qpn_base, ( arbel->qpn_base - 1 ) );
	smi_qpn_base = arbel->special_qpn_base;
	gsi_qpn_base = ( smi_qpn_base + 2 );
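	/* Each port gets one SMI (QP0) and one GSI (QP1) queue pair:
	 * the first two QPNs in the block are the per-port SMI QPs,
	 * and the following two the per-port GSI QPs.
	 */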
	/* Issue commands to configure special QPs */
	if ( ( rc = arbel_cmd_conf_special_qp ( arbel, 0,
						smi_qpn_base ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not configure SMI QPs: %s\n",
		       arbel, strerror ( rc ) );
		return rc;
	}
	if ( ( rc = arbel_cmd_conf_special_qp ( arbel, 1,
						gsi_qpn_base ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not configure GSI QPs: %s\n",
		       arbel, strerror ( rc ) );
		return rc;
	}

	return 0;
}

/**
 * Start Arbel device
 *
 * @v arbel	Arbel device
 * @v running	Firmware is already running
 * @ret rc	Return status code
 */
static int arbel_start ( struct arbel *arbel, int running ) {
	struct arbelprm_init_hca init_hca;
	unsigned int i;
	int rc;

	/* Start firmware if not already running */
	if ( ! running ) {
		if ( ( rc = arbel_start_firmware ( arbel ) ) != 0 )
			goto err_start_firmware;
	}

	/* Allocate ICM */
	memset ( &init_hca, 0, sizeof ( init_hca ) );
	if ( ( rc = arbel_alloc_icm ( arbel, &init_hca ) ) != 0 )
		goto err_alloc_icm;

	/* Initialise HCA */
	if ( ( rc = arbel_cmd_init_hca ( arbel, &init_hca ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not initialise HCA: %s\n",
		       arbel, strerror ( rc ) );
		goto err_init_hca;
	}

	/* Set up memory protection */
	if ( ( rc = arbel_setup_mpt ( arbel ) ) != 0 )
		goto err_setup_mpt;
	for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ )
		arbel->ibdev[i]->rdma_key = arbel->lkey;

	/* Set up event queue */
	if ( ( rc = arbel_create_eq ( arbel ) ) != 0 )
		goto err_create_eq;

	/* Configure special QPs */
	if ( ( rc = arbel_configure_special_qps ( arbel ) ) != 0 )
		goto err_conf_special_qps;

	return 0;

 err_conf_special_qps:
	arbel_destroy_eq ( arbel );
 err_create_eq:
 err_setup_mpt:
	arbel_cmd_close_hca ( arbel );
 err_init_hca:
	arbel_free_icm ( arbel );
 err_alloc_icm:
	arbel_stop_firmware ( arbel );
 err_start_firmware:
	return rc;
}

/**
 * Stop Arbel device
 *
 * @v arbel	Arbel device
 */
static void arbel_stop ( struct arbel *arbel ) {
	arbel_destroy_eq ( arbel );
	arbel_cmd_close_hca ( arbel );
	arbel_free_icm ( arbel );
	arbel_stop_firmware ( arbel );
	arbel_reset ( arbel );
}

/**
 * Open Arbel device
 *
 * @v arbel	Arbel device
 * @ret rc	Return status code
 */
static int arbel_open ( struct arbel *arbel ) {
	int rc;

	/* Start device if applicable */
	if ( arbel->open_count == 0 ) {
		if ( ( rc = arbel_start ( arbel, 0 ) ) != 0 )
			return rc;
	}

	/* Increment open counter */
	arbel->open_count++;

	return 0;
}

/**
 * Close Arbel device
 *
 * @v arbel	Arbel device
 */
static void arbel_close ( struct arbel *arbel ) {

	/* Decrement open counter */
	assert ( arbel->open_count != 0 );
	arbel->open_count--;

	/* Stop device if applicable */
	if ( arbel->open_count == 0 )
		arbel_stop ( arbel );
}

/***************************************************************************
 *
 * Infiniband link-layer operations
 *
 ***************************************************************************
 */

/**
 * Initialise Infiniband link
 *
 * @v ibdev	Infiniband device
 * @ret rc	Return status code
 */
static int arbel_ib_open ( struct ib_device *ibdev ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbelprm_init_ib init_ib;
	int rc;

	/* Open hardware */
	if ( ( rc = arbel_open ( arbel ) ) != 0 )
		goto err_open;

	/* Initialise IB */
	memset ( &init_ib, 0, sizeof ( init_ib ) );
	MLX_FILL_3 ( &init_ib, 0,
		     mtu_cap, ARBEL_MTU_2048,
		     port_width_cap, 3,
		     vl_cap, 1 );
	MLX_FILL_1 ( &init_ib, 1, max_gid, 1 );
	MLX_FILL_1 ( &init_ib, 2, max_pkey, 64 );
	if ( ( rc = arbel_cmd_init_ib ( arbel, ibdev->port,
					&init_ib ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p port %d could not initialise IB: %s\n",
		       arbel, ibdev->port, strerror ( rc ) );
  2522. goto err_init_ib;
  2523. }
  2524. /* Update MAD parameters */
  2525. ib_smc_update ( ibdev, arbel_mad );
  2526. return 0;
  2527. err_init_ib:
  2528. arbel_close ( arbel );
  2529. err_open:
  2530. return rc;
  2531. }
  2532. /**
  2533. * Close Infiniband link
  2534. *
  2535. * @v ibdev Infiniband device
  2536. */
  2537. static void arbel_ib_close ( struct ib_device *ibdev ) {
  2538. struct arbel *arbel = ib_get_drvdata ( ibdev );
  2539. int rc;
  2540. /* Close IB */
  2541. if ( ( rc = arbel_cmd_close_ib ( arbel, ibdev->port ) ) != 0 ) {
  2542. DBGC ( arbel, "Arbel %p port %d could not close IB: %s\n",
  2543. arbel, ibdev->port, strerror ( rc ) );
  2544. /* Nothing we can do about this */
  2545. }
  2546. /* Close hardware */
  2547. arbel_close ( arbel );
  2548. }
  2549. /**
  2550. * Inform embedded subnet management agent of a received MAD
  2551. *
  2552. * @v ibdev Infiniband device
  2553. * @v mad MAD
  2554. * @ret rc Return status code
  2555. */
  2556. static int arbel_inform_sma ( struct ib_device *ibdev, union ib_mad *mad ) {
  2557. int rc;
  2558. /* Send the MAD to the embedded SMA */
  2559. if ( ( rc = arbel_mad ( ibdev, mad ) ) != 0 )
  2560. return rc;
  2561. /* Update parameters held in software */
  2562. ib_smc_update ( ibdev, arbel_mad );
  2563. return 0;
  2564. }
  2565. /***************************************************************************
  2566. *
  2567. * Multicast group operations
  2568. *
  2569. ***************************************************************************
  2570. */
/**
 * Attach to multicast group
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 * @ret rc		Return status code
 */
static int arbel_mcast_attach ( struct ib_device *ibdev,
				struct ib_queue_pair *qp,
				union ib_gid *gid ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbelprm_mgm_hash hash;
	struct arbelprm_mgm_entry mgm;
	unsigned int index;
	int rc;

	/* Generate hash table index */
	if ( ( rc = arbel_cmd_mgid_hash ( arbel, gid, &hash ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not hash GID: %s\n",
		       arbel, strerror ( rc ) );
		return rc;
	}
	index = MLX_GET ( &hash, hash );

	/* Check for existing hash table entry */
	if ( ( rc = arbel_cmd_read_mgm ( arbel, index, &mgm ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not read MGM %#x: %s\n",
		       arbel, index, strerror ( rc ) );
		return rc;
	}
	if ( MLX_GET ( &mgm, mgmqp_0.qi ) != 0 ) {
		/* FIXME: this implementation allows only a single QP
		 * per multicast group, and doesn't handle hash
		 * collisions.  Sufficient for IPoIB but may need to
		 * be extended in future.
		 */
		DBGC ( arbel, "Arbel %p MGID index %#x already in use\n",
		       arbel, index );
		return -EBUSY;
	}

	/* Update hash table entry */
	MLX_FILL_2 ( &mgm, 8,
		     mgmqp_0.qpn_i, qp->qpn,
		     mgmqp_0.qi, 1 );
	memcpy ( &mgm.u.dwords[4], gid, sizeof ( *gid ) );
	if ( ( rc = arbel_cmd_write_mgm ( arbel, index, &mgm ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not write MGM %#x: %s\n",
		       arbel, index, strerror ( rc ) );
		return rc;
	}

	return 0;
}

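/* A rough sketch of what resolving the FIXME above would involve
 * (hypothetical: it assumes the MGM entry exposes a next-entry link
 * field, as on other Mellanox HCAs): on a hash collision, walk the
 * chain of linked MGM entries until a free one is found, append the
 * new GID there, and update the previous entry's link.  Supporting
 * multiple QPs per group would additionally mean scanning the
 * mgmqp_N slots of a matching entry for a free slot, rather than
 * rejecting the attach with -EBUSY as is done here.
 */
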
/**
 * Detach from multicast group
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 */
static void arbel_mcast_detach ( struct ib_device *ibdev,
				 struct ib_queue_pair *qp __unused,
				 union ib_gid *gid ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbelprm_mgm_hash hash;
	struct arbelprm_mgm_entry mgm;
	unsigned int index;
	int rc;

	/* Generate hash table index */
	if ( ( rc = arbel_cmd_mgid_hash ( arbel, gid, &hash ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not hash GID: %s\n",
		       arbel, strerror ( rc ) );
		return;
	}
	index = MLX_GET ( &hash, hash );

	/* Clear hash table entry */
	memset ( &mgm, 0, sizeof ( mgm ) );
	if ( ( rc = arbel_cmd_write_mgm ( arbel, index, &mgm ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not write MGM %#x: %s\n",
		       arbel, index, strerror ( rc ) );
		return;
	}
}

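/* Note that detach simply zeroes whatever entry the GID hashes to,
 * without reading it back to confirm that the stored GID matches.
 * That is consistent with the single-entry, no-collision assumption
 * documented in arbel_mcast_attach() above, but it would silently
 * clobber an unrelated group if collision handling were ever added
 * to the attach path without updating this path to match.
 */
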
/** Arbel Infiniband operations */
static struct ib_device_operations arbel_ib_operations = {
	.create_cq	= arbel_create_cq,
	.destroy_cq	= arbel_destroy_cq,
	.create_qp	= arbel_create_qp,
	.modify_qp	= arbel_modify_qp,
	.destroy_qp	= arbel_destroy_qp,
	.post_send	= arbel_post_send,
	.post_recv	= arbel_post_recv,
	.poll_cq	= arbel_poll_cq,
	.poll_eq	= arbel_poll_eq,
	.open		= arbel_ib_open,
	.close		= arbel_ib_close,
	.mcast_attach	= arbel_mcast_attach,
	.mcast_detach	= arbel_mcast_detach,
	.set_port_info	= arbel_inform_sma,
	.set_pkey_table	= arbel_inform_sma,
};

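/* A minimal sketch of how the Infiniband core is assumed to drive
 * this table (illustrative only; the calls below live in the core,
 * not in this driver):
 *
 *	if ( ( rc = ibdev->op->open ( ibdev ) ) != 0 )
 *		return rc;
 *	ibdev->op->poll_eq ( ibdev );	// from the core polling loop
 *	ibdev->op->close ( ibdev );
 */
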
/***************************************************************************
 *
 * PCI interface
 *
 ***************************************************************************
 */

/**
 * Allocate Arbel device
 *
 * @ret arbel		Arbel device
 */
static struct arbel * arbel_alloc ( void ) {
	struct arbel *arbel;

	/* Allocate Arbel device */
	arbel = zalloc ( sizeof ( *arbel ) );
	if ( ! arbel )
		goto err_arbel;

	/* Allocate space for mailboxes */
	arbel->mailbox_in = malloc_dma ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
	if ( ! arbel->mailbox_in )
		goto err_mailbox_in;
	arbel->mailbox_out = malloc_dma ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
	if ( ! arbel->mailbox_out )
		goto err_mailbox_out;

	return arbel;

	/* Unreachable: kept so the unwind list stays complete if a
	 * further allocation is ever added above */
	free_dma ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
 err_mailbox_out:
	free_dma ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
 err_mailbox_in:
	free ( arbel );
 err_arbel:
	return NULL;
}

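/* The command mailboxes are allocated with malloc_dma() rather than
 * plain malloc() because the HCA reads command parameters from, and
 * writes results to, these buffers by DMA; ARBEL_MBOX_ALIGN is
 * assumed here to reflect the alignment that the firmware requires
 * for mailbox addresses.
 */
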
/**
 * Free Arbel device
 *
 * @v arbel		Arbel device
 */
static void arbel_free ( struct arbel *arbel ) {

	ufree ( arbel->icm );
	ufree ( arbel->firmware_area );
	free_dma ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
	free_dma ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
	free ( arbel );
}

/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int arbel_probe ( struct pci_device *pci ) {
	struct arbel *arbel;
	struct ib_device *ibdev;
	int i;
	int rc;

	/* Allocate Arbel device */
	arbel = arbel_alloc();
	if ( ! arbel ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	pci_set_drvdata ( pci, arbel );
	arbel->pci = pci;

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map PCI BARs */
	arbel->config = ioremap ( pci_bar_start ( pci, ARBEL_PCI_CONFIG_BAR ),
				  ARBEL_PCI_CONFIG_BAR_SIZE );
	arbel->uar = ioremap ( ( pci_bar_start ( pci, ARBEL_PCI_UAR_BAR ) +
				 ARBEL_PCI_UAR_IDX * ARBEL_PCI_UAR_SIZE ),
			       ARBEL_PCI_UAR_SIZE );

	/* Allocate Infiniband devices */
	for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ ) {
		ibdev = alloc_ibdev ( 0 );
		if ( ! ibdev ) {
			rc = -ENOMEM;
			goto err_alloc_ibdev;
		}
		arbel->ibdev[i] = ibdev;
		ibdev->op = &arbel_ib_operations;
		ibdev->dev = &pci->dev;
		ibdev->port = ( ARBEL_PORT_BASE + i );
		ib_set_drvdata ( ibdev, arbel );
	}

	/* Reset device */
	arbel_reset ( arbel );

	/* Start firmware */
	if ( ( rc = arbel_start_firmware ( arbel ) ) != 0 )
		goto err_start_firmware;

	/* Get device limits */
	if ( ( rc = arbel_get_limits ( arbel ) ) != 0 )
		goto err_get_limits;

	/* Start device */
	if ( ( rc = arbel_start ( arbel, 1 ) ) != 0 )
		goto err_start;

	/* Initialise parameters using SMC */
	for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ )
		ib_smc_init ( arbel->ibdev[i], arbel_mad );

	/* Register Infiniband devices */
	for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ ) {
		if ( ( rc = register_ibdev ( arbel->ibdev[i] ) ) != 0 ) {
			DBGC ( arbel, "Arbel %p port %d could not register IB "
			       "device: %s\n", arbel,
			       arbel->ibdev[i]->port, strerror ( rc ) );
			goto err_register_ibdev;
		}
	}

	/* Leave device quiescent until opened */
	if ( arbel->open_count == 0 )
		arbel_stop ( arbel );

	return 0;

	i = ARBEL_NUM_PORTS;
 err_register_ibdev:
	for ( i-- ; i >= 0 ; i-- )
		unregister_ibdev ( arbel->ibdev[i] );
	arbel_stop ( arbel );
 err_start:
 err_get_limits:
	arbel_stop_firmware ( arbel );
 err_start_firmware:
	i = ARBEL_NUM_PORTS;
 err_alloc_ibdev:
	for ( i-- ; i >= 0 ; i-- )
		ibdev_put ( arbel->ibdev[i] );
	iounmap ( arbel->uar );
	iounmap ( arbel->config );
	arbel_free ( arbel );
 err_alloc:
	return rc;
}

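/* A note on the unwind path above: jumping to err_register_ibdev or
 * err_alloc_ibdev relies on 'i' holding the index of the first entry
 * that failed, so the rollback loops undo only what was completed.
 * The unreachable 'i = ARBEL_NUM_PORTS;' assignments just before
 * those labels keep the unwind correct if a later error were ever
 * made to fall through from above, mirroring the convention used in
 * arbel_alloc().
 */
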
/**
 * Remove PCI device
 *
 * @v pci		PCI device
 */
static void arbel_remove ( struct pci_device *pci ) {
	struct arbel *arbel = pci_get_drvdata ( pci );
	int i;

	for ( i = ( ARBEL_NUM_PORTS - 1 ) ; i >= 0 ; i-- )
		unregister_ibdev ( arbel->ibdev[i] );
	for ( i = ( ARBEL_NUM_PORTS - 1 ) ; i >= 0 ; i-- )
		ibdev_put ( arbel->ibdev[i] );
	iounmap ( arbel->uar );
	iounmap ( arbel->config );
	arbel_free ( arbel );
}

static struct pci_device_id arbel_nics[] = {
	PCI_ROM ( 0x15b3, 0x6282, "mt25218", "MT25218 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6274, "mt25204", "MT25204 HCA driver", 0 ),
};

struct pci_driver arbel_driver __pci_driver = {
	.ids = arbel_nics,
	.id_count = ( sizeof ( arbel_nics ) / sizeof ( arbel_nics[0] ) ),
	.probe = arbel_probe,
	.remove = arbel_remove,
};
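
/* Supporting a further Arbel-family device would in principle only
 * require an additional PCI_ROM entry in arbel_nics[], e.g.
 * (hypothetical device ID shown purely for illustration):
 *
 *	PCI_ROM ( 0x15b3, 0xXXXX, "mtXXXXX", "MTXXXXX HCA driver", 0 ),
 *
 * since id_count is computed from the array size and the probe path
 * does not otherwise depend on the specific device ID.
 */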