You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

arbel.c 59KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979
  1. /*
  2. * Copyright (C) 2007 Michael Brown <mbrown@fensystems.co.uk>.
  3. *
  4. * Based in part upon the original driver by Mellanox Technologies
  5. * Ltd. Portions may be Copyright (c) Mellanox Technologies Ltd.
  6. *
  7. * This program is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU General Public License as
  9. * published by the Free Software Foundation; either version 2 of the
  10. * License, or any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  20. */
  21. #include <stdint.h>
  22. #include <stdlib.h>
  23. #include <stdio.h>
  24. #include <string.h>
  25. #include <strings.h>
  26. #include <unistd.h>
  27. #include <errno.h>
  28. #include <byteswap.h>
  29. #include <gpxe/pci.h>
  30. #include <gpxe/malloc.h>
  31. #include <gpxe/umalloc.h>
  32. #include <gpxe/iobuf.h>
  33. #include <gpxe/netdevice.h>
  34. #include <gpxe/infiniband.h>
  35. #include "arbel.h"
  36. /**
  37. * @file
  38. *
  39. * Mellanox Arbel Infiniband HCA
  40. *
  41. */
  42. /***************************************************************************
  43. *
  44. * Queue number allocation
  45. *
  46. ***************************************************************************
  47. */
  48. /**
  49. * Allocate queue number
  50. *
  51. * @v q_inuse Queue usage bitmask
  52. * @v max_inuse Maximum number of in-use queues
  53. * @ret qn_offset Free queue number offset, or negative error
  54. */
  55. static int arbel_alloc_qn_offset ( arbel_bitmask_t *q_inuse,
  56. unsigned int max_inuse ) {
  57. unsigned int qn_offset = 0;
  58. arbel_bitmask_t mask = 1;
  59. while ( qn_offset < max_inuse ) {
  60. if ( ( mask & *q_inuse ) == 0 ) {
  61. *q_inuse |= mask;
  62. return qn_offset;
  63. }
  64. qn_offset++;
  65. mask <<= 1;
  66. if ( ! mask ) {
  67. mask = 1;
  68. q_inuse++;
  69. }
  70. }
  71. return -ENFILE;
  72. }
  73. /**
  74. * Free queue number
  75. *
  76. * @v q_inuse Queue usage bitmask
  77. * @v qn_offset Queue number offset
  78. */
  79. static void arbel_free_qn_offset ( arbel_bitmask_t *q_inuse, int qn_offset ) {
  80. arbel_bitmask_t mask;
  81. mask = ( 1 << ( qn_offset % ( 8 * sizeof ( mask ) ) ) );
  82. q_inuse += ( qn_offset / ( 8 * sizeof ( mask ) ) );
  83. *q_inuse &= ~mask;
  84. }
  85. /***************************************************************************
  86. *
  87. * HCA commands
  88. *
  89. ***************************************************************************
  90. */
/**
 * Wait for Arbel command completion
 *
 * @v arbel		Arbel device
 * @v hcr		HCA command registers (dword 6 is updated in place)
 * @ret rc		Return status code
 *
 * Polls the "go" bit in HCR dword 6 until the firmware clears it
 * (command interface free) or the timeout expires.
 */
static int arbel_cmd_wait ( struct arbel *arbel,
			    struct arbelprm_hca_command_register *hcr ) {
	unsigned int wait;

	for ( wait = ARBEL_HCR_MAX_WAIT_MS ; wait ; wait-- ) {
		/* Only dword 6 carries the "go" flag; re-read just that */
		hcr->u.dwords[6] =
			readl ( arbel->config + ARBEL_HCR_REG ( 6 ) );
		if ( MLX_GET ( hcr, go ) == 0 )
			return 0;
		/* Poll at roughly 1ms intervals, up to the limit above */
		mdelay ( 1 );
	}
	return -EBUSY;
}
/**
 * Issue HCA command
 *
 * @v arbel		Arbel device
 * @v command		Command opcode, flags and input/output lengths
 * @v op_mod		Opcode modifier (0 if no modifier applicable)
 * @v in		Input parameters
 * @v in_mod		Input modifier (0 if no modifier applicable)
 * @v out		Output parameters
 * @ret rc		Return status code
 *
 * Small parameter blocks travel inline in the HCR dwords; larger ones
 * (flagged via ARBEL_HCR_IN_MBOX / ARBEL_HCR_OUT_MBOX) travel via the
 * pre-allocated DMA mailboxes, whose bus addresses are written into
 * the HCR.
 */
static int arbel_cmd ( struct arbel *arbel, unsigned long command,
		       unsigned int op_mod, const void *in,
		       unsigned int in_mod, void *out ) {
	struct arbelprm_hca_command_register hcr;
	unsigned int opcode = ARBEL_HCR_OPCODE ( command );
	size_t in_len = ARBEL_HCR_IN_LEN ( command );
	size_t out_len = ARBEL_HCR_OUT_LEN ( command );
	void *in_buffer;
	void *out_buffer;
	unsigned int status;
	unsigned int i;
	int rc;

	/* Parameter blocks must fit within the DMA mailbox size */
	assert ( in_len <= ARBEL_MBOX_SIZE );
	assert ( out_len <= ARBEL_MBOX_SIZE );
	DBGC2 ( arbel, "Arbel %p command %02x in %zx%s out %zx%s\n",
		arbel, opcode, in_len,
		( ( command & ARBEL_HCR_IN_MBOX ) ? "(mbox)" : "" ), out_len,
		( ( command & ARBEL_HCR_OUT_MBOX ) ? "(mbox)" : "" ) );

	/* Check that HCR is free */
	if ( ( rc = arbel_cmd_wait ( arbel, &hcr ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p command interface locked\n", arbel );
		return rc;
	}

	/* Prepare HCR: inputs go inline (dwords 0-2) unless the
	 * command uses the input mailbox, in which case dword 1 gets
	 * the mailbox bus address instead.
	 */
	memset ( &hcr, 0, sizeof ( hcr ) );
	in_buffer = &hcr.u.dwords[0];
	if ( in_len && ( command & ARBEL_HCR_IN_MBOX ) ) {
		in_buffer = arbel->mailbox_in;
		MLX_FILL_1 ( &hcr, 1, in_param_l, virt_to_bus ( in_buffer ) );
	}
	memcpy ( in_buffer, in, in_len );
	MLX_FILL_1 ( &hcr, 2, input_modifier, in_mod );
	/* Outputs are returned inline (dwords 3-4) or via the output
	 * mailbox, analogously to the inputs above.
	 */
	out_buffer = &hcr.u.dwords[3];
	if ( out_len && ( command & ARBEL_HCR_OUT_MBOX ) ) {
		out_buffer = arbel->mailbox_out;
		MLX_FILL_1 ( &hcr, 4, out_param_l,
			     virt_to_bus ( out_buffer ) );
	}
	MLX_FILL_3 ( &hcr, 6,
		     opcode, opcode,
		     opcode_modifier, op_mod,
		     go, 1 );
	DBGC2_HD ( arbel, &hcr, sizeof ( hcr ) );
	if ( in_len ) {
		DBGC2 ( arbel, "Input:\n" );
		DBGC2_HD ( arbel, in, ( ( in_len < 512 ) ? in_len : 512 ) );
	}

	/* Issue command: write dwords in order, so that dword 6
	 * (containing the "go" bit) is written last; the barrier()
	 * after each write prevents reordering.
	 */
	for ( i = 0 ; i < ( sizeof ( hcr ) / sizeof ( hcr.u.dwords[0] ) ) ;
	      i++ ) {
		writel ( hcr.u.dwords[i],
			 arbel->config + ARBEL_HCR_REG ( i ) );
		barrier();
	}

	/* Wait for command completion */
	if ( ( rc = arbel_cmd_wait ( arbel, &hcr ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p timed out waiting for command:\n",
		       arbel );
		DBGC_HD ( arbel, &hcr, sizeof ( hcr ) );
		return rc;
	}

	/* Check command status */
	status = MLX_GET ( &hcr, status );
	if ( status != 0 ) {
		DBGC ( arbel, "Arbel %p command failed with status %02x:\n",
		       arbel, status );
		DBGC_HD ( arbel, &hcr, sizeof ( hcr ) );
		return -EIO;
	}

	/* Read output parameters, if any: refresh the inline output
	 * dwords from the device before copying.  If the output went
	 * via mailbox, out_buffer already points there.
	 */
	hcr.u.dwords[3] = readl ( arbel->config + ARBEL_HCR_REG ( 3 ) );
	hcr.u.dwords[4] = readl ( arbel->config + ARBEL_HCR_REG ( 4 ) );
	memcpy ( out, out_buffer, out_len );
	if ( out_len ) {
		DBGC2 ( arbel, "Output:\n" );
		DBGC2_HD ( arbel, out, ( ( out_len < 512 ) ? out_len : 512 ) );
	}
	return 0;
}
/* QUERY_DEV_LIM: read device limits/capabilities into *dev_lim */
static inline int
arbel_cmd_query_dev_lim ( struct arbel *arbel,
			  struct arbelprm_query_dev_lim *dev_lim ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_DEV_LIM,
					       1, sizeof ( *dev_lim ) ),
			   0, NULL, 0, dev_lim );
}

/* QUERY_FW: read firmware parameters into *fw */
static inline int
arbel_cmd_query_fw ( struct arbel *arbel, struct arbelprm_query_fw *fw ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_FW,
					       1, sizeof ( *fw ) ),
			   0, NULL, 0, fw );
}

/* INIT_HCA: initialise the HCA with the supplied parameter block */
static inline int
arbel_cmd_init_hca ( struct arbel *arbel,
		     const struct arbelprm_init_hca *init_hca ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT_HCA,
					      1, sizeof ( *init_hca ) ),
			   0, init_hca, 0, NULL );
}

/* CLOSE_HCA: shut down the HCA */
static inline int
arbel_cmd_close_hca ( struct arbel *arbel ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_CLOSE_HCA ),
			   0, NULL, 0, NULL );
}

/* INIT_IB: bring up an Infiniband port (port passed as input modifier) */
static inline int
arbel_cmd_init_ib ( struct arbel *arbel, unsigned int port,
		    const struct arbelprm_init_ib *init_ib ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT_IB,
					      1, sizeof ( *init_ib ) ),
			   0, init_ib, port, NULL );
}

/* CLOSE_IB: shut down an Infiniband port */
static inline int
arbel_cmd_close_ib ( struct arbel *arbel, unsigned int port ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_CLOSE_IB ),
			   0, NULL, port, NULL );
}
/* SW2HW_MPT: hand a memory protection table entry to hardware */
static inline int
arbel_cmd_sw2hw_mpt ( struct arbel *arbel, unsigned int index,
		      const struct arbelprm_mpt *mpt ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_MPT,
					      1, sizeof ( *mpt ) ),
			   0, mpt, index, NULL );
}

/* SW2HW_EQ: hand an event queue context to hardware */
static inline int
arbel_cmd_sw2hw_eq ( struct arbel *arbel, unsigned int index,
		     const struct arbelprm_eqc *eqc ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_EQ,
					      1, sizeof ( *eqc ) ),
			   0, eqc, index, NULL );
}

/* HW2SW_EQ: reclaim an event queue from hardware */
static inline int
arbel_cmd_hw2sw_eq ( struct arbel *arbel, unsigned int index ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_HW2SW_EQ ),
			   1, NULL, index, NULL );
}

/* SW2HW_CQ: hand a completion queue context to hardware */
static inline int
arbel_cmd_sw2hw_cq ( struct arbel *arbel, unsigned long cqn,
		     const struct arbelprm_completion_queue_context *cqctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_CQ,
					      1, sizeof ( *cqctx ) ),
			   0, cqctx, cqn, NULL );
}

/* HW2SW_CQ: reclaim a completion queue; context is returned in *cqctx */
static inline int
arbel_cmd_hw2sw_cq ( struct arbel *arbel, unsigned long cqn,
		     struct arbelprm_completion_queue_context *cqctx) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_HW2SW_CQ,
					       1, sizeof ( *cqctx ) ),
			   0, NULL, cqn, cqctx );
}
/* RST2INIT_QPEE: transition queue pair from Reset to Init */
static inline int
arbel_cmd_rst2init_qpee ( struct arbel *arbel, unsigned long qpn,
			  const struct arbelprm_qp_ee_state_transitions *ctx ){
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_RST2INIT_QPEE,
					      1, sizeof ( *ctx ) ),
			   0, ctx, qpn, NULL );
}

/* INIT2RTR_QPEE: transition queue pair from Init to Ready-to-Receive */
static inline int
arbel_cmd_init2rtr_qpee ( struct arbel *arbel, unsigned long qpn,
			  const struct arbelprm_qp_ee_state_transitions *ctx ){
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT2RTR_QPEE,
					      1, sizeof ( *ctx ) ),
			   0, ctx, qpn, NULL );
}

/* RTR2RTS_QPEE: transition queue pair from RTR to Ready-to-Send */
static inline int
arbel_cmd_rtr2rts_qpee ( struct arbel *arbel, unsigned long qpn,
			 const struct arbelprm_qp_ee_state_transitions *ctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_RTR2RTS_QPEE,
					      1, sizeof ( *ctx ) ),
			   0, ctx, qpn, NULL );
}

/* 2RST_QPEE: force queue pair back to Reset
 * (NOTE(review): op_mod 0x03 semantics taken on trust — confirm vs PRM)
 */
static inline int
arbel_cmd_2rst_qpee ( struct arbel *arbel, unsigned long qpn ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_2RST_QPEE ),
			   0x03, NULL, qpn, NULL );
}

/* MAD_IFC: pass a management datagram to firmware; reply overwrites *mad */
static inline int
arbel_cmd_mad_ifc ( struct arbel *arbel, unsigned int port,
		    union arbelprm_mad *mad ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_MAD_IFC,
						 1, sizeof ( *mad ),
						 1, sizeof ( *mad ) ),
			   0x03, mad, port, mad );
}
/* READ_MGM: read a multicast group entry by index */
static inline int
arbel_cmd_read_mgm ( struct arbel *arbel, unsigned int index,
		     struct arbelprm_mgm_entry *mgm ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_READ_MGM,
					       1, sizeof ( *mgm ) ),
			   0, NULL, index, mgm );
}

/* WRITE_MGM: write a multicast group entry by index */
static inline int
arbel_cmd_write_mgm ( struct arbel *arbel, unsigned int index,
		      const struct arbelprm_mgm_entry *mgm ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_WRITE_MGM,
					      1, sizeof ( *mgm ) ),
			   0, mgm, index, NULL );
}

/* MGID_HASH: ask firmware for the hash of a multicast GID */
static inline int
arbel_cmd_mgid_hash ( struct arbel *arbel, const struct ib_gid *gid,
		      struct arbelprm_mgm_hash *hash ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_MGID_HASH,
						 1, sizeof ( *gid ),
						 0, sizeof ( *hash ) ),
			   0, gid, 0, hash );
}
/* RUN_FW: start the firmware */
static inline int
arbel_cmd_run_fw ( struct arbel *arbel ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_RUN_FW ),
			   0, NULL, 0, NULL );
}

/* DISABLE_LAM: disable local attached memory */
static inline int
arbel_cmd_disable_lam ( struct arbel *arbel ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_DISABLE_LAM ),
			   0, NULL, 0, NULL );
}

/* ENABLE_LAM: enable local attached memory; parameters returned in *lam */
static inline int
arbel_cmd_enable_lam ( struct arbel *arbel, struct arbelprm_access_lam *lam ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_ENABLE_LAM,
					       1, sizeof ( *lam ) ),
			   1, NULL, 0, lam );
}

/* UNMAP_ICM: unmap page_count pages of ICM (count in input modifier) */
static inline int
arbel_cmd_unmap_icm ( struct arbel *arbel, unsigned int page_count ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_UNMAP_ICM ),
			   0, NULL, page_count, NULL );
}

/* MAP_ICM: map one virtual/physical range into the ICM */
static inline int
arbel_cmd_map_icm ( struct arbel *arbel,
		    const struct arbelprm_virtual_physical_mapping *map ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_ICM,
					      1, sizeof ( *map ) ),
			   0, map, 1, NULL );
}

/* UNMAP_ICM_AUX: unmap the auxiliary ICM area */
static inline int
arbel_cmd_unmap_icm_aux ( struct arbel *arbel ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_UNMAP_ICM_AUX ),
			   0, NULL, 0, NULL );
}

/* MAP_ICM_AUX: map the auxiliary ICM area */
static inline int
arbel_cmd_map_icm_aux ( struct arbel *arbel,
			const struct arbelprm_virtual_physical_mapping *map ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_ICM_AUX,
					      1, sizeof ( *map ) ),
			   0, map, 1, NULL );
}

/* SET_ICM_SIZE: tell firmware the ICM size; it reports the required
 * auxiliary ICM size in *icm_aux_size
 */
static inline int
arbel_cmd_set_icm_size ( struct arbel *arbel,
			 const struct arbelprm_scalar_parameter *icm_size,
			 struct arbelprm_scalar_parameter *icm_aux_size ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_SET_ICM_SIZE,
						 0, sizeof ( *icm_size ),
						 0, sizeof ( *icm_aux_size ) ),
			   0, icm_size, 0, icm_aux_size );
}

/* UNMAP_FA: unmap the firmware area */
static inline int
arbel_cmd_unmap_fa ( struct arbel *arbel ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_UNMAP_FA ),
			   0, NULL, 0, NULL );
}

/* MAP_FA: map one virtual/physical range into the firmware area */
static inline int
arbel_cmd_map_fa ( struct arbel *arbel,
		   const struct arbelprm_virtual_physical_mapping *map ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_FA,
					      1, sizeof ( *map ) ),
			   0, map, 1, NULL );
}
  415. /***************************************************************************
  416. *
  417. * Completion queue operations
  418. *
  419. ***************************************************************************
  420. */
/**
 * Create completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @ret rc		Return status code
 *
 * Allocates a hardware CQ number, a DMA-capable CQE ring and the
 * consumer-index/arm doorbell records, then hands the queue to the
 * hardware via SW2HW_CQ.  On any failure, everything already built is
 * unwound via the goto chain at the bottom.
 */
static int arbel_create_cq ( struct ib_device *ibdev,
			     struct ib_completion_queue *cq ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_completion_queue *arbel_cq;
	struct arbelprm_completion_queue_context cqctx;
	struct arbelprm_cq_ci_db_record *ci_db_rec;
	struct arbelprm_cq_arm_db_record *arm_db_rec;
	int cqn_offset;
	unsigned int i;
	int rc;

	/* Find a free completion queue number */
	cqn_offset = arbel_alloc_qn_offset ( arbel->cq_inuse, ARBEL_MAX_CQS );
	if ( cqn_offset < 0 ) {
		DBGC ( arbel, "Arbel %p out of completion queues\n", arbel );
		rc = cqn_offset;
		goto err_cqn_offset;
	}
	/* CQNs below limits.reserved_cqs are reserved; skip past them */
	cq->cqn = ( arbel->limits.reserved_cqs + cqn_offset );

	/* Allocate control structures */
	arbel_cq = zalloc ( sizeof ( *arbel_cq ) );
	if ( ! arbel_cq ) {
		rc = -ENOMEM;
		goto err_arbel_cq;
	}
	arbel_cq->ci_doorbell_idx = arbel_cq_ci_doorbell_idx ( cqn_offset );
	arbel_cq->arm_doorbell_idx = arbel_cq_arm_doorbell_idx ( cqn_offset );

	/* Allocate completion queue itself */
	arbel_cq->cqe_size = ( cq->num_cqes * sizeof ( arbel_cq->cqe[0] ) );
	arbel_cq->cqe = malloc_dma ( arbel_cq->cqe_size,
				     sizeof ( arbel_cq->cqe[0] ) );
	if ( ! arbel_cq->cqe ) {
		rc = -ENOMEM;
		goto err_cqe;
	}
	memset ( arbel_cq->cqe, 0, arbel_cq->cqe_size );
	/* Set the owner bit in every CQE before handover
	 * (NOTE(review): owner==1 appears to mean hardware-owned —
	 * confirm polarity against the PRM)
	 */
	for ( i = 0 ; i < cq->num_cqes ; i++ ) {
		MLX_FILL_1 ( &arbel_cq->cqe[i].normal, 7, owner, 1 );
	}
	/* Ensure CQE initialisation is globally visible before SW2HW_CQ */
	barrier();

	/* Initialise doorbell records */
	ci_db_rec = &arbel->db_rec[arbel_cq->ci_doorbell_idx].cq_ci;
	MLX_FILL_1 ( ci_db_rec, 0, counter, 0 );
	MLX_FILL_2 ( ci_db_rec, 1,
		     res, ARBEL_UAR_RES_CQ_CI,
		     cq_number, cq->cqn );
	arm_db_rec = &arbel->db_rec[arbel_cq->arm_doorbell_idx].cq_arm;
	MLX_FILL_1 ( arm_db_rec, 0, counter, 0 );
	MLX_FILL_2 ( arm_db_rec, 1,
		     res, ARBEL_UAR_RES_CQ_ARM,
		     cq_number, cq->cqn );

	/* Hand queue over to hardware */
	memset ( &cqctx, 0, sizeof ( cqctx ) );
	MLX_FILL_1 ( &cqctx, 0, st, 0xa /* "Event fired" */ );
	MLX_FILL_1 ( &cqctx, 2, start_address_l,
		     virt_to_bus ( arbel_cq->cqe ) );
	/* log_cq_size: num_cqes is assumed to be a power of two */
	MLX_FILL_2 ( &cqctx, 3,
		     usr_page, arbel->limits.reserved_uars,
		     log_cq_size, fls ( cq->num_cqes - 1 ) );
	MLX_FILL_1 ( &cqctx, 5, c_eqn, ARBEL_NO_EQ );
	MLX_FILL_1 ( &cqctx, 6, pd, ARBEL_GLOBAL_PD );
	MLX_FILL_1 ( &cqctx, 7, l_key, arbel->reserved_lkey );
	MLX_FILL_1 ( &cqctx, 12, cqn, cq->cqn );
	MLX_FILL_1 ( &cqctx, 13,
		     cq_ci_db_record, arbel_cq->ci_doorbell_idx );
	MLX_FILL_1 ( &cqctx, 14,
		     cq_state_db_record, arbel_cq->arm_doorbell_idx );
	if ( ( rc = arbel_cmd_sw2hw_cq ( arbel, cq->cqn, &cqctx ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p SW2HW_CQ failed: %s\n",
		       arbel, strerror ( rc ) );
		goto err_sw2hw_cq;
	}
	DBGC ( arbel, "Arbel %p CQN %#lx ring at [%p,%p)\n",
	       arbel, cq->cqn, arbel_cq->cqe,
	       ( ( ( void * ) arbel_cq->cqe ) + arbel_cq->cqe_size ) );
	ib_cq_set_drvdata ( cq, arbel_cq );
	return 0;

	/* Unwind in reverse order of construction */
 err_sw2hw_cq:
	MLX_FILL_1 ( ci_db_rec, 1, res, ARBEL_UAR_RES_NONE );
	MLX_FILL_1 ( arm_db_rec, 1, res, ARBEL_UAR_RES_NONE );
	free_dma ( arbel_cq->cqe, arbel_cq->cqe_size );
 err_cqe:
	free ( arbel_cq );
 err_arbel_cq:
	arbel_free_qn_offset ( arbel->cq_inuse, cqn_offset );
 err_cqn_offset:
	return rc;
}
/**
 * Destroy completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 *
 * Reclaims the queue from hardware (HW2SW_CQ) before releasing any
 * memory, so the device can no longer DMA into the ring being freed.
 */
static void arbel_destroy_cq ( struct ib_device *ibdev,
			       struct ib_completion_queue *cq ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_completion_queue *arbel_cq = ib_cq_get_drvdata ( cq );
	struct arbelprm_completion_queue_context cqctx;
	struct arbelprm_cq_ci_db_record *ci_db_rec;
	struct arbelprm_cq_arm_db_record *arm_db_rec;
	int cqn_offset;
	int rc;

	/* Take ownership back from hardware */
	if ( ( rc = arbel_cmd_hw2sw_cq ( arbel, cq->cqn, &cqctx ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p FATAL HW2SW_CQ failed on CQN %#lx: "
		       "%s\n", arbel, cq->cqn, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}

	/* Clear doorbell records */
	ci_db_rec = &arbel->db_rec[arbel_cq->ci_doorbell_idx].cq_ci;
	arm_db_rec = &arbel->db_rec[arbel_cq->arm_doorbell_idx].cq_arm;
	MLX_FILL_1 ( ci_db_rec, 1, res, ARBEL_UAR_RES_NONE );
	MLX_FILL_1 ( arm_db_rec, 1, res, ARBEL_UAR_RES_NONE );

	/* Free memory */
	free_dma ( arbel_cq->cqe, arbel_cq->cqe_size );
	free ( arbel_cq );

	/* Mark queue number as free (inverse of the offset->CQN
	 * mapping used in arbel_create_cq)
	 */
	cqn_offset = ( cq->cqn - arbel->limits.reserved_cqs );
	arbel_free_qn_offset ( arbel->cq_inuse, cqn_offset );
	ib_cq_set_drvdata ( cq, NULL );
}
  550. /***************************************************************************
  551. *
  552. * Queue pair operations
  553. *
  554. ***************************************************************************
  555. */
  556. /**
  557. * Create send work queue
  558. *
  559. * @v arbel_send_wq Send work queue
  560. * @v num_wqes Number of work queue entries
  561. * @ret rc Return status code
  562. */
  563. static int arbel_create_send_wq ( struct arbel_send_work_queue *arbel_send_wq,
  564. unsigned int num_wqes ) {
  565. struct arbelprm_ud_send_wqe *wqe;
  566. struct arbelprm_ud_send_wqe *next_wqe;
  567. unsigned int wqe_idx_mask;
  568. unsigned int i;
  569. /* Allocate work queue */
  570. arbel_send_wq->wqe_size = ( num_wqes *
  571. sizeof ( arbel_send_wq->wqe[0] ) );
  572. arbel_send_wq->wqe = malloc_dma ( arbel_send_wq->wqe_size,
  573. sizeof ( arbel_send_wq->wqe[0] ) );
  574. if ( ! arbel_send_wq->wqe )
  575. return -ENOMEM;
  576. memset ( arbel_send_wq->wqe, 0, arbel_send_wq->wqe_size );
  577. /* Link work queue entries */
  578. wqe_idx_mask = ( num_wqes - 1 );
  579. for ( i = 0 ; i < num_wqes ; i++ ) {
  580. wqe = &arbel_send_wq->wqe[i].ud;
  581. next_wqe = &arbel_send_wq->wqe[ ( i + 1 ) & wqe_idx_mask ].ud;
  582. MLX_FILL_1 ( &wqe->next, 0, nda_31_6,
  583. ( virt_to_bus ( next_wqe ) >> 6 ) );
  584. }
  585. return 0;
  586. }
  587. /**
  588. * Create receive work queue
  589. *
  590. * @v arbel_recv_wq Receive work queue
  591. * @v num_wqes Number of work queue entries
  592. * @ret rc Return status code
  593. */
  594. static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq,
  595. unsigned int num_wqes ) {
  596. struct arbelprm_recv_wqe *wqe;
  597. struct arbelprm_recv_wqe *next_wqe;
  598. unsigned int wqe_idx_mask;
  599. size_t nds;
  600. unsigned int i;
  601. unsigned int j;
  602. /* Allocate work queue */
  603. arbel_recv_wq->wqe_size = ( num_wqes *
  604. sizeof ( arbel_recv_wq->wqe[0] ) );
  605. arbel_recv_wq->wqe = malloc_dma ( arbel_recv_wq->wqe_size,
  606. sizeof ( arbel_recv_wq->wqe[0] ) );
  607. if ( ! arbel_recv_wq->wqe )
  608. return -ENOMEM;
  609. memset ( arbel_recv_wq->wqe, 0, arbel_recv_wq->wqe_size );
  610. /* Link work queue entries */
  611. wqe_idx_mask = ( num_wqes - 1 );
  612. nds = ( ( offsetof ( typeof ( *wqe ), data ) +
  613. sizeof ( wqe->data[0] ) ) >> 4 );
  614. for ( i = 0 ; i < num_wqes ; i++ ) {
  615. wqe = &arbel_recv_wq->wqe[i].recv;
  616. next_wqe = &arbel_recv_wq->wqe[( i + 1 ) & wqe_idx_mask].recv;
  617. MLX_FILL_1 ( &wqe->next, 0, nda_31_6,
  618. ( virt_to_bus ( next_wqe ) >> 6 ) );
  619. MLX_FILL_1 ( &wqe->next, 1, nds, ( sizeof ( *wqe ) / 16 ) );
  620. for ( j = 0 ; ( ( ( void * ) &wqe->data[j] ) <
  621. ( ( void * ) ( wqe + 1 ) ) ) ; j++ ) {
  622. MLX_FILL_1 ( &wqe->data[j], 1,
  623. l_key, ARBEL_INVALID_LKEY );
  624. }
  625. }
  626. return 0;
  627. }
/**
 * Create queue pair
 *
 * Allocates a QP number and the send/receive work queue rings,
 * initialises the associated doorbell records, then walks the queue
 * pair through the RST->INIT->RTR->RTS state transitions so that the
 * hardware owns it and it is ready to post work requests.
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int arbel_create_qp ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_queue_pair *arbel_qp;
	struct arbelprm_qp_ee_state_transitions qpctx;
	struct arbelprm_qp_db_record *send_db_rec;
	struct arbelprm_qp_db_record *recv_db_rec;
	int qpn_offset;
	int rc;

	/* Find a free queue pair number */
	qpn_offset = arbel_alloc_qn_offset ( arbel->qp_inuse, ARBEL_MAX_QPS );
	if ( qpn_offset < 0 ) {
		DBGC ( arbel, "Arbel %p out of queue pairs\n", arbel );
		rc = qpn_offset;
		goto err_qpn_offset;
	}
	/* QPNs below (base + reserved) belong to firmware; skip them */
	qp->qpn = ( ARBEL_QPN_BASE + arbel->limits.reserved_qps + qpn_offset );

	/* Allocate control structures */
	arbel_qp = zalloc ( sizeof ( *arbel_qp ) );
	if ( ! arbel_qp ) {
		rc = -ENOMEM;
		goto err_arbel_qp;
	}
	/* Each QP offset maps to a fixed pair of doorbell record slots */
	arbel_qp->send.doorbell_idx = arbel_send_doorbell_idx ( qpn_offset );
	arbel_qp->recv.doorbell_idx = arbel_recv_doorbell_idx ( qpn_offset );

	/* Create send and receive work queues */
	if ( ( rc = arbel_create_send_wq ( &arbel_qp->send,
					   qp->send.num_wqes ) ) != 0 )
		goto err_create_send_wq;
	if ( ( rc = arbel_create_recv_wq ( &arbel_qp->recv,
					   qp->recv.num_wqes ) ) != 0 )
		goto err_create_recv_wq;

	/* Initialise doorbell records: zero the producer counters and
	 * mark each record as owned by the appropriate queue type.
	 */
	send_db_rec = &arbel->db_rec[arbel_qp->send.doorbell_idx].qp;
	MLX_FILL_1 ( send_db_rec, 0, counter, 0 );
	MLX_FILL_2 ( send_db_rec, 1,
		     res, ARBEL_UAR_RES_SQ,
		     qp_number, qp->qpn );
	recv_db_rec = &arbel->db_rec[arbel_qp->recv.doorbell_idx].qp;
	MLX_FILL_1 ( recv_db_rec, 0, counter, 0 );
	MLX_FILL_2 ( recv_db_rec, 1,
		     res, ARBEL_UAR_RES_RQ,
		     qp_number, qp->qpn );

	/* Hand queue over to hardware.  The dword offsets below follow
	 * the QP/EE context layout in the hardware PRM.
	 */
	memset ( &qpctx, 0, sizeof ( qpctx ) );
	MLX_FILL_3 ( &qpctx, 2,
		     qpc_eec_data.de, 1,
		     qpc_eec_data.pm_state, 0x03 /* Always 0x03 for UD */,
		     qpc_eec_data.st, ARBEL_ST_UD );
	MLX_FILL_6 ( &qpctx, 4,
		     qpc_eec_data.mtu, ARBEL_MTU_2048,
		     qpc_eec_data.msg_max, 11 /* 2^11 = 2048 */,
		     /* log2 ring sizes; num_wqes is a power of two */
		     qpc_eec_data.log_rq_size, fls ( qp->recv.num_wqes - 1 ),
		     qpc_eec_data.log_rq_stride,
		     ( fls ( sizeof ( arbel_qp->recv.wqe[0] ) - 1 ) - 4 ),
		     qpc_eec_data.log_sq_size, fls ( qp->send.num_wqes - 1 ),
		     qpc_eec_data.log_sq_stride,
		     ( fls ( sizeof ( arbel_qp->send.wqe[0] ) - 1 ) - 4 ) );
	MLX_FILL_1 ( &qpctx, 5,
		     qpc_eec_data.usr_page, arbel->limits.reserved_uars );
	MLX_FILL_1 ( &qpctx, 10, qpc_eec_data.primary_address_path.port_number,
		     ibdev->port );
	MLX_FILL_1 ( &qpctx, 27, qpc_eec_data.pd, ARBEL_GLOBAL_PD );
	MLX_FILL_1 ( &qpctx, 29, qpc_eec_data.wqe_lkey, arbel->reserved_lkey );
	MLX_FILL_1 ( &qpctx, 30, qpc_eec_data.ssc, 1 );
	MLX_FILL_1 ( &qpctx, 33, qpc_eec_data.cqn_snd, qp->send.cq->cqn );
	/* WQE base addresses are specified in units of 64 bytes */
	MLX_FILL_1 ( &qpctx, 34, qpc_eec_data.snd_wqe_base_adr_l,
		     ( virt_to_bus ( arbel_qp->send.wqe ) >> 6 ) );
	MLX_FILL_1 ( &qpctx, 35, qpc_eec_data.snd_db_record_index,
		     arbel_qp->send.doorbell_idx );
	MLX_FILL_1 ( &qpctx, 38, qpc_eec_data.rsc, 1 );
	MLX_FILL_1 ( &qpctx, 41, qpc_eec_data.cqn_rcv, qp->recv.cq->cqn );
	MLX_FILL_1 ( &qpctx, 42, qpc_eec_data.rcv_wqe_base_adr_l,
		     ( virt_to_bus ( arbel_qp->recv.wqe ) >> 6 ) );
	MLX_FILL_1 ( &qpctx, 43, qpc_eec_data.rcv_db_record_index,
		     arbel_qp->recv.doorbell_idx );
	MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
	if ( ( rc = arbel_cmd_rst2init_qpee ( arbel, qp->qpn, &qpctx )) != 0 ){
		DBGC ( arbel, "Arbel %p RST2INIT_QPEE failed: %s\n",
		       arbel, strerror ( rc ) );
		goto err_rst2init_qpee;
	}
	/* INIT->RTR transition needs only the path MTU parameters */
	memset ( &qpctx, 0, sizeof ( qpctx ) );
	MLX_FILL_2 ( &qpctx, 4,
		     qpc_eec_data.mtu, ARBEL_MTU_2048,
		     qpc_eec_data.msg_max, 11 /* 2^11 = 2048 */ );
	if ( ( rc = arbel_cmd_init2rtr_qpee ( arbel, qp->qpn, &qpctx )) != 0 ){
		DBGC ( arbel, "Arbel %p INIT2RTR_QPEE failed: %s\n",
		       arbel, strerror ( rc ) );
		goto err_init2rtr_qpee;
	}
	/* RTR->RTS transition takes an all-zero (no-change) context */
	memset ( &qpctx, 0, sizeof ( qpctx ) );
	if ( ( rc = arbel_cmd_rtr2rts_qpee ( arbel, qp->qpn, &qpctx ) ) != 0 ){
		DBGC ( arbel, "Arbel %p RTR2RTS_QPEE failed: %s\n",
		       arbel, strerror ( rc ) );
		goto err_rtr2rts_qpee;
	}

	DBGC ( arbel, "Arbel %p QPN %#lx send ring at [%p,%p)\n",
	       arbel, qp->qpn, arbel_qp->send.wqe,
	       ( ( (void *) arbel_qp->send.wqe ) + arbel_qp->send.wqe_size ) );
	DBGC ( arbel, "Arbel %p QPN %#lx receive ring at [%p,%p)\n",
	       arbel, qp->qpn, arbel_qp->recv.wqe,
	       ( ( (void *) arbel_qp->recv.wqe ) + arbel_qp->recv.wqe_size ) );
	ib_qp_set_drvdata ( qp, arbel_qp );
	return 0;

	/* Unwind in strict reverse order of construction */
 err_rtr2rts_qpee:
 err_init2rtr_qpee:
	arbel_cmd_2rst_qpee ( arbel, qp->qpn );
 err_rst2init_qpee:
	MLX_FILL_1 ( send_db_rec, 1, res, ARBEL_UAR_RES_NONE );
	MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );
	free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
 err_create_recv_wq:
	free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
 err_create_send_wq:
	free ( arbel_qp );
 err_arbel_qp:
	arbel_free_qn_offset ( arbel->qp_inuse, qpn_offset );
 err_qpn_offset:
	return rc;
}
/**
 * Destroy queue pair
 *
 * Returns the queue pair to the reset state, releases its doorbell
 * records and ring memory, and frees the queue pair number.
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void arbel_destroy_qp ( struct ib_device *ibdev,
			       struct ib_queue_pair *qp ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
	struct arbelprm_qp_db_record *send_db_rec;
	struct arbelprm_qp_db_record *recv_db_rec;
	int qpn_offset;
	int rc;

	/* Take ownership back from hardware.  Memory must not be freed
	 * while the hardware may still DMA into it.
	 */
	if ( ( rc = arbel_cmd_2rst_qpee ( arbel, qp->qpn ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p FATAL 2RST_QPEE failed on QPN %#lx: "
		       "%s\n", arbel, qp->qpn, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}

	/* Clear doorbell records (mark them as unallocated) */
	send_db_rec = &arbel->db_rec[arbel_qp->send.doorbell_idx].qp;
	recv_db_rec = &arbel->db_rec[arbel_qp->recv.doorbell_idx].qp;
	MLX_FILL_1 ( send_db_rec, 1, res, ARBEL_UAR_RES_NONE );
	MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );

	/* Free memory */
	free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
	free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
	free ( arbel_qp );

	/* Mark queue number as free; inverse of the calculation in
	 * arbel_create_qp()
	 */
	qpn_offset = ( qp->qpn - ARBEL_QPN_BASE - arbel->limits.reserved_qps );
	arbel_free_qn_offset ( arbel->qp_inuse, qpn_offset );

	ib_qp_set_drvdata ( qp, NULL );
}
  791. /***************************************************************************
  792. *
  793. * Work request operations
  794. *
  795. ***************************************************************************
  796. */
/**
 * Ring doorbell register in UAR
 *
 * Writes the two dwords of a doorbell register to the memory-mapped
 * UAR.  The barriers ensure the dwords reach the device in order, and
 * that all prior descriptor writes are visible before the doorbell.
 *
 * @v arbel		Arbel device
 * @v db_reg		Doorbell register structure
 * @v offset		Address of doorbell
 */
static void arbel_ring_doorbell ( struct arbel *arbel,
				  union arbelprm_doorbell_register *db_reg,
				  unsigned int offset ) {

	DBGC2 ( arbel, "Arbel %p ringing doorbell %08lx:%08lx at %lx\n",
		arbel, db_reg->dword[0], db_reg->dword[1],
		virt_to_phys ( arbel->uar + offset ) );

	barrier();
	writel ( db_reg->dword[0], ( arbel->uar + offset + 0 ) );
	barrier();
	writel ( db_reg->dword[1], ( arbel->uar + offset + 4 ) );
}
/** GID used for GID-less send work queue entries
 *
 * NOTE(review): the single non-zero byte appears to be a placeholder
 * value required by the hardware address vector format when no real
 * GID is present — confirm against the hardware PRM.
 */
static const struct ib_gid arbel_no_gid = {
	{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0 } }
};
  819. /**
  820. * Post send work queue entry
  821. *
  822. * @v ibdev Infiniband device
  823. * @v qp Queue pair
  824. * @v av Address vector
  825. * @v iobuf I/O buffer
  826. * @ret rc Return status code
  827. */
  828. static int arbel_post_send ( struct ib_device *ibdev,
  829. struct ib_queue_pair *qp,
  830. struct ib_address_vector *av,
  831. struct io_buffer *iobuf ) {
  832. struct arbel *arbel = ib_get_drvdata ( ibdev );
  833. struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
  834. struct ib_work_queue *wq = &qp->send;
  835. struct arbel_send_work_queue *arbel_send_wq = &arbel_qp->send;
  836. struct arbelprm_ud_send_wqe *prev_wqe;
  837. struct arbelprm_ud_send_wqe *wqe;
  838. struct arbelprm_qp_db_record *qp_db_rec;
  839. union arbelprm_doorbell_register db_reg;
  840. const struct ib_gid *gid;
  841. unsigned int wqe_idx_mask;
  842. size_t nds;
  843. /* Allocate work queue entry */
  844. wqe_idx_mask = ( wq->num_wqes - 1 );
  845. if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
  846. DBGC ( arbel, "Arbel %p send queue full", arbel );
  847. return -ENOBUFS;
  848. }
  849. wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
  850. prev_wqe = &arbel_send_wq->wqe[(wq->next_idx - 1) & wqe_idx_mask].ud;
  851. wqe = &arbel_send_wq->wqe[wq->next_idx & wqe_idx_mask].ud;
  852. /* Construct work queue entry */
  853. MLX_FILL_1 ( &wqe->next, 1, always1, 1 );
  854. memset ( &wqe->ctrl, 0, sizeof ( wqe->ctrl ) );
  855. MLX_FILL_1 ( &wqe->ctrl, 0, always1, 1 );
  856. memset ( &wqe->ud, 0, sizeof ( wqe->ud ) );
  857. MLX_FILL_2 ( &wqe->ud, 0,
  858. ud_address_vector.pd, ARBEL_GLOBAL_PD,
  859. ud_address_vector.port_number, ibdev->port );
  860. MLX_FILL_2 ( &wqe->ud, 1,
  861. ud_address_vector.rlid, av->dlid,
  862. ud_address_vector.g, av->gid_present );
  863. MLX_FILL_2 ( &wqe->ud, 2,
  864. ud_address_vector.max_stat_rate,
  865. ( ( av->rate >= 3 ) ? 0 : 1 ),
  866. ud_address_vector.msg, 3 );
  867. MLX_FILL_1 ( &wqe->ud, 3, ud_address_vector.sl, av->sl );
  868. gid = ( av->gid_present ? &av->gid : &arbel_no_gid );
  869. memcpy ( &wqe->ud.u.dwords[4], gid, sizeof ( *gid ) );
  870. MLX_FILL_1 ( &wqe->ud, 8, destination_qp, av->dest_qp );
  871. MLX_FILL_1 ( &wqe->ud, 9, q_key, av->qkey );
  872. MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_len ( iobuf ) );
  873. MLX_FILL_1 ( &wqe->data[0], 1, l_key, arbel->reserved_lkey );
  874. MLX_FILL_1 ( &wqe->data[0], 3,
  875. local_address_l, virt_to_bus ( iobuf->data ) );
  876. /* Update previous work queue entry's "next" field */
  877. nds = ( ( offsetof ( typeof ( *wqe ), data ) +
  878. sizeof ( wqe->data[0] ) ) >> 4 );
  879. MLX_SET ( &prev_wqe->next, nopcode, ARBEL_OPCODE_SEND );
  880. MLX_FILL_3 ( &prev_wqe->next, 1,
  881. nds, nds,
  882. f, 1,
  883. always1, 1 );
  884. /* Update doorbell record */
  885. barrier();
  886. qp_db_rec = &arbel->db_rec[arbel_send_wq->doorbell_idx].qp;
  887. MLX_FILL_1 ( qp_db_rec, 0,
  888. counter, ( ( wq->next_idx + 1 ) & 0xffff ) );
  889. /* Ring doorbell register */
  890. MLX_FILL_4 ( &db_reg.send, 0,
  891. nopcode, ARBEL_OPCODE_SEND,
  892. f, 1,
  893. wqe_counter, ( wq->next_idx & 0xffff ),
  894. wqe_cnt, 1 );
  895. MLX_FILL_2 ( &db_reg.send, 1,
  896. nds, nds,
  897. qpn, qp->qpn );
  898. arbel_ring_doorbell ( arbel, &db_reg, ARBEL_DB_POST_SND_OFFSET );
  899. /* Update work queue's index */
  900. wq->next_idx++;
  901. return 0;
  902. }
  903. /**
  904. * Post receive work queue entry
  905. *
  906. * @v ibdev Infiniband device
  907. * @v qp Queue pair
  908. * @v iobuf I/O buffer
  909. * @ret rc Return status code
  910. */
  911. static int arbel_post_recv ( struct ib_device *ibdev,
  912. struct ib_queue_pair *qp,
  913. struct io_buffer *iobuf ) {
  914. struct arbel *arbel = ib_get_drvdata ( ibdev );
  915. struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
  916. struct ib_work_queue *wq = &qp->recv;
  917. struct arbel_recv_work_queue *arbel_recv_wq = &arbel_qp->recv;
  918. struct arbelprm_recv_wqe *wqe;
  919. union arbelprm_doorbell_record *db_rec;
  920. unsigned int wqe_idx_mask;
  921. /* Allocate work queue entry */
  922. wqe_idx_mask = ( wq->num_wqes - 1 );
  923. if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
  924. DBGC ( arbel, "Arbel %p receive queue full", arbel );
  925. return -ENOBUFS;
  926. }
  927. wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
  928. wqe = &arbel_recv_wq->wqe[wq->next_idx & wqe_idx_mask].recv;
  929. /* Construct work queue entry */
  930. MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_tailroom ( iobuf ) );
  931. MLX_FILL_1 ( &wqe->data[0], 1, l_key, arbel->reserved_lkey );
  932. MLX_FILL_1 ( &wqe->data[0], 3,
  933. local_address_l, virt_to_bus ( iobuf->data ) );
  934. /* Update doorbell record */
  935. barrier();
  936. db_rec = &arbel->db_rec[arbel_recv_wq->doorbell_idx];
  937. MLX_FILL_1 ( &db_rec->qp, 0,
  938. counter, ( ( wq->next_idx + 1 ) & 0xffff ) );
  939. /* Update work queue's index */
  940. wq->next_idx++;
  941. return 0;
  942. }
  943. /**
  944. * Handle completion
  945. *
  946. * @v ibdev Infiniband device
  947. * @v cq Completion queue
  948. * @v cqe Hardware completion queue entry
  949. * @v complete_send Send completion handler
  950. * @v complete_recv Receive completion handler
  951. * @ret rc Return status code
  952. */
  953. static int arbel_complete ( struct ib_device *ibdev,
  954. struct ib_completion_queue *cq,
  955. union arbelprm_completion_entry *cqe,
  956. ib_completer_t complete_send,
  957. ib_completer_t complete_recv ) {
  958. struct arbel *arbel = ib_get_drvdata ( ibdev );
  959. struct ib_completion completion;
  960. struct ib_work_queue *wq;
  961. struct ib_queue_pair *qp;
  962. struct arbel_queue_pair *arbel_qp;
  963. struct arbel_send_work_queue *arbel_send_wq;
  964. struct arbel_recv_work_queue *arbel_recv_wq;
  965. struct arbelprm_recv_wqe *recv_wqe;
  966. struct io_buffer *iobuf;
  967. ib_completer_t complete;
  968. unsigned int opcode;
  969. unsigned long qpn;
  970. int is_send;
  971. unsigned long wqe_adr;
  972. unsigned int wqe_idx;
  973. int rc = 0;
  974. /* Parse completion */
  975. memset ( &completion, 0, sizeof ( completion ) );
  976. qpn = MLX_GET ( &cqe->normal, my_qpn );
  977. is_send = MLX_GET ( &cqe->normal, s );
  978. wqe_adr = ( MLX_GET ( &cqe->normal, wqe_adr ) << 6 );
  979. opcode = MLX_GET ( &cqe->normal, opcode );
  980. if ( opcode >= ARBEL_OPCODE_RECV_ERROR ) {
  981. /* "s" field is not valid for error opcodes */
  982. is_send = ( opcode == ARBEL_OPCODE_SEND_ERROR );
  983. completion.syndrome = MLX_GET ( &cqe->error, syndrome );
  984. DBGC ( arbel, "Arbel %p CPN %lx syndrome %x vendor %lx\n",
  985. arbel, cq->cqn, completion.syndrome,
  986. MLX_GET ( &cqe->error, vendor_code ) );
  987. rc = -EIO;
  988. /* Don't return immediately; propagate error to completer */
  989. }
  990. /* Identify work queue */
  991. wq = ib_find_wq ( cq, qpn, is_send );
  992. if ( ! wq ) {
  993. DBGC ( arbel, "Arbel %p CQN %lx unknown %s QPN %lx\n",
  994. arbel, cq->cqn, ( is_send ? "send" : "recv" ), qpn );
  995. return -EIO;
  996. }
  997. qp = wq->qp;
  998. arbel_qp = ib_qp_get_drvdata ( qp );
  999. arbel_send_wq = &arbel_qp->send;
  1000. arbel_recv_wq = &arbel_qp->recv;
  1001. /* Identify work queue entry index */
  1002. if ( is_send ) {
  1003. wqe_idx = ( ( wqe_adr - virt_to_bus ( arbel_send_wq->wqe ) ) /
  1004. sizeof ( arbel_send_wq->wqe[0] ) );
  1005. assert ( wqe_idx < qp->send.num_wqes );
  1006. } else {
  1007. wqe_idx = ( ( wqe_adr - virt_to_bus ( arbel_recv_wq->wqe ) ) /
  1008. sizeof ( arbel_recv_wq->wqe[0] ) );
  1009. assert ( wqe_idx < qp->recv.num_wqes );
  1010. }
  1011. /* Identify I/O buffer */
  1012. iobuf = wq->iobufs[wqe_idx];
  1013. if ( ! iobuf ) {
  1014. DBGC ( arbel, "Arbel %p CQN %lx QPN %lx empty WQE %x\n",
  1015. arbel, cq->cqn, qpn, wqe_idx );
  1016. return -EIO;
  1017. }
  1018. wq->iobufs[wqe_idx] = NULL;
  1019. /* Fill in length for received packets */
  1020. if ( ! is_send ) {
  1021. completion.len = MLX_GET ( &cqe->normal, byte_cnt );
  1022. recv_wqe = &arbel_recv_wq->wqe[wqe_idx].recv;
  1023. assert ( MLX_GET ( &recv_wqe->data[0], local_address_l ) ==
  1024. virt_to_bus ( iobuf->data ) );
  1025. assert ( MLX_GET ( &recv_wqe->data[0], byte_count ) ==
  1026. iob_tailroom ( iobuf ) );
  1027. MLX_FILL_1 ( &recv_wqe->data[0], 0, byte_count, 0 );
  1028. MLX_FILL_1 ( &recv_wqe->data[0], 1,
  1029. l_key, ARBEL_INVALID_LKEY );
  1030. if ( completion.len > iob_tailroom ( iobuf ) ) {
  1031. DBGC ( arbel, "Arbel %p CQN %lx QPN %lx IDX %x "
  1032. "overlength received packet length %zd\n",
  1033. arbel, cq->cqn, qpn, wqe_idx, completion.len );
  1034. return -EIO;
  1035. }
  1036. }
  1037. /* Pass off to caller's completion handler */
  1038. complete = ( is_send ? complete_send : complete_recv );
  1039. complete ( ibdev, qp, &completion, iobuf );
  1040. return rc;
  1041. }
/**
 * Poll completion queue
 *
 * Consumes every software-owned completion entry, dispatching each to
 * arbel_complete(), then returns ownership of the entries to hardware
 * and advances the consumer-index doorbell record.
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @v complete_send	Send completion handler
 * @v complete_recv	Receive completion handler
 */
static void arbel_poll_cq ( struct ib_device *ibdev,
			    struct ib_completion_queue *cq,
			    ib_completer_t complete_send,
			    ib_completer_t complete_recv ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_completion_queue *arbel_cq = ib_cq_get_drvdata ( cq );
	struct arbelprm_cq_ci_db_record *ci_db_rec;
	union arbelprm_completion_entry *cqe;
	unsigned int cqe_idx_mask;
	int rc;

	while ( 1 ) {
		/* Look for completion entry */
		cqe_idx_mask = ( cq->num_cqes - 1 );
		cqe = &arbel_cq->cqe[cq->next_idx & cqe_idx_mask];
		if ( MLX_GET ( &cqe->normal, owner ) != 0 ) {
			/* Entry still owned by hardware; end of poll */
			break;
		}

		/* Handle completion; an error here is logged but does
		 * not stop the poll loop.
		 */
		if ( ( rc = arbel_complete ( ibdev, cq, cqe, complete_send,
					     complete_recv ) ) != 0 ) {
			DBGC ( arbel, "Arbel %p failed to complete: %s\n",
			       arbel, strerror ( rc ) );
			DBGC_HD ( arbel, cqe, sizeof ( *cqe ) );
		}

		/* Return ownership to hardware; barrier ensures the
		 * owner bit is written before the index update below.
		 */
		MLX_FILL_1 ( &cqe->normal, 7, owner, 1 );
		barrier();
		/* Update completion queue's index */
		cq->next_idx++;
		/* Update doorbell record */
		ci_db_rec = &arbel->db_rec[arbel_cq->ci_doorbell_idx].cq_ci;
		MLX_FILL_1 ( ci_db_rec, 0,
			     counter, ( cq->next_idx & 0xffffffffUL ) );
	}
}
  1086. /***************************************************************************
  1087. *
  1088. * Infiniband link-layer operations
  1089. *
  1090. ***************************************************************************
  1091. */
  1092. /**
  1093. * Initialise Infiniband link
  1094. *
  1095. * @v ibdev Infiniband device
  1096. * @ret rc Return status code
  1097. */
  1098. static int arbel_open ( struct ib_device *ibdev ) {
  1099. struct arbel *arbel = ib_get_drvdata ( ibdev );
  1100. struct arbelprm_init_ib init_ib;
  1101. int rc;
  1102. memset ( &init_ib, 0, sizeof ( init_ib ) );
  1103. MLX_FILL_3 ( &init_ib, 0,
  1104. mtu_cap, ARBEL_MTU_2048,
  1105. port_width_cap, 3,
  1106. vl_cap, 1 );
  1107. MLX_FILL_1 ( &init_ib, 1, max_gid, 1 );
  1108. MLX_FILL_1 ( &init_ib, 2, max_pkey, 64 );
  1109. if ( ( rc = arbel_cmd_init_ib ( arbel, ibdev->port,
  1110. &init_ib ) ) != 0 ) {
  1111. DBGC ( arbel, "Arbel %p could not intialise IB: %s\n",
  1112. arbel, strerror ( rc ) );
  1113. return rc;
  1114. }
  1115. return 0;
  1116. }
  1117. /**
  1118. * Close Infiniband link
  1119. *
  1120. * @v ibdev Infiniband device
  1121. */
  1122. static void arbel_close ( struct ib_device *ibdev ) {
  1123. struct arbel *arbel = ib_get_drvdata ( ibdev );
  1124. int rc;
  1125. if ( ( rc = arbel_cmd_close_ib ( arbel, ibdev->port ) ) != 0 ) {
  1126. DBGC ( arbel, "Arbel %p could not close IB: %s\n",
  1127. arbel, strerror ( rc ) );
  1128. /* Nothing we can do about this */
  1129. }
  1130. }
  1131. /***************************************************************************
  1132. *
  1133. * Multicast group operations
  1134. *
  1135. ***************************************************************************
  1136. */
/**
 * Attach to multicast group
 *
 * Hashes the multicast GID to a multicast group table index and, if
 * the slot is free, writes an entry associating the GID with the
 * queue pair.
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 * @ret rc		Return status code
 */
static int arbel_mcast_attach ( struct ib_device *ibdev,
				struct ib_queue_pair *qp,
				struct ib_gid *gid ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbelprm_mgm_hash hash;
	struct arbelprm_mgm_entry mgm;
	unsigned int index;
	int rc;

	/* Generate hash table index */
	if ( ( rc = arbel_cmd_mgid_hash ( arbel, gid, &hash ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not hash GID: %s\n",
		       arbel, strerror ( rc ) );
		return rc;
	}
	index = MLX_GET ( &hash, hash );

	/* Check for existing hash table entry */
	if ( ( rc = arbel_cmd_read_mgm ( arbel, index, &mgm ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not read MGM %#x: %s\n",
		       arbel, index, strerror ( rc ) );
		return rc;
	}
	if ( MLX_GET ( &mgm, mgmqp_0.qi ) != 0 ) {
		/* FIXME: this implementation allows only a single QP
		 * per multicast group, and doesn't handle hash
		 * collisions.  Sufficient for IPoIB but may need to
		 * be extended in future.
		 */
		DBGC ( arbel, "Arbel %p MGID index %#x already in use\n",
		       arbel, index );
		return -EBUSY;
	}

	/* Update hash table entry: record the QPN, mark slot valid,
	 * and store the GID itself.
	 */
	MLX_FILL_2 ( &mgm, 8,
		     mgmqp_0.qpn_i, qp->qpn,
		     mgmqp_0.qi, 1 );
	memcpy ( &mgm.u.dwords[4], gid, sizeof ( *gid ) );
	if ( ( rc = arbel_cmd_write_mgm ( arbel, index, &mgm ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not write MGM %#x: %s\n",
		       arbel, index, strerror ( rc ) );
		return rc;
	}

	return 0;
}
  1188. /**
  1189. * Detach from multicast group
  1190. *
  1191. * @v ibdev Infiniband device
  1192. * @v qp Queue pair
  1193. * @v gid Multicast GID
  1194. */
  1195. static void arbel_mcast_detach ( struct ib_device *ibdev,
  1196. struct ib_queue_pair *qp __unused,
  1197. struct ib_gid *gid ) {
  1198. struct arbel *arbel = ib_get_drvdata ( ibdev );
  1199. struct arbelprm_mgm_hash hash;
  1200. struct arbelprm_mgm_entry mgm;
  1201. unsigned int index;
  1202. int rc;
  1203. /* Generate hash table index */
  1204. if ( ( rc = arbel_cmd_mgid_hash ( arbel, gid, &hash ) ) != 0 ) {
  1205. DBGC ( arbel, "Arbel %p could not hash GID: %s\n",
  1206. arbel, strerror ( rc ) );
  1207. return;
  1208. }
  1209. index = MLX_GET ( &hash, hash );
  1210. /* Clear hash table entry */
  1211. memset ( &mgm, 0, sizeof ( mgm ) );
  1212. if ( ( rc = arbel_cmd_write_mgm ( arbel, index, &mgm ) ) != 0 ) {
  1213. DBGC ( arbel, "Arbel %p could not write MGM %#x: %s\n",
  1214. arbel, index, strerror ( rc ) );
  1215. return;
  1216. }
  1217. }
  1218. /***************************************************************************
  1219. *
  1220. * MAD operations
  1221. *
  1222. ***************************************************************************
  1223. */
/**
 * Issue management datagram
 *
 * Copies the MAD into a mailbox, issues MAD_IFC, and copies the reply
 * back over the caller's buffer in place.
 *
 * @v ibdev		Infiniband device
 * @v mad		Management datagram
 * @v len		Length of management datagram
 * @ret rc		Return status code
 */
static int arbel_mad ( struct ib_device *ibdev, struct ib_mad_hdr *mad,
		       size_t len ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	union arbelprm_mad mad_ifc;
	int rc;

	/* Copy in request packet */
	memset ( &mad_ifc, 0, sizeof ( mad_ifc ) );
	assert ( len <= sizeof ( mad_ifc.mad ) );
	memcpy ( &mad_ifc.mad, mad, len );

	/* Issue MAD */
	if ( ( rc = arbel_cmd_mad_ifc ( arbel, ibdev->port,
					&mad_ifc ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not issue MAD IFC: %s\n",
		       arbel, strerror ( rc ) );
		return rc;
	}

	/* Copy out reply packet; overwrites the request in place */
	memcpy ( mad, &mad_ifc.mad, len );

	/* Status field is big-endian on the wire */
	if ( mad->status != 0 ) {
		DBGC ( arbel, "Arbel %p MAD IFC status %04x\n",
		       arbel, ntohs ( mad->status ) );
		return -EIO;
	}
	return 0;
}
/** Arbel Infiniband operations
 *
 * Method table plugged into the Infiniband core; maps the generic
 * device operations onto their Arbel-specific implementations.
 */
static struct ib_device_operations arbel_ib_operations = {
	/* Queue management */
	.create_cq	= arbel_create_cq,
	.destroy_cq	= arbel_destroy_cq,
	.create_qp	= arbel_create_qp,
	.destroy_qp	= arbel_destroy_qp,
	/* Datapath */
	.post_send	= arbel_post_send,
	.post_recv	= arbel_post_recv,
	.poll_cq	= arbel_poll_cq,
	/* Link control */
	.open		= arbel_open,
	.close		= arbel_close,
	/* Multicast and management */
	.mcast_attach	= arbel_mcast_attach,
	.mcast_detach	= arbel_mcast_detach,
	.mad		= arbel_mad,
};
  1272. /***************************************************************************
  1273. *
  1274. * Firmware control
  1275. *
  1276. ***************************************************************************
  1277. */
/**
 * Start firmware running
 *
 * Queries the firmware's memory requirement, allocates and maps a
 * suitably sized and aligned firmware area, and issues RUN_FW.
 *
 * @v arbel		Arbel device
 * @ret rc		Return status code
 */
static int arbel_start_firmware ( struct arbel *arbel ) {
	struct arbelprm_query_fw fw;
	struct arbelprm_access_lam lam;
	struct arbelprm_virtual_physical_mapping map_fa;
	unsigned int fw_pages;
	unsigned int log2_fw_pages;
	size_t fw_size;
	physaddr_t fw_base;
	int rc;

	/* Get firmware parameters */
	if ( ( rc = arbel_cmd_query_fw ( arbel, &fw ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not query firmware: %s\n",
		       arbel, strerror ( rc ) );
		goto err_query_fw;
	}
	DBGC ( arbel, "Arbel %p firmware version %ld.%ld.%ld\n", arbel,
	       MLX_GET ( &fw, fw_rev_major ), MLX_GET ( &fw, fw_rev_minor ),
	       MLX_GET ( &fw, fw_rev_subminor ) );
	/* Round the page count up to a power of two, since MAP_FA
	 * takes a log2 size
	 */
	fw_pages = MLX_GET ( &fw, fw_pages );
	log2_fw_pages = fls ( fw_pages - 1 );
	fw_pages = ( 1 << log2_fw_pages );
	DBGC ( arbel, "Arbel %p requires %d kB for firmware\n",
	       arbel, ( fw_pages * 4 ) );

	/* Enable locally-attached memory.  Ignore failure; there may
	 * be no attached memory.
	 */
	arbel_cmd_enable_lam ( arbel, &lam );

	/* Allocate firmware pages and map firmware area */
	fw_size = ( fw_pages * 4096 );
	arbel->firmware_area = umalloc ( fw_size );
	if ( ! arbel->firmware_area ) {
		rc = -ENOMEM;
		goto err_alloc_fa;
	}
	/* Align the base address down to a multiple of the (power of
	 * two) firmware area size.
	 *
	 * NOTE(review): this assumes the umalloc'ed region spans an
	 * aligned fw_size boundary with enough room — presumably
	 * guaranteed by the allocator; verify.
	 */
	fw_base = ( user_to_phys ( arbel->firmware_area, fw_size ) &
		    ~( fw_size - 1 ) );
	DBGC ( arbel, "Arbel %p firmware area at physical [%lx,%lx)\n",
	       arbel, fw_base, ( fw_base + fw_size ) );
	memset ( &map_fa, 0, sizeof ( map_fa ) );
	MLX_FILL_2 ( &map_fa, 3,
		     log2size, log2_fw_pages,
		     pa_l, ( fw_base >> 12 ) );
	if ( ( rc = arbel_cmd_map_fa ( arbel, &map_fa ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not map firmware: %s\n",
		       arbel, strerror ( rc ) );
		goto err_map_fa;
	}

	/* Start firmware */
	if ( ( rc = arbel_cmd_run_fw ( arbel ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not run firmware: %s\n",
		       arbel, strerror ( rc ) );
		goto err_run_fw;
	}

	DBGC ( arbel, "Arbel %p firmware started\n", arbel );
	return 0;

 err_run_fw:
	arbel_cmd_unmap_fa ( arbel );
 err_map_fa:
	ufree ( arbel->firmware_area );
	arbel->firmware_area = UNULL;
 err_alloc_fa:
 err_query_fw:
	return rc;
}
/**
 * Stop firmware running
 *
 * Unmaps the firmware area and frees it.  If the unmap fails the
 * memory is deliberately leaked, since the hardware may still be
 * using it.
 *
 * @v arbel		Arbel device
 */
static void arbel_stop_firmware ( struct arbel *arbel ) {
	int rc;

	if ( ( rc = arbel_cmd_unmap_fa ( arbel ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p FATAL could not stop firmware: %s\n",
		       arbel, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}
	ufree ( arbel->firmware_area );
	arbel->firmware_area = UNULL;
}
  1364. /***************************************************************************
  1365. *
  1366. * Infinihost Context Memory management
  1367. *
  1368. ***************************************************************************
  1369. */
/**
 * Get device limits
 *
 * Issues QUERY_DEV_LIM and caches the reserved-resource counts and
 * per-context entry sizes needed for ICM layout calculations.
 *
 * @v arbel		Arbel device
 * @ret rc		Return status code
 */
static int arbel_get_limits ( struct arbel *arbel ) {
	struct arbelprm_query_dev_lim dev_lim;
	int rc;

	if ( ( rc = arbel_cmd_query_dev_lim ( arbel, &dev_lim ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p could not get device limits: %s\n",
		       arbel, strerror ( rc ) );
		return rc;
	}

	/* Reserved counts are reported as log2 values */
	arbel->limits.reserved_qps =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_qps ) );
	arbel->limits.qpc_entry_size = MLX_GET ( &dev_lim, qpc_entry_sz );
	arbel->limits.eqpc_entry_size = MLX_GET ( &dev_lim, eqpc_entry_sz );
	arbel->limits.reserved_srqs =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_srqs ) );
	arbel->limits.srqc_entry_size = MLX_GET ( &dev_lim, srq_entry_sz );
	arbel->limits.reserved_ees =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_ees ) );
	arbel->limits.eec_entry_size = MLX_GET ( &dev_lim, eec_entry_sz );
	arbel->limits.eeec_entry_size = MLX_GET ( &dev_lim, eeec_entry_sz );
	arbel->limits.reserved_cqs =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_cqs ) );
	arbel->limits.cqc_entry_size = MLX_GET ( &dev_lim, cqc_entry_sz );
	arbel->limits.reserved_mtts =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_mtts ) );
	arbel->limits.mtt_entry_size = MLX_GET ( &dev_lim, mtt_entry_sz );
	arbel->limits.reserved_mrws =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_mrws ) );
	arbel->limits.mpt_entry_size = MLX_GET ( &dev_lim, mpt_entry_sz );
	arbel->limits.reserved_rdbs =
		( 1 << MLX_GET ( &dev_lim, log2_rsvd_rdbs ) );
	arbel->limits.eqc_entry_size = MLX_GET ( &dev_lim, eqc_entry_sz );
	/* num_rsvd_uars is an absolute count, not a log2 value */
	arbel->limits.reserved_uars = MLX_GET ( &dev_lim, num_rsvd_uars );

	return 0;
}
  1410. /**
  1411. * Get ICM usage
  1412. *
  1413. * @v log_num_entries Log2 of the number of entries
  1414. * @v entry_size Entry size
  1415. * @ret usage Usage size in ICM
  1416. */
  1417. static size_t icm_usage ( unsigned int log_num_entries, size_t entry_size ) {
  1418. size_t usage;
  1419. usage = ( ( 1 << log_num_entries ) * entry_size );
  1420. usage = ( ( usage + 4095 ) & ~4095 );
  1421. return usage;
  1422. }
  1423. /**
  1424. * Allocate ICM
  1425. *
  1426. * @v arbel Arbel device
  1427. * @v init_hca INIT_HCA structure to fill in
  1428. * @ret rc Return status code
  1429. */
  1430. static int arbel_alloc_icm ( struct arbel *arbel,
  1431. struct arbelprm_init_hca *init_hca ) {
  1432. struct arbelprm_scalar_parameter icm_size;
  1433. struct arbelprm_scalar_parameter icm_aux_size;
  1434. struct arbelprm_virtual_physical_mapping map_icm_aux;
  1435. struct arbelprm_virtual_physical_mapping map_icm;
  1436. union arbelprm_doorbell_record *db_rec;
  1437. size_t icm_offset = 0;
  1438. unsigned int log_num_qps, log_num_srqs, log_num_ees, log_num_cqs;
  1439. unsigned int log_num_mtts, log_num_mpts, log_num_rdbs, log_num_eqs;
  1440. int rc;
  1441. icm_offset = ( ( arbel->limits.reserved_uars + 1 ) << 12 );
  1442. /* Queue pair contexts */
  1443. log_num_qps = fls ( arbel->limits.reserved_qps + ARBEL_MAX_QPS - 1 );
  1444. MLX_FILL_2 ( init_hca, 13,
  1445. qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_l,
  1446. ( icm_offset >> 7 ),
  1447. qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp,
  1448. log_num_qps );
  1449. DBGC ( arbel, "Arbel %p ICM QPC base = %zx\n", arbel, icm_offset );
  1450. icm_offset += icm_usage ( log_num_qps, arbel->limits.qpc_entry_size );
  1451. /* Extended queue pair contexts */
  1452. MLX_FILL_1 ( init_hca, 25,
  1453. qpc_eec_cqc_eqc_rdb_parameters.eqpc_base_addr_l,
  1454. icm_offset );
  1455. DBGC ( arbel, "Arbel %p ICM EQPC base = %zx\n", arbel, icm_offset );
  1456. // icm_offset += icm_usage ( log_num_qps, arbel->limits.eqpc_entry_size );
  1457. icm_offset += icm_usage ( log_num_qps, arbel->limits.qpc_entry_size );
  1458. /* Shared receive queue contexts */
  1459. log_num_srqs = fls ( arbel->limits.reserved_srqs - 1 );
  1460. MLX_FILL_2 ( init_hca, 19,
  1461. qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_l,
  1462. ( icm_offset >> 5 ),
  1463. qpc_eec_cqc_eqc_rdb_parameters.log_num_of_srq,
  1464. log_num_srqs );
  1465. DBGC ( arbel, "Arbel %p ICM SRQC base = %zx\n", arbel, icm_offset );
  1466. icm_offset += icm_usage ( log_num_srqs, arbel->limits.srqc_entry_size );
  1467. /* End-to-end contexts */
  1468. log_num_ees = fls ( arbel->limits.reserved_ees - 1 );
  1469. MLX_FILL_2 ( init_hca, 17,
  1470. qpc_eec_cqc_eqc_rdb_parameters.eec_base_addr_l,
  1471. ( icm_offset >> 7 ),
  1472. qpc_eec_cqc_eqc_rdb_parameters.log_num_of_ee,
  1473. log_num_ees );
  1474. DBGC ( arbel, "Arbel %p ICM EEC base = %zx\n", arbel, icm_offset );
  1475. icm_offset += icm_usage ( log_num_ees, arbel->limits.eec_entry_size );
  1476. /* Extended end-to-end contexts */
  1477. MLX_FILL_1 ( init_hca, 29,
  1478. qpc_eec_cqc_eqc_rdb_parameters.eeec_base_addr_l,
  1479. icm_offset );
  1480. DBGC ( arbel, "Arbel %p ICM EEEC base = %zx\n", arbel, icm_offset );
  1481. icm_offset += icm_usage ( log_num_ees, arbel->limits.eeec_entry_size );
  1482. /* Completion queue contexts */
  1483. log_num_cqs = fls ( arbel->limits.reserved_cqs + ARBEL_MAX_CQS - 1 );
  1484. MLX_FILL_2 ( init_hca, 21,
  1485. qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_l,
  1486. ( icm_offset >> 6 ),
  1487. qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq,
  1488. log_num_cqs );
  1489. DBGC ( arbel, "Arbel %p ICM CQC base = %zx\n", arbel, icm_offset );
  1490. icm_offset += icm_usage ( log_num_cqs, arbel->limits.cqc_entry_size );
  1491. /* Memory translation table */
  1492. log_num_mtts = fls ( arbel->limits.reserved_mtts - 1 );
  1493. MLX_FILL_1 ( init_hca, 65,
  1494. tpt_parameters.mtt_base_addr_l, icm_offset );
  1495. DBGC ( arbel, "Arbel %p ICM MTT base = %zx\n", arbel, icm_offset );
  1496. icm_offset += icm_usage ( log_num_mtts, arbel->limits.mtt_entry_size );
  1497. /* Memory protection table */
  1498. log_num_mpts = fls ( arbel->limits.reserved_mrws + 1 - 1 );
  1499. MLX_FILL_1 ( init_hca, 61,
  1500. tpt_parameters.mpt_base_adr_l, icm_offset );
  1501. MLX_FILL_1 ( init_hca, 62,
  1502. tpt_parameters.log_mpt_sz, log_num_mpts );
  1503. DBGC ( arbel, "Arbel %p ICM MTT base = %zx\n", arbel, icm_offset );
  1504. icm_offset += icm_usage ( log_num_mpts, arbel->limits.mpt_entry_size );
  1505. /* RDMA something or other */
  1506. log_num_rdbs = fls ( arbel->limits.reserved_rdbs - 1 );
  1507. MLX_FILL_1 ( init_hca, 37,
  1508. qpc_eec_cqc_eqc_rdb_parameters.rdb_base_addr_l,
  1509. icm_offset );
  1510. DBGC ( arbel, "Arbel %p ICM RDB base = %zx\n", arbel, icm_offset );
  1511. icm_offset += icm_usage ( log_num_rdbs, 32 );
  1512. /* Event queue contexts */
  1513. log_num_eqs = 6;
  1514. MLX_FILL_2 ( init_hca, 33,
  1515. qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_l,
  1516. ( icm_offset >> 6 ),
  1517. qpc_eec_cqc_eqc_rdb_parameters.log_num_eq,
  1518. log_num_eqs );
  1519. DBGC ( arbel, "Arbel %p ICM EQ base = %zx\n", arbel, icm_offset );
  1520. icm_offset += ( ( 1 << log_num_eqs ) * arbel->limits.eqc_entry_size );
  1521. /* Multicast table */
  1522. MLX_FILL_1 ( init_hca, 49,
  1523. multicast_parameters.mc_base_addr_l, icm_offset );
  1524. MLX_FILL_1 ( init_hca, 52,
  1525. multicast_parameters.log_mc_table_entry_sz,
  1526. fls ( sizeof ( struct arbelprm_mgm_entry ) - 1 ) );
  1527. MLX_FILL_1 ( init_hca, 53,
  1528. multicast_parameters.mc_table_hash_sz, 8 );
  1529. MLX_FILL_1 ( init_hca, 54,
  1530. multicast_parameters.log_mc_table_sz, 3 );
  1531. DBGC ( arbel, "Arbel %p ICM MC base = %zx\n", arbel, icm_offset );
  1532. icm_offset += ( 8 * sizeof ( struct arbelprm_mgm_entry ) );
  1533. arbel->icm_len = icm_offset;
  1534. arbel->icm_len = ( ( arbel->icm_len + 4095 ) & ~4095 );
  1535. /* Get ICM auxiliary area size */
  1536. memset ( &icm_size, 0, sizeof ( icm_size ) );
  1537. MLX_FILL_1 ( &icm_size, 1, value, arbel->icm_len );
  1538. if ( ( rc = arbel_cmd_set_icm_size ( arbel, &icm_size,
  1539. &icm_aux_size ) ) != 0 ) {
  1540. DBGC ( arbel, "Arbel %p could not set ICM size: %s\n",
  1541. arbel, strerror ( rc ) );
  1542. goto err_set_icm_size;
  1543. }
  1544. arbel->icm_aux_len = ( MLX_GET ( &icm_aux_size, value ) * 4096 );
  1545. /* Allocate ICM data and auxiliary area */
  1546. DBGC ( arbel, "Arbel %p requires %zd kB ICM and %zd kB AUX ICM\n",
  1547. arbel, ( arbel->icm_len / 1024 ),
  1548. ( arbel->icm_aux_len / 1024 ) );
  1549. arbel->icm = umalloc ( arbel->icm_len + arbel->icm_aux_len );
  1550. if ( ! arbel->icm ) {
  1551. rc = -ENOMEM;
  1552. goto err_alloc;
  1553. }
  1554. /* Map ICM auxiliary area */
  1555. memset ( &map_icm_aux, 0, sizeof ( map_icm_aux ) );
  1556. MLX_FILL_2 ( &map_icm_aux, 3,
  1557. log2size, fls ( ( arbel->icm_aux_len / 4096 ) - 1 ),
  1558. pa_l,
  1559. ( user_to_phys ( arbel->icm, arbel->icm_len ) >> 12 ) );
  1560. if ( ( rc = arbel_cmd_map_icm_aux ( arbel, &map_icm_aux ) ) != 0 ) {
  1561. DBGC ( arbel, "Arbel %p could not map AUX ICM: %s\n",
  1562. arbel, strerror ( rc ) );
  1563. goto err_map_icm_aux;
  1564. }
  1565. /* MAP ICM area */
  1566. memset ( &map_icm, 0, sizeof ( map_icm ) );
  1567. MLX_FILL_2 ( &map_icm, 3,
  1568. log2size, fls ( ( arbel->icm_len / 4096 ) - 1 ),
  1569. pa_l, ( user_to_phys ( arbel->icm, 0 ) >> 12 ) );
  1570. if ( ( rc = arbel_cmd_map_icm ( arbel, &map_icm ) ) != 0 ) {
  1571. DBGC ( arbel, "Arbel %p could not map ICM: %s\n",
  1572. arbel, strerror ( rc ) );
  1573. goto err_map_icm;
  1574. }
  1575. /* Initialise UAR context */
  1576. arbel->db_rec = phys_to_virt ( user_to_phys ( arbel->icm, 0 ) +
  1577. ( arbel->limits.reserved_uars *
  1578. ARBEL_PAGE_SIZE ) );
  1579. memset ( arbel->db_rec, 0, ARBEL_PAGE_SIZE );
  1580. db_rec = &arbel->db_rec[ARBEL_GROUP_SEPARATOR_DOORBELL];
  1581. MLX_FILL_1 ( &db_rec->qp, 1, res, ARBEL_UAR_RES_GROUP_SEP );
  1582. return 0;
  1583. arbel_cmd_unmap_icm ( arbel, ( arbel->icm_len / 4096 ) );
  1584. err_map_icm:
  1585. arbel_cmd_unmap_icm_aux ( arbel );
  1586. err_map_icm_aux:
  1587. ufree ( arbel->icm );
  1588. arbel->icm = UNULL;
  1589. err_alloc:
  1590. err_set_icm_size:
  1591. return rc;
  1592. }
  1593. /**
  1594. * Free ICM
  1595. *
  1596. * @v arbel Arbel device
  1597. */
  1598. static void arbel_free_icm ( struct arbel *arbel ) {
  1599. arbel_cmd_unmap_icm ( arbel, ( arbel->icm_len / 4096 ) );
  1600. arbel_cmd_unmap_icm_aux ( arbel );
  1601. ufree ( arbel->icm );
  1602. arbel->icm = UNULL;
  1603. }
  1604. /***************************************************************************
  1605. *
  1606. * PCI interface
  1607. *
  1608. ***************************************************************************
  1609. */
  1610. /**
  1611. * Set up memory protection table
  1612. *
  1613. * @v arbel Arbel device
  1614. * @ret rc Return status code
  1615. */
  1616. static int arbel_setup_mpt ( struct arbel *arbel ) {
  1617. struct arbelprm_mpt mpt;
  1618. uint32_t key;
  1619. int rc;
  1620. /* Derive key */
  1621. key = ( arbel->limits.reserved_mrws | ARBEL_MKEY_PREFIX );
  1622. arbel->reserved_lkey = ( ( key << 8 ) | ( key >> 24 ) );
  1623. /* Initialise memory protection table */
  1624. memset ( &mpt, 0, sizeof ( mpt ) );
  1625. MLX_FILL_4 ( &mpt, 0,
  1626. r_w, 1,
  1627. pa, 1,
  1628. lr, 1,
  1629. lw, 1 );
  1630. MLX_FILL_1 ( &mpt, 2, mem_key, key );
  1631. MLX_FILL_1 ( &mpt, 3, pd, ARBEL_GLOBAL_PD );
  1632. MLX_FILL_1 ( &mpt, 6, reg_wnd_len_h, 0xffffffffUL );
  1633. MLX_FILL_1 ( &mpt, 7, reg_wnd_len_l, 0xffffffffUL );
  1634. if ( ( rc = arbel_cmd_sw2hw_mpt ( arbel, arbel->limits.reserved_mrws,
  1635. &mpt ) ) != 0 ) {
  1636. DBGC ( arbel, "Arbel %p could not set up MPT: %s\n",
  1637. arbel, strerror ( rc ) );
  1638. return rc;
  1639. }
  1640. return 0;
  1641. }
  1642. /**
  1643. * Probe PCI device
  1644. *
  1645. * @v pci PCI device
  1646. * @v id PCI ID
  1647. * @ret rc Return status code
  1648. */
  1649. static int arbel_probe ( struct pci_device *pci,
  1650. const struct pci_device_id *id __unused ) {
  1651. struct arbel *arbel;
  1652. struct ib_device *ibdev;
  1653. struct arbelprm_init_hca init_hca;
  1654. int i;
  1655. int rc;
  1656. /* Allocate Arbel device */
  1657. arbel = zalloc ( sizeof ( *arbel ) );
  1658. if ( ! arbel ) {
  1659. rc = -ENOMEM;
  1660. goto err_alloc_arbel;
  1661. }
  1662. pci_set_drvdata ( pci, arbel );
  1663. /* Allocate Infiniband devices */
  1664. for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ ) {
  1665. ibdev = alloc_ibdev ( 0 );
  1666. if ( ! ibdev ) {
  1667. rc = -ENOMEM;
  1668. goto err_alloc_ibdev;
  1669. }
  1670. arbel->ibdev[i] = ibdev;
  1671. ibdev->op = &arbel_ib_operations;
  1672. ibdev->dev = &pci->dev;
  1673. ibdev->port = ( ARBEL_PORT_BASE + i );
  1674. ib_set_drvdata ( ibdev, arbel );
  1675. }
  1676. /* Fix up PCI device */
  1677. adjust_pci_device ( pci );
  1678. /* Get PCI BARs */
  1679. arbel->config = ioremap ( pci_bar_start ( pci, ARBEL_PCI_CONFIG_BAR ),
  1680. ARBEL_PCI_CONFIG_BAR_SIZE );
  1681. arbel->uar = ioremap ( ( pci_bar_start ( pci, ARBEL_PCI_UAR_BAR ) +
  1682. ARBEL_PCI_UAR_IDX * ARBEL_PCI_UAR_SIZE ),
  1683. ARBEL_PCI_UAR_SIZE );
  1684. /* Allocate space for mailboxes */
  1685. arbel->mailbox_in = malloc_dma ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
  1686. if ( ! arbel->mailbox_in ) {
  1687. rc = -ENOMEM;
  1688. goto err_mailbox_in;
  1689. }
  1690. arbel->mailbox_out = malloc_dma ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
  1691. if ( ! arbel->mailbox_out ) {
  1692. rc = -ENOMEM;
  1693. goto err_mailbox_out;
  1694. }
  1695. /* Start firmware */
  1696. if ( ( rc = arbel_start_firmware ( arbel ) ) != 0 )
  1697. goto err_start_firmware;
  1698. /* Get device limits */
  1699. if ( ( rc = arbel_get_limits ( arbel ) ) != 0 )
  1700. goto err_get_limits;
  1701. /* Allocate ICM */
  1702. memset ( &init_hca, 0, sizeof ( init_hca ) );
  1703. if ( ( rc = arbel_alloc_icm ( arbel, &init_hca ) ) != 0 )
  1704. goto err_alloc_icm;
  1705. /* Initialise HCA */
  1706. MLX_FILL_1 ( &init_hca, 74, uar_parameters.log_max_uars, 1 );
  1707. if ( ( rc = arbel_cmd_init_hca ( arbel, &init_hca ) ) != 0 ) {
  1708. DBGC ( arbel, "Arbel %p could not initialise HCA: %s\n",
  1709. arbel, strerror ( rc ) );
  1710. goto err_init_hca;
  1711. }
  1712. /* Set up memory protection */
  1713. if ( ( rc = arbel_setup_mpt ( arbel ) ) != 0 )
  1714. goto err_setup_mpt;
  1715. /* Register Infiniband devices */
  1716. for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ ) {
  1717. if ( ( rc = register_ibdev ( arbel->ibdev[i] ) ) != 0 ) {
  1718. DBGC ( arbel, "Arbel %p could not register IB "
  1719. "device: %s\n", arbel, strerror ( rc ) );
  1720. goto err_register_ibdev;
  1721. }
  1722. }
  1723. return 0;
  1724. i = ( ARBEL_NUM_PORTS - 1 );
  1725. err_register_ibdev:
  1726. for ( ; i >= 0 ; i-- )
  1727. unregister_ibdev ( arbel->ibdev[i] );
  1728. err_setup_mpt:
  1729. arbel_cmd_close_hca ( arbel );
  1730. err_init_hca:
  1731. arbel_free_icm ( arbel );
  1732. err_alloc_icm:
  1733. err_get_limits:
  1734. arbel_stop_firmware ( arbel );
  1735. err_start_firmware:
  1736. free_dma ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
  1737. err_mailbox_out:
  1738. free_dma ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
  1739. err_mailbox_in:
  1740. i = ( ARBEL_NUM_PORTS - 1 );
  1741. err_alloc_ibdev:
  1742. for ( ; i >= 0 ; i-- )
  1743. free_ibdev ( arbel->ibdev[i] );
  1744. free ( arbel );
  1745. err_alloc_arbel:
  1746. return rc;
  1747. }
  1748. /**
  1749. * Remove PCI device
  1750. *
  1751. * @v pci PCI device
  1752. */
  1753. static void arbel_remove ( struct pci_device *pci ) {
  1754. struct arbel *arbel = pci_get_drvdata ( pci );
  1755. int i;
  1756. for ( i = ( ARBEL_NUM_PORTS - 1 ) ; i >= 0 ; i-- )
  1757. unregister_ibdev ( arbel->ibdev[i] );
  1758. arbel_cmd_close_hca ( arbel );
  1759. arbel_free_icm ( arbel );
  1760. arbel_stop_firmware ( arbel );
  1761. arbel_stop_firmware ( arbel );
  1762. free_dma ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
  1763. free_dma ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
  1764. for ( i = ( ARBEL_NUM_PORTS - 1 ) ; i >= 0 ; i-- )
  1765. free_ibdev ( arbel->ibdev[i] );
  1766. free ( arbel );
  1767. }
/** PCI device IDs claimed by this driver (vendor 0x15b3 = Mellanox) */
static struct pci_device_id arbel_nics[] = {
	PCI_ROM ( 0x15b3, 0x6282, "mt25218", "MT25218 HCA driver" ),
	PCI_ROM ( 0x15b3, 0x6274, "mt25204", "MT25204 HCA driver" ),
};

/** Arbel PCI driver registration */
struct pci_driver arbel_driver __pci_driver = {
	.ids = arbel_nics,
	.id_count = ( sizeof ( arbel_nics ) / sizeof ( arbel_nics[0] ) ),
	.probe = arbel_probe,
	.remove = arbel_remove,
};