You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

ib_gma.c 18KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644
  1. /*
  2. * Copyright (C) 2009 Michael Brown <mbrown@fensystems.co.uk>.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License as
  6. * published by the Free Software Foundation; either version 2 of the
  7. * License, or any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful, but
  10. * WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. * General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  17. */
  18. FILE_LICENCE ( GPL2_OR_LATER );
  19. #include <stdint.h>
  20. #include <stdlib.h>
  21. #include <string.h>
  22. #include <errno.h>
  23. #include <stdio.h>
  24. #include <unistd.h>
  25. #include <byteswap.h>
  26. #include <gpxe/infiniband.h>
  27. #include <gpxe/iobuf.h>
  28. #include <gpxe/ib_gma.h>
  29. /**
  30. * @file
  31. *
  32. * Infiniband General Management Agent
  33. *
  34. */
/** A MAD request
 *
 * Tracks one outstanding MAD transaction issued via ib_gma_request().
 * Requests issued with retries enabled live on the GMA's request list
 * until a response with a matching TID arrives or the retry timer
 * finally expires.
 */
struct ib_mad_request {
	/** Associated GMA */
	struct ib_gma *gma;
	/** List of outstanding MAD requests
	 *
	 * Linked into ib_gma::requests while awaiting a response.
	 */
	struct list_head list;
	/** Retry timer
	 *
	 * Expiry handler is ib_gma_timer_expired().
	 */
	struct retry_timer timer;
	/** Destination address */
	struct ib_address_vector av;
	/** MAD request
	 *
	 * Private copy of the caller's MAD; the TID is overwritten
	 * when the request is issued.
	 */
	union ib_mad mad;
};
  48. /** GMA number of send WQEs
  49. *
  50. * This is a policy decision.
  51. */
  52. #define IB_GMA_NUM_SEND_WQES 4
  53. /** GMA number of receive WQEs
  54. *
  55. * This is a policy decision.
  56. */
  57. #define IB_GMA_NUM_RECV_WQES 2
  58. /** GMA number of completion queue entries
  59. *
  60. * This is a policy decision
  61. */
  62. #define IB_GMA_NUM_CQES 8
  63. /** TID magic signature */
  64. #define IB_GMA_TID_MAGIC ( ( 'g' << 24 ) | ( 'P' << 16 ) | ( 'X' << 8 ) | 'E' )
  65. /** TID to use for next MAD request */
  66. static unsigned int next_request_tid;
  67. /*****************************************************************************
  68. *
  69. * Subnet management MAD handlers
  70. *
  71. *****************************************************************************
  72. */
  73. /**
  74. * Get node information
  75. *
  76. * @v gma General management agent
  77. * @v mad MAD
  78. */
  79. static void ib_sma_get_node_info ( struct ib_gma *gma,
  80. union ib_mad *mad ) {
  81. struct ib_device *ibdev = gma->ibdev;
  82. struct ib_node_info *node_info = &mad->smp.smp_data.node_info;
  83. memset ( node_info, 0, sizeof ( *node_info ) );
  84. node_info->base_version = IB_MGMT_BASE_VERSION;
  85. node_info->class_version = IB_SMP_CLASS_VERSION;
  86. node_info->node_type = IB_NODE_TYPE_HCA;
  87. node_info->num_ports = ib_get_hca_info ( ibdev, &node_info->sys_guid );
  88. memcpy ( &node_info->node_guid, &node_info->sys_guid,
  89. sizeof ( node_info->node_guid ) );
  90. memcpy ( &node_info->port_guid, &ibdev->gid.u.half[1],
  91. sizeof ( node_info->port_guid ) );
  92. node_info->partition_cap = htons ( 1 );
  93. node_info->local_port_num = ibdev->port;
  94. }
  95. /**
  96. * Get node description
  97. *
  98. * @v gma General management agent
  99. * @v mad MAD
  100. */
  101. static void ib_sma_get_node_desc ( struct ib_gma *gma,
  102. union ib_mad *mad ) {
  103. struct ib_device *ibdev = gma->ibdev;
  104. struct ib_node_desc *node_desc = &mad->smp.smp_data.node_desc;
  105. struct ib_gid_half *guid = &ibdev->gid.u.half[1];
  106. memset ( node_desc, 0, sizeof ( *node_desc ) );
  107. snprintf ( node_desc->node_string, sizeof ( node_desc->node_string ),
  108. "gPXE %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x (%s)",
  109. guid->bytes[0], guid->bytes[1], guid->bytes[2],
  110. guid->bytes[3], guid->bytes[4], guid->bytes[5],
  111. guid->bytes[6], guid->bytes[7], ibdev->dev->name );
  112. }
  113. /**
  114. * Get GUID information
  115. *
  116. * @v gma General management agent
  117. * @v mad MAD
  118. */
  119. static void ib_sma_get_guid_info ( struct ib_gma *gma,
  120. union ib_mad *mad ) {
  121. struct ib_device *ibdev = gma->ibdev;
  122. struct ib_guid_info *guid_info = &mad->smp.smp_data.guid_info;
  123. memset ( guid_info, 0, sizeof ( *guid_info ) );
  124. memcpy ( guid_info->guid[0], &ibdev->gid.u.half[1],
  125. sizeof ( guid_info->guid[0] ) );
  126. }
  127. /**
  128. * Get port information
  129. *
  130. * @v gma General management agent
  131. * @v mad MAD
  132. */
  133. static void ib_sma_get_port_info ( struct ib_gma *gma,
  134. union ib_mad *mad ) {
  135. struct ib_device *ibdev = gma->ibdev;
  136. struct ib_port_info *port_info = &mad->smp.smp_data.port_info;
  137. memset ( port_info, 0, sizeof ( *port_info ) );
  138. memcpy ( port_info->gid_prefix, &ibdev->gid.u.half[0],
  139. sizeof ( port_info->gid_prefix ) );
  140. port_info->lid = ntohs ( ibdev->lid );
  141. port_info->mastersm_lid = ntohs ( ibdev->sm_lid );
  142. port_info->local_port_num = ibdev->port;
  143. port_info->link_width_enabled = ibdev->link_width;
  144. port_info->link_width_supported = ibdev->link_width;
  145. port_info->link_width_active = ibdev->link_width;
  146. port_info->link_speed_supported__port_state =
  147. ( ( ibdev->link_speed << 4 ) | ibdev->port_state );
  148. port_info->port_phys_state__link_down_def_state =
  149. ( ( IB_PORT_PHYS_STATE_POLLING << 4 ) |
  150. IB_PORT_PHYS_STATE_POLLING );
  151. port_info->link_speed_active__link_speed_enabled =
  152. ( ( ibdev->link_speed << 4 ) | ibdev->link_speed );
  153. port_info->neighbour_mtu__mastersm_sl =
  154. ( ( IB_MTU_2048 << 4 ) | ibdev->sm_sl );
  155. port_info->vl_cap__init_type = ( IB_VL_0 << 4 );
  156. port_info->init_type_reply__mtu_cap = IB_MTU_2048;
  157. port_info->operational_vls__enforcement = ( IB_VL_0 << 4 );
  158. port_info->guid_cap = 1;
  159. }
  160. /**
  161. * Set port information
  162. *
  163. * @v gma General management agent
  164. * @v mad MAD
  165. */
  166. static void ib_sma_set_port_info ( struct ib_gma *gma,
  167. union ib_mad *mad ) {
  168. struct ib_device *ibdev = gma->ibdev;
  169. const struct ib_port_info *port_info = &mad->smp.smp_data.port_info;
  170. int rc;
  171. memcpy ( &ibdev->gid.u.half[0], port_info->gid_prefix,
  172. sizeof ( ibdev->gid.u.half[0] ) );
  173. ibdev->lid = ntohs ( port_info->lid );
  174. ibdev->sm_lid = ntohs ( port_info->mastersm_lid );
  175. ibdev->sm_sl = ( port_info->neighbour_mtu__mastersm_sl & 0xf );
  176. if ( ( rc = ib_set_port_info ( ibdev, port_info ) ) != 0 ) {
  177. DBGC ( ibdev, "IBDEV %p could not set port information: %s\n",
  178. ibdev, strerror ( rc ) );
  179. mad->hdr.status =
  180. htons ( IB_MGMT_STATUS_UNSUPPORTED_METHOD_ATTR );
  181. }
  182. ib_sma_get_port_info ( gma, mad );
  183. }
  184. /**
  185. * Get partition key table
  186. *
  187. * @v gma General management agent
  188. * @v mad MAD
  189. */
  190. static void ib_sma_get_pkey_table ( struct ib_gma *gma,
  191. union ib_mad *mad ) {
  192. struct ib_device *ibdev = gma->ibdev;
  193. struct ib_pkey_table *pkey_table = &mad->smp.smp_data.pkey_table;
  194. memset ( pkey_table, 0, sizeof ( *pkey_table ) );
  195. pkey_table->pkey[0] = htons ( ibdev->pkey );
  196. }
  197. /**
  198. * Set partition key table
  199. *
  200. * @v gma General management agent
  201. * @v mad MAD
  202. */
  203. static void ib_sma_set_pkey_table ( struct ib_gma *gma,
  204. union ib_mad *mad ) {
  205. struct ib_device *ibdev = gma->ibdev;
  206. struct ib_pkey_table *pkey_table = &mad->smp.smp_data.pkey_table;
  207. ibdev->pkey = ntohs ( pkey_table->pkey[0] );
  208. ib_sma_get_pkey_table ( gma, mad );
  209. }
/** List of attribute handlers
 *
 * Registered in the GMA handler table and matched by ib_handle_mad().
 * Every entry is a subnet management class handler responding with
 * GetResp; IB_SMP_CLASS_IGNORE is used for the class match - NOTE(review):
 * presumably so that directed-route SMPs match these LID-routed entries
 * as well; confirm against the IB_SMP_CLASS_IGNORE definition.
 */
struct ib_gma_handler ib_sma_handlers[] __ib_gma_handler = {
	/* Get(NodeInfo) */
	{
		.mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
		.mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
		.class_version = IB_SMP_CLASS_VERSION,
		.method = IB_MGMT_METHOD_GET,
		.resp_method = IB_MGMT_METHOD_GET_RESP,
		.attr_id = htons ( IB_SMP_ATTR_NODE_INFO ),
		.handle = ib_sma_get_node_info,
	},
	/* Get(NodeDescription) */
	{
		.mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
		.mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
		.class_version = IB_SMP_CLASS_VERSION,
		.method = IB_MGMT_METHOD_GET,
		.resp_method = IB_MGMT_METHOD_GET_RESP,
		.attr_id = htons ( IB_SMP_ATTR_NODE_DESC ),
		.handle = ib_sma_get_node_desc,
	},
	/* Get(GUIDInfo) */
	{
		.mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
		.mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
		.class_version = IB_SMP_CLASS_VERSION,
		.method = IB_MGMT_METHOD_GET,
		.resp_method = IB_MGMT_METHOD_GET_RESP,
		.attr_id = htons ( IB_SMP_ATTR_GUID_INFO ),
		.handle = ib_sma_get_guid_info,
	},
	/* Get(PortInfo) */
	{
		.mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
		.mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
		.class_version = IB_SMP_CLASS_VERSION,
		.method = IB_MGMT_METHOD_GET,
		.resp_method = IB_MGMT_METHOD_GET_RESP,
		.attr_id = htons ( IB_SMP_ATTR_PORT_INFO ),
		.handle = ib_sma_get_port_info,
	},
	/* Set(PortInfo) */
	{
		.mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
		.mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
		.class_version = IB_SMP_CLASS_VERSION,
		.method = IB_MGMT_METHOD_SET,
		.resp_method = IB_MGMT_METHOD_GET_RESP,
		.attr_id = htons ( IB_SMP_ATTR_PORT_INFO ),
		.handle = ib_sma_set_port_info,
	},
	/* Get(PKeyTable) */
	{
		.mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
		.mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
		.class_version = IB_SMP_CLASS_VERSION,
		.method = IB_MGMT_METHOD_GET,
		.resp_method = IB_MGMT_METHOD_GET_RESP,
		.attr_id = htons ( IB_SMP_ATTR_PKEY_TABLE ),
		.handle = ib_sma_get_pkey_table,
	},
	/* Set(PKeyTable) */
	{
		.mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED,
		.mgmt_class_ignore = IB_SMP_CLASS_IGNORE,
		.class_version = IB_SMP_CLASS_VERSION,
		.method = IB_MGMT_METHOD_SET,
		.resp_method = IB_MGMT_METHOD_GET_RESP,
		.attr_id = htons ( IB_SMP_ATTR_PKEY_TABLE ),
		.handle = ib_sma_set_pkey_table,
	},
};
  276. /*****************************************************************************
  277. *
  278. * General management agent
  279. *
  280. *****************************************************************************
  281. */
  282. /**
  283. * Call attribute handler
  284. *
  285. * @v gma General management agent
  286. * @v mad MAD
  287. */
  288. static void ib_handle_mad ( struct ib_gma *gma, union ib_mad *mad ) {
  289. struct ib_mad_hdr *hdr = &mad->hdr;
  290. struct ib_gma_handler *handler;
  291. for_each_table_entry ( handler, IB_GMA_HANDLERS ) {
  292. if ( ( ( handler->mgmt_class & ~handler->mgmt_class_ignore ) ==
  293. ( hdr->mgmt_class & ~handler->mgmt_class_ignore ) ) &&
  294. ( handler->class_version == hdr->class_version ) &&
  295. ( handler->method == hdr->method ) &&
  296. ( handler->attr_id == hdr->attr_id ) ) {
  297. hdr->method = handler->resp_method;
  298. handler->handle ( gma, mad );
  299. return;
  300. }
  301. }
  302. hdr->method = IB_MGMT_METHOD_TRAP;
  303. hdr->status = htons ( IB_MGMT_STATUS_UNSUPPORTED_METHOD_ATTR );
  304. }
/**
 * Complete GMA receive
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v av		Address vector of the sender
 * @v iobuf		I/O buffer (consumed on all paths)
 * @v rc		Completion status code
 *
 * Validates the received MAD, retires any outstanding request with a
 * matching TID, dispatches the MAD to its attribute handler, and
 * transmits the handler's response (if any) back to the sender.
 */
static void ib_gma_complete_recv ( struct ib_device *ibdev,
				   struct ib_queue_pair *qp,
				   struct ib_address_vector *av,
				   struct io_buffer *iobuf, int rc ) {
	struct ib_gma *gma = ib_qp_get_ownerdata ( qp );
	struct ib_mad_request *request;
	union ib_mad *mad;
	struct ib_mad_hdr *hdr;
	unsigned int hop_pointer;
	unsigned int hop_count;

	/* Ignore errors */
	if ( rc != 0 ) {
		DBGC ( gma, "GMA %p RX error: %s\n", gma, strerror ( rc ) );
		goto out;
	}

	/* Sanity checks: a MAD must be exactly sizeof ( *mad ) bytes */
	if ( iob_len ( iobuf ) != sizeof ( *mad ) ) {
		DBGC ( gma, "GMA %p RX bad size (%zd bytes)\n",
		       gma, iob_len ( iobuf ) );
		DBGC_HDA ( gma, 0, iobuf->data, iob_len ( iobuf ) );
		goto out;
	}
	mad = iobuf->data;
	hdr = &mad->hdr;
	if ( hdr->base_version != IB_MGMT_BASE_VERSION ) {
		DBGC ( gma, "GMA %p unsupported base version %x\n",
		       gma, hdr->base_version );
		DBGC_HDA ( gma, 0, mad, sizeof ( *mad ) );
		goto out;
	}
	DBGC ( gma, "GMA %p RX TID %08x%08x (%02x,%02x,%02x,%04x) status "
	       "%04x\n", gma, ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ),
	       hdr->mgmt_class, hdr->class_version, hdr->method,
	       ntohs ( hdr->attr_id ), ntohs ( hdr->status ) );
	DBGC2_HDA ( gma, 0, mad, sizeof ( *mad ) );

	/* Dequeue request if applicable: a matching TID means this MAD
	 * is the response to one of our outstanding requests, so its
	 * retry timer can be stopped and the request freed.
	 */
	list_for_each_entry ( request, &gma->requests, list ) {
		if ( memcmp ( &request->mad.hdr.tid, &hdr->tid,
			      sizeof ( request->mad.hdr.tid ) ) == 0 ) {
			stop_timer ( &request->timer );
			list_del ( &request->list );
			free ( request );
			break;
		}
	}

	/* Handle MAD; the handler modifies the MAD in place to form
	 * the response, setting hdr->method accordingly.
	 */
	ib_handle_mad ( gma, mad );

	/* Finish processing if we have no response to send */
	if ( ! hdr->method )
		goto out;

	DBGC ( gma, "GMA %p TX TID %08x%08x (%02x,%02x,%02x,%04x) status "
	       "%04x\n", gma, ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ),
	       hdr->mgmt_class, hdr->class_version, hdr->method,
	       ntohs ( hdr->attr_id ), ntohs ( hdr->status ) );
	DBGC2_HDA ( gma, 0, mad, sizeof ( *mad ) );

	/* Set response fields for directed route SMPs */
	if ( hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ) {
		struct ib_mad_smp *smp = &mad->smp;

		hdr->status |= htons ( IB_SMP_STATUS_D_INBOUND );
		hop_pointer = smp->mad_hdr.class_specific.smp.hop_pointer;
		hop_count = smp->mad_hdr.class_specific.smp.hop_count;
		/* NOTE(review): presumably the SMP has terminated at
		 * this node, making hop pointer equal to hop count -
		 * confirm against the IB spec's directed-route rules.
		 */
		assert ( hop_count == hop_pointer );
		if ( hop_pointer < ( sizeof ( smp->return_path.hops ) /
				     sizeof ( smp->return_path.hops[0] ) ) ) {
			smp->return_path.hops[hop_pointer] = ibdev->port;
		} else {
			DBGC ( gma, "GMA %p invalid hop pointer %d\n",
			       gma, hop_pointer );
			goto out;
		}
	}

	/* Send MAD response, if applicable.  iob_disown() nulls iobuf,
	 * so the free_iob() at "out" is a no-op once buffer ownership
	 * has passed to ib_post_send() - TODO confirm free_iob ( NULL )
	 * is a no-op in this codebase.
	 */
	if ( ( rc = ib_post_send ( ibdev, qp, av,
				   iob_disown ( iobuf ) ) ) != 0 ) {
		DBGC ( gma, "GMA %p could not send MAD response: %s\n",
		       gma, strerror ( rc ) );
		goto out;
	}

 out:
	free_iob ( iobuf );
}
/** GMA completion operations
 *
 * Only receive completions are handled; no .complete_send handler is
 * registered.
 */
static struct ib_completion_queue_operations ib_gma_completion_ops = {
	.complete_recv = ib_gma_complete_recv,
};
  400. /**
  401. * Transmit MAD request
  402. *
  403. * @v gma General management agent
  404. * @v request MAD request
  405. * @ret rc Return status code
  406. */
  407. static int ib_gma_send ( struct ib_gma *gma, struct ib_mad_request *request ) {
  408. struct io_buffer *iobuf;
  409. int rc;
  410. DBGC ( gma, "GMA %p TX TID %08x%08x (%02x,%02x,%02x,%04x)\n",
  411. gma, ntohl ( request->mad.hdr.tid[0] ),
  412. ntohl ( request->mad.hdr.tid[1] ), request->mad.hdr.mgmt_class,
  413. request->mad.hdr.class_version, request->mad.hdr.method,
  414. ntohs ( request->mad.hdr.attr_id ) );
  415. DBGC2_HDA ( gma, 0, &request->mad, sizeof ( request->mad ) );
  416. /* Construct I/O buffer */
  417. iobuf = alloc_iob ( sizeof ( request->mad ) );
  418. if ( ! iobuf ) {
  419. DBGC ( gma, "GMA %p could not allocate buffer for TID "
  420. "%08x%08x\n", gma, ntohl ( request->mad.hdr.tid[0] ),
  421. ntohl ( request->mad.hdr.tid[1] ) );
  422. return -ENOMEM;
  423. }
  424. memcpy ( iob_put ( iobuf, sizeof ( request->mad ) ), &request->mad,
  425. sizeof ( request->mad ) );
  426. /* Send I/O buffer */
  427. if ( ( rc = ib_post_send ( gma->ibdev, gma->qp, &request->av,
  428. iobuf ) ) != 0 ) {
  429. DBGC ( gma, "GMA %p could not send TID %08x%08x: %s\n",
  430. gma, ntohl ( request->mad.hdr.tid[0] ),
  431. ntohl ( request->mad.hdr.tid[1] ), strerror ( rc ) );
  432. free_iob ( iobuf );
  433. return rc;
  434. }
  435. return 0;
  436. }
  437. /**
  438. * Handle MAD request timer expiry
  439. *
  440. * @v timer Retry timer
  441. * @v expired Failure indicator
  442. */
  443. static void ib_gma_timer_expired ( struct retry_timer *timer, int expired ) {
  444. struct ib_mad_request *request =
  445. container_of ( timer, struct ib_mad_request, timer );
  446. struct ib_gma *gma = request->gma;
  447. /* Abandon TID if we have tried too many times */
  448. if ( expired ) {
  449. DBGC ( gma, "GMA %p abandoning TID %08x%08x\n",
  450. gma, ntohl ( request->mad.hdr.tid[0] ),
  451. ntohl ( request->mad.hdr.tid[1] ) );
  452. list_del ( &request->list );
  453. free ( request );
  454. return;
  455. }
  456. /* Restart retransmission timer */
  457. start_timer ( timer );
  458. /* Resend request */
  459. ib_gma_send ( gma, request );
  460. }
  461. /**
  462. * Issue MAD request
  463. *
  464. * @v gma General management agent
  465. * @v mad MAD request
  466. * @v av Destination address, or NULL for SM
  467. * @v retry Request should be retried until a response arrives
  468. * @ret rc Return status code
  469. */
  470. int ib_gma_request ( struct ib_gma *gma, union ib_mad *mad,
  471. struct ib_address_vector *av, int retry ) {
  472. struct ib_device *ibdev = gma->ibdev;
  473. struct ib_mad_request *request;
  474. /* Allocate and initialise structure */
  475. request = zalloc ( sizeof ( *request ) );
  476. if ( ! request ) {
  477. DBGC ( gma, "GMA %p could not allocate MAD request\n", gma );
  478. return -ENOMEM;
  479. }
  480. request->gma = gma;
  481. request->timer.expired = ib_gma_timer_expired;
  482. /* Determine address vector */
  483. if ( av ) {
  484. memcpy ( &request->av, av, sizeof ( request->av ) );
  485. } else {
  486. request->av.lid = ibdev->sm_lid;
  487. request->av.sl = ibdev->sm_sl;
  488. request->av.qpn = IB_QPN_GMA;
  489. request->av.qkey = IB_QKEY_GMA;
  490. }
  491. /* Copy MAD body */
  492. memcpy ( &request->mad, mad, sizeof ( request->mad ) );
  493. /* Allocate TID */
  494. request->mad.hdr.tid[0] = htonl ( IB_GMA_TID_MAGIC );
  495. request->mad.hdr.tid[1] = htonl ( ++next_request_tid );
  496. /* Send initial request. Ignore errors; the retry timer will
  497. * take care of those we care about.
  498. */
  499. ib_gma_send ( gma, request );
  500. /* Add to list and start timer if applicable */
  501. if ( retry ) {
  502. list_add ( &request->list, &gma->requests );
  503. start_timer ( &request->timer );
  504. } else {
  505. free ( request );
  506. }
  507. return 0;
  508. }
/**
 * Create GMA
 *
 * @v ibdev		Infiniband device
 * @v type		Queue pair type
 * @ret gma		General management agent, or NULL on failure
 *
 * Allocates a GMA with its own completion queue and queue pair, and
 * pre-fills the receive ring.  Destroy with ib_destroy_gma().
 */
struct ib_gma * ib_create_gma ( struct ib_device *ibdev,
				enum ib_queue_pair_type type ) {
	struct ib_gma *gma;
	unsigned long qkey;

	/* Allocate and initialise fields */
	gma = zalloc ( sizeof ( *gma ) );
	if ( ! gma )
		goto err_alloc;
	gma->ibdev = ibdev;
	INIT_LIST_HEAD ( &gma->requests );

	/* Create completion queue (shared by send and receive work
	 * queues)
	 */
	gma->cq = ib_create_cq ( ibdev, IB_GMA_NUM_CQES,
				 &ib_gma_completion_ops );
	if ( ! gma->cq ) {
		DBGC ( gma, "GMA %p could not allocate completion queue\n",
		       gma );
		goto err_create_cq;
	}

	/* Create queue pair; SMAs use the SMA qkey, anything else the
	 * GMA qkey
	 */
	qkey = ( ( type == IB_QPT_SMA ) ? IB_QKEY_SMA : IB_QKEY_GMA );
	gma->qp = ib_create_qp ( ibdev, type, IB_GMA_NUM_SEND_WQES, gma->cq,
				 IB_GMA_NUM_RECV_WQES, gma->cq, qkey );
	if ( ! gma->qp ) {
		DBGC ( gma, "GMA %p could not allocate queue pair\n", gma );
		goto err_create_qp;
	}
	ib_qp_set_ownerdata ( gma->qp, gma );
	DBGC ( gma, "GMA %p running on QPN %#lx\n", gma, gma->qp->qpn );

	/* Fill receive ring */
	ib_refill_recv ( ibdev, gma->qp );
	return gma;

	/* NOTE: this ib_destroy_qp() call is unreachable; it documents
	 * the symmetric unwind step should another failure path be
	 * added after queue pair creation.
	 */
	ib_destroy_qp ( ibdev, gma->qp );
 err_create_qp:
	ib_destroy_cq ( ibdev, gma->cq );
 err_create_cq:
	free ( gma );
 err_alloc:
	return NULL;
}
  555. /**
  556. * Destroy GMA
  557. *
  558. * @v gma General management agent
  559. */
  560. void ib_destroy_gma ( struct ib_gma *gma ) {
  561. struct ib_device *ibdev = gma->ibdev;
  562. struct ib_mad_request *request;
  563. struct ib_mad_request *tmp;
  564. /* Flush any outstanding requests */
  565. list_for_each_entry_safe ( request, tmp, &gma->requests, list ) {
  566. stop_timer ( &request->timer );
  567. list_del ( &request->list );
  568. free ( request );
  569. }
  570. ib_destroy_qp ( ibdev, gma->qp );
  571. ib_destroy_cq ( ibdev, gma->cq );
  572. free ( gma );
  573. }