
flexboot_nodnic.c

/*
 * Copyright (C) 2015 Mellanox Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

FILE_LICENCE ( GPL2_OR_LATER );

#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <ipxe/pci.h>
#include <ipxe/malloc.h>
#include <ipxe/umalloc.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include <ipxe/vlan.h>
#include <ipxe/io.h>
#include "flexboot_nodnic.h"
#include "mlx_utils/include/public/mlx_types.h"
#include "mlx_utils/include/public/mlx_utils.h"
#include "mlx_utils/include/public/mlx_bail.h"
#include "mlx_nodnic/include/mlx_cmd.h"
#include "mlx_utils/include/public/mlx_memory.h"
#include "mlx_utils/include/public/mlx_pci.h"
#include "mlx_nodnic/include/mlx_device.h"
#include "mlx_nodnic/include/mlx_port.h"
#include <byteswap.h>
#include <usr/ifmgmt.h>
#include "mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.h"
#include "mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.h"
#include "mlx_utils/include/public/mlx_pci_gw.h"
#include "mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.h"
#include "mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.h"

/***************************************************************************
 *
 * Completion queue operations
 *
 ***************************************************************************
 */

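/**
 * Arm completion queue for the next completion event
 *
 * @v port      flexboot nodnic port
 * @ret rc      Return status code
 */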
static int flexboot_nodnic_arm_cq ( struct flexboot_nodnic_port *port ) {
#ifndef DEVICE_CX3
    mlx_uint32 val32 = 0;
    union arm_cq_uar cq_uar;

#define ARM_CQ_UAR_CQ_CI_MASK 0xffffff
#define ARM_CQ_UAR_CMDSN_MASK 3
#define ARM_CQ_UAR_CMDSN_OFFSET 28
#define ARM_CQ_UAR_CQ_CI_OFFSET 0x20

    if ( port->port_priv.device->device_cap.support_bar_cq_ctrl ) {
        cq_uar.dword[0] = cpu_to_be32((port->eth_cq->next_idx & ARM_CQ_UAR_CQ_CI_MASK) |
                ((port->cmdsn++ & ARM_CQ_UAR_CMDSN_MASK) << ARM_CQ_UAR_CMDSN_OFFSET));
        cq_uar.dword[1] = cpu_to_be32(port->eth_cq->cqn);
        wmb();
        writeq(cq_uar.qword, port->port_priv.device->uar.virt + ARM_CQ_UAR_CQ_CI_OFFSET);
        port->port_priv.arm_cq_doorbell_record->dword[0] = cq_uar.dword[1];
        port->port_priv.arm_cq_doorbell_record->dword[1] = cq_uar.dword[0];
    } else {
        val32 = ( port->eth_cq->next_idx & 0xffffff );
        if ( nodnic_port_set ( & port->port_priv, nodnic_port_option_arm_cq, val32 ) ) {
            MLX_DEBUG_ERROR( port->port_priv.device, "Failed to arm the CQ\n" );
            return MLX_FAILED;
        }
    }
#else
    mlx_utils *utils = port->port_priv.device->utils;
    nodnic_port_data_flow_gw *ptr = port->port_priv.data_flow_gw;
    mlx_uint32 data = 0;
    mlx_uint32 val = 0;

    if ( port->port_priv.device->device_cap.crspace_doorbells == 0 ) {
        val = ( port->eth_cq->next_idx & 0xffff );
        if ( nodnic_port_set ( & port->port_priv, nodnic_port_option_arm_cq, val ) ) {
            MLX_DEBUG_ERROR( port->port_priv.device, "Failed to arm the CQ\n" );
            return MLX_FAILED;
        }
    } else {
        /* Arming the CQ with CQ CI should be with this format -
         * 16 bit - CQ CI - same endianness as the FW (don't swap bytes)
         * 15 bit - reserved
         * 1 bit - arm CQ - must correct the endianness with the reserved above */
        data = ( ( ( port->eth_cq->next_idx & 0xffff ) << 16 ) | 0x0080 );
        /* Write the new index and update FW that new data was submitted */
        mlx_pci_mem_write ( utils, MlxPciWidthUint32, 0,
                ( mlx_uintn ) & ( ptr->armcq_cq_ci_dword ), 1, &data );
    }
#endif
    return 0;
}

/**
 * Create completion queue
 *
 * @v ibdev     Infiniband device
 * @v cq        Completion queue
 * @ret rc      Return status code
 */
static int flexboot_nodnic_create_cq ( struct ib_device *ibdev,
        struct ib_completion_queue *cq ) {
    struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
    struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
    struct flexboot_nodnic_completion_queue *flexboot_nodnic_cq;
    mlx_status status = MLX_SUCCESS;
    mlx_uint32 cqn;

    flexboot_nodnic_cq = (struct flexboot_nodnic_completion_queue *)
            zalloc(sizeof(*flexboot_nodnic_cq));
    if ( flexboot_nodnic_cq == NULL ) {
        status = MLX_OUT_OF_RESOURCES;
        goto qp_alloc_err;
    }

    status = nodnic_port_create_cq(&port->port_priv,
            cq->num_cqes * flexboot_nodnic->callbacks->get_cqe_size(),
            &flexboot_nodnic_cq->nodnic_completion_queue);
    MLX_FATAL_CHECK_STATUS(status, create_err,
            "nodnic_port_create_cq failed");

    flexboot_nodnic->callbacks->cqe_set_owner(
            flexboot_nodnic_cq->nodnic_completion_queue->cq_virt,
            cq->num_cqes);
    if ( flexboot_nodnic->device_priv.device_cap.support_bar_cq_ctrl ) {
        status = nodnic_port_query(&port->port_priv,
                nodnic_port_option_cq_n_index,
                (mlx_uint32 *)&cqn );
        MLX_FATAL_CHECK_STATUS(status, read_cqn_err,
                "failed to query cqn");
        cq->cqn = cqn;
    }

    ib_cq_set_drvdata ( cq, flexboot_nodnic_cq );
    return status;

read_cqn_err:
create_err:
    free(flexboot_nodnic_cq);
qp_alloc_err:
    return status;
}

/**
 * Destroy completion queue
 *
 * @v ibdev     Infiniband device
 * @v cq        Completion queue
 */
static void flexboot_nodnic_destroy_cq ( struct ib_device *ibdev,
        struct ib_completion_queue *cq ) {
    struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
    struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
    struct flexboot_nodnic_completion_queue *flexboot_nodnic_cq = ib_cq_get_drvdata ( cq );

    nodnic_port_destroy_cq(&port->port_priv,
            flexboot_nodnic_cq->nodnic_completion_queue);
    free(flexboot_nodnic_cq);
}

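/**
 * Find the work queue on a completion queue that matches a QPN and direction
 *
 * @v ibdev     Infiniband device
 * @v cq        Completion queue
 * @v qpn       Queue pair number
 * @v is_send   Direction (send or receive)
 * @ret wq      Work queue, or NULL if not found
 */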
static
struct ib_work_queue * flexboot_nodnic_find_wq ( struct ib_device *ibdev,
        struct ib_completion_queue *cq,
        unsigned long qpn, int is_send ) {
    struct ib_work_queue *wq;
    struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp;
    struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
    struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
    struct nodnic_ring *ring;
    mlx_uint32 out_qpn;

    list_for_each_entry ( wq, &cq->work_queues, list ) {
        flexboot_nodnic_qp = ib_qp_get_drvdata ( wq->qp );
        if ( wq->is_send == is_send && wq->is_send == TRUE ) {
            ring = &flexboot_nodnic_qp->nodnic_queue_pair->send.nodnic_ring;
        } else if ( wq->is_send == is_send && wq->is_send == FALSE ) {
            ring = &flexboot_nodnic_qp->nodnic_queue_pair->receive.nodnic_ring;
        } else {
            continue;
        }
        nodnic_port_get_qpn(&port->port_priv, ring, &out_qpn);
        if ( out_qpn == qpn )
            return wq;
    }
    return NULL;
}

/**
 * Handle completion
 *
 * @v ibdev     Infiniband device
 * @v cq        Completion queue
 * @v cqe_data  Hardware completion queue entry
 * @ret rc      Return status code
 */
static int flexboot_nodnic_complete ( struct ib_device *ibdev,
        struct ib_completion_queue *cq,
        struct cqe_data *cqe_data ) {
    struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
    struct ib_work_queue *wq;
    struct ib_queue_pair *qp;
    struct io_buffer *iobuf;
    struct ib_address_vector recv_dest;
    struct ib_address_vector recv_source;
    unsigned long qpn;
    unsigned long wqe_idx;
    unsigned long wqe_idx_mask;
    size_t len;
    int rc = 0;

    /* Parse completion */
    qpn = cqe_data->qpn;
    if ( cqe_data->is_error == TRUE ) {
        DBGC ( flexboot_nodnic, "flexboot_nodnic %p CQN %#lx syndrome %x vendor %x\n",
                flexboot_nodnic, cq->cqn, cqe_data->syndrome,
                cqe_data->vendor_err_syndrome );
        rc = -EIO;
        /* Don't return immediately; propagate error to completer */
    }

    /* Identify work queue */
    wq = flexboot_nodnic_find_wq( ibdev, cq, qpn, cqe_data->is_send );
    if ( wq == NULL ) {
        DBGC ( flexboot_nodnic,
                "flexboot_nodnic %p CQN %#lx unknown %s QPN %#lx\n",
                flexboot_nodnic, cq->cqn,
                ( cqe_data->is_send ? "send" : "recv" ), qpn );
        return -EIO;
    }
    qp = wq->qp;

    /* Identify work queue entry */
    wqe_idx = cqe_data->wqe_counter;
    wqe_idx_mask = ( wq->num_wqes - 1 );
    DBGCP ( flexboot_nodnic,
            "NODNIC %p CQN %#lx QPN %#lx %s WQE %#lx completed:\n",
            flexboot_nodnic, cq->cqn, qp->qpn,
            ( cqe_data->is_send ? "send" : "recv" ),
            wqe_idx );

    /* Identify I/O buffer */
    iobuf = wq->iobufs[wqe_idx & wqe_idx_mask];
    if ( iobuf == NULL ) {
        DBGC ( flexboot_nodnic,
                "NODNIC %p CQN %#lx QPN %#lx empty %s WQE %#lx\n",
                flexboot_nodnic, cq->cqn, qp->qpn,
                ( cqe_data->is_send ? "send" : "recv" ), wqe_idx );
        return -EIO;
    }
    wq->iobufs[wqe_idx & wqe_idx_mask] = NULL;

    if ( cqe_data->is_send == TRUE ) {
        /* Hand off to completion handler */
        ib_complete_send ( ibdev, qp, iobuf, rc );
    } else if ( rc != 0 ) {
        /* Propagate error to receive completion handler */
        ib_complete_recv ( ibdev, qp, NULL, NULL, iobuf, rc );
    } else {
        /* Set received length */
        len = cqe_data->byte_cnt;
        assert ( len <= iob_tailroom ( iobuf ) );
        iob_put ( iobuf, len );
        memset ( &recv_dest, 0, sizeof ( recv_dest ) );
        recv_dest.qpn = qpn;
        memset ( &recv_source, 0, sizeof ( recv_source ) );
        switch ( qp->type ) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case IB_QPT_UD:
        case IB_QPT_RC:
            break;
        case IB_QPT_ETH:
            break;
        default:
            assert ( 0 );
            return -EINVAL;
        }
        /* Hand off to completion handler */
        ib_complete_recv ( ibdev, qp, &recv_dest,
                &recv_source, iobuf, rc );
    }

    return rc;
}

/**
 * Poll completion queue
 *
 * @v ibdev     Infiniband device
 * @v cq        Completion queue
 */
static void flexboot_nodnic_poll_cq ( struct ib_device *ibdev,
        struct ib_completion_queue *cq ) {
    struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
    struct flexboot_nodnic_completion_queue *flexboot_nodnic_cq = ib_cq_get_drvdata ( cq );
    void *cqe;
    mlx_size cqe_size;
    struct cqe_data cqe_data;
    unsigned int cqe_idx_mask;
    int rc;

    cqe_size = flexboot_nodnic->callbacks->get_cqe_size();
    while ( TRUE ) {
        /* Look for completion entry */
        cqe_idx_mask = ( cq->num_cqes - 1 );
        cqe = ((uint8_t *)flexboot_nodnic_cq->nodnic_completion_queue->cq_virt) +
                cqe_size * (cq->next_idx & cqe_idx_mask);
        /* TODO: check fill_completion */
        flexboot_nodnic->callbacks->fill_completion(cqe, &cqe_data);
        if ( cqe_data.owner ^
                ( ( cq->next_idx & cq->num_cqes ) ? 1 : 0 ) ) {
            /* Entry still owned by hardware; end of poll */
            break;
        }

        /* Handle completion */
        rc = flexboot_nodnic_complete ( ibdev, cq, &cqe_data );
        if ( rc != 0 ) {
            DBGC ( flexboot_nodnic, "flexboot_nodnic %p CQN %#lx failed to complete: %s\n",
                    flexboot_nodnic, cq->cqn, strerror ( rc ) );
            DBGC_HDA ( flexboot_nodnic, virt_to_phys ( cqe ),
                    cqe, sizeof ( *cqe ) );
        }

        /* Update completion queue's index */
        cq->next_idx++;
    }
}

/***************************************************************************
 *
 * Queue pair operations
 *
 ***************************************************************************
 */

/**
 * Create queue pair
 *
 * @v ibdev     Infiniband device
 * @v qp        Queue pair
 * @ret rc      Return status code
 */
static int flexboot_nodnic_create_qp ( struct ib_device *ibdev,
        struct ib_queue_pair *qp ) {
    struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
    struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
    struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp;
    mlx_status status = MLX_SUCCESS;

    flexboot_nodnic_qp = (struct flexboot_nodnic_queue_pair *)zalloc(sizeof(*flexboot_nodnic_qp));
    if ( flexboot_nodnic_qp == NULL ) {
        status = MLX_OUT_OF_RESOURCES;
        goto qp_alloc_err;
    }

    status = nodnic_port_create_qp(&port->port_priv, qp->type,
            qp->send.num_wqes * sizeof(struct nodnic_send_wqbb),
            qp->send.num_wqes,
            qp->recv.num_wqes * sizeof(struct nodnic_recv_wqe),
            qp->recv.num_wqes,
            &flexboot_nodnic_qp->nodnic_queue_pair);
    MLX_FATAL_CHECK_STATUS(status, create_err,
            "nodnic_port_create_qp failed");

    ib_qp_set_drvdata ( qp, flexboot_nodnic_qp );
    return status;

create_err:
    free(flexboot_nodnic_qp);
qp_alloc_err:
    return status;
}

/**
 * Modify queue pair
 *
 * @v ibdev     Infiniband device
 * @v qp        Queue pair
 * @ret rc      Return status code
 */
static int flexboot_nodnic_modify_qp ( struct ib_device *ibdev __unused,
        struct ib_queue_pair *qp __unused ) {
    /* Not needed */
    return 0;
}

/**
 * Destroy queue pair
 *
 * @v ibdev     Infiniband device
 * @v qp        Queue pair
 */
static void flexboot_nodnic_destroy_qp ( struct ib_device *ibdev,
        struct ib_queue_pair *qp ) {
    struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
    struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
    struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp = ib_qp_get_drvdata ( qp );

    nodnic_port_destroy_qp(&port->port_priv, qp->type,
            flexboot_nodnic_qp->nodnic_queue_pair);
    free(flexboot_nodnic_qp);
}

/***************************************************************************
 *
 * Work request operations
 *
 ***************************************************************************
 */

/**
 * Post send work queue entry
 *
 * @v ibdev     Infiniband device
 * @v qp        Queue pair
 * @v av        Address vector
 * @v iobuf     I/O buffer
 * @ret rc      Return status code
 */
static int flexboot_nodnic_post_send ( struct ib_device *ibdev,
        struct ib_queue_pair *qp,
        struct ib_address_vector *av,
        struct io_buffer *iobuf ) {
    struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
    struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp = ib_qp_get_drvdata ( qp );
    struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
    struct ib_work_queue *wq = &qp->send;
    struct nodnic_send_wqbb *wqbb;
    nodnic_qp *nodnic_qp = flexboot_nodnic_qp->nodnic_queue_pair;
    struct nodnic_send_ring *send_ring = &nodnic_qp->send;
    mlx_status status = MLX_SUCCESS;
    unsigned int wqe_idx_mask;
    unsigned long wqe_idx;

    if ( ( port->port_priv.dma_state == FALSE ) ||
            ( port->port_priv.port_state & NODNIC_PORT_DISABLING_DMA ) ) {
        DBGC ( flexboot_nodnic, "flexboot_nodnic DMA disabled\n" );
        status = -ENETDOWN;
        goto post_send_done;
    }

    /* Allocate work queue entry */
    wqe_idx = wq->next_idx;
    wqe_idx_mask = ( wq->num_wqes - 1 );
    if ( wq->iobufs[wqe_idx & wqe_idx_mask] ) {
        DBGC ( flexboot_nodnic, "flexboot_nodnic %p QPN %#lx send queue full\n",
                flexboot_nodnic, qp->qpn );
        status = -ENOBUFS;
        goto post_send_done;
    }
    wqbb = &send_ring->wqe_virt[wqe_idx & wqe_idx_mask];
    wq->iobufs[wqe_idx & wqe_idx_mask] = iobuf;

    assert ( flexboot_nodnic->callbacks->fill_send_wqe[qp->type] != NULL );
    status = flexboot_nodnic->callbacks->fill_send_wqe[qp->type] ( ibdev, qp, av, iobuf,
            wqbb, wqe_idx );
    if ( status != 0 ) {
        DBGC ( flexboot_nodnic, "flexboot_nodnic %p QPN %#lx fill send wqe failed\n",
                flexboot_nodnic, qp->qpn );
        goto post_send_done;
    }

    wq->next_idx++;
    status = port->port_priv.send_doorbell ( &port->port_priv,
            &send_ring->nodnic_ring, ( mlx_uint16 ) wq->next_idx );
    if ( flexboot_nodnic->callbacks->tx_uar_send_doorbell_fn ) {
        flexboot_nodnic->callbacks->tx_uar_send_doorbell_fn ( ibdev, wqbb );
    }
    if ( status != 0 ) {
        DBGC ( flexboot_nodnic, "flexboot_nodnic %p ring send doorbell failed\n", flexboot_nodnic );
    }

post_send_done:
    return status;
}

/**
 * Post receive work queue entry
 *
 * @v ibdev     Infiniband device
 * @v qp        Queue pair
 * @v iobuf     I/O buffer
 * @ret rc      Return status code
 */
static int flexboot_nodnic_post_recv ( struct ib_device *ibdev,
        struct ib_queue_pair *qp,
        struct io_buffer *iobuf ) {
    struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
    struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp = ib_qp_get_drvdata ( qp );
    struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
    struct ib_work_queue *wq = &qp->recv;
    nodnic_qp *nodnic_qp = flexboot_nodnic_qp->nodnic_queue_pair;
    struct nodnic_recv_ring *recv_ring = &nodnic_qp->receive;
    struct nodnic_recv_wqe *wqe;
    unsigned int wqe_idx_mask;
    mlx_status status = MLX_SUCCESS;

    /* Allocate work queue entry */
    wqe_idx_mask = ( wq->num_wqes - 1 );
    if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
        DBGC ( flexboot_nodnic,
                "flexboot_nodnic %p QPN %#lx receive queue full\n",
                flexboot_nodnic, qp->qpn );
        status = -ENOBUFS;
        goto post_recv_done;
    }
    wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
    wqe = &((struct nodnic_recv_wqe *)recv_ring->wqe_virt)[wq->next_idx & wqe_idx_mask];

    MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_tailroom ( iobuf ) );
    MLX_FILL_1 ( &wqe->data[0], 1, l_key, flexboot_nodnic->device_priv.lkey );
    MLX_FILL_H ( &wqe->data[0], 2,
            local_address_h, virt_to_bus ( iobuf->data ) );
    MLX_FILL_1 ( &wqe->data[0], 3,
            local_address_l, virt_to_bus ( iobuf->data ) );

    wq->next_idx++;
    status = port->port_priv.recv_doorbell ( &port->port_priv,
            &recv_ring->nodnic_ring, ( mlx_uint16 ) wq->next_idx );
    if ( status != 0 ) {
        DBGC ( flexboot_nodnic, "flexboot_nodnic %p ring receive doorbell failed\n", flexboot_nodnic );
    }

post_recv_done:
    return status;
}

/***************************************************************************
 *
 * Event queues
 *
 ***************************************************************************
 */

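/**
 * Poll event queue
 *
 * The event queue itself is not polled; while the link is reported as
 * down, only the physical port state is checked.
 *
 * @v ibdev     Infiniband device
 */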
static void flexboot_nodnic_poll_eq ( struct ib_device *ibdev ) {
    struct flexboot_nodnic *flexboot_nodnic;
    struct flexboot_nodnic_port *port;
    struct net_device *netdev;
    nodnic_port_state state = 0;
    mlx_status status;

    if ( ! ibdev ) {
        DBG ( "%s: ibdev = NULL!!!\n", __FUNCTION__ );
        return;
    }

    flexboot_nodnic = ib_get_drvdata ( ibdev );
    port = &flexboot_nodnic->port[ibdev->port - 1];
    netdev = port->netdev;
    if ( ! netdev_is_open ( netdev ) ) {
        DBG2 ( "%s: port %d is closed\n", __FUNCTION__, port->ibdev->port );
        return;
    }

    /* we don't poll EQ. Just poll link status if it's not active */
    if ( ! netdev_link_ok ( netdev ) ) {
        status = nodnic_port_get_state ( &port->port_priv, &state );
        MLX_FATAL_CHECK_STATUS(status, state_err, "nodnic_port_get_state failed");
        if ( state == nodnic_port_state_active ) {
            DBG ( "%s: port %d physical link is up\n", __FUNCTION__,
                    port->ibdev->port );
            port->type->state_change ( flexboot_nodnic, port, 1 );
        }
    }
state_err:
    return;
}

/***************************************************************************
 *
 * Multicast group operations
 *
 ***************************************************************************
 */

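/**
 * Attach to multicast group
 *
 * For Ethernet queue pairs this installs a MAC filter derived from the GID.
 *
 * @v ibdev     Infiniband device
 * @v qp        Queue pair
 * @v gid       Multicast GID
 * @ret rc      Return status code
 */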
static int flexboot_nodnic_mcast_attach ( struct ib_device *ibdev,
        struct ib_queue_pair *qp,
        union ib_gid *gid ) {
    struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
    struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
    mlx_mac_address mac;
    mlx_status status = MLX_SUCCESS;

    switch ( qp->type ) {
    case IB_QPT_ETH:
        memcpy(&mac, gid, sizeof(mac));
        status = nodnic_port_add_mac_filter(&port->port_priv, mac);
        MLX_CHECK_STATUS(flexboot_nodnic->device_priv, status, mac_err,
                "nodnic_port_add_mac_filter failed");
        break;
    default:
        break;
    }
mac_err:
    return status;
}

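/**
 * Detach from multicast group
 *
 * For Ethernet queue pairs this removes the MAC filter derived from the GID.
 *
 * @v ibdev     Infiniband device
 * @v qp        Queue pair
 * @v gid       Multicast GID
 */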
static void flexboot_nodnic_mcast_detach ( struct ib_device *ibdev,
        struct ib_queue_pair *qp,
        union ib_gid *gid ) {
    struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
    struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
    mlx_mac_address mac;
    mlx_status status = MLX_SUCCESS;

    switch ( qp->type ) {
    case IB_QPT_ETH:
        memcpy(&mac, gid, sizeof(mac));
        status = nodnic_port_remove_mac_filter(&port->port_priv, mac);
        MLX_CHECK_STATUS(flexboot_nodnic->device_priv, status, mac_err,
                "nodnic_port_remove_mac_filter failed");
        break;
    default:
        break;
    }
mac_err:
    return;
}

/***************************************************************************
 *
 * Infiniband link-layer operations
 *
 ***************************************************************************
 */

/**
 * Initialise Infiniband link
 *
 * @v ibdev     Infiniband device
 * @ret rc      Return status code
 */
static int flexboot_nodnic_ib_open ( struct ib_device *ibdev __unused ) {
    int rc = 0;

    /* TODO: add implementation */
    return rc;
}

/**
 * Close Infiniband link
 *
 * @v ibdev     Infiniband device
 */
static void flexboot_nodnic_ib_close ( struct ib_device *ibdev __unused ) {
    /* TODO: add implementation */
}

/**
 * Inform embedded subnet management agent of a received MAD
 *
 * @v ibdev     Infiniband device
 * @v mad       MAD
 * @ret rc      Return status code
 */
static int flexboot_nodnic_inform_sma ( struct ib_device *ibdev __unused,
        union ib_mad *mad __unused ) {
    /* TODO: add implementation */
    return 0;
}

/** flexboot_nodnic Infiniband operations */
static struct ib_device_operations flexboot_nodnic_ib_operations = {
    .create_cq = flexboot_nodnic_create_cq,
    .destroy_cq = flexboot_nodnic_destroy_cq,
    .create_qp = flexboot_nodnic_create_qp,
    .modify_qp = flexboot_nodnic_modify_qp,
    .destroy_qp = flexboot_nodnic_destroy_qp,
    .post_send = flexboot_nodnic_post_send,
    .post_recv = flexboot_nodnic_post_recv,
    .poll_cq = flexboot_nodnic_poll_cq,
    .poll_eq = flexboot_nodnic_poll_eq,
    .open = flexboot_nodnic_ib_open,
    .close = flexboot_nodnic_ib_close,
    .mcast_attach = flexboot_nodnic_mcast_attach,
    .mcast_detach = flexboot_nodnic_mcast_detach,
    .set_port_info = flexboot_nodnic_inform_sma,
    .set_pkey_table = flexboot_nodnic_inform_sma,
};

/***************************************************************************
 *
 *
 *
 ***************************************************************************
 */

#define FLEX_NODNIC_TX_POLL_TOUT 500000
#define FLEX_NODNIC_TX_POLL_USLEEP 10

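/**
 * Poll until all outstanding transmit completions have been reaped
 *
 * @v port      flexboot nodnic port
 */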
static void flexboot_nodnic_complete_all_tx ( struct flexboot_nodnic_port *port ) {
    struct ib_device *ibdev = port->ibdev;
    struct ib_completion_queue *cq;
    struct ib_work_queue *wq;
    int keep_polling = 0;
    int timeout = FLEX_NODNIC_TX_POLL_TOUT;

    list_for_each_entry ( cq, &ibdev->cqs, list ) {
        do {
            ib_poll_cq ( ibdev, cq );
            keep_polling = 0;
            list_for_each_entry ( wq, &cq->work_queues, list ) {
                if ( wq->is_send )
                    keep_polling += ( wq->fill > 0 );
            }
            udelay ( FLEX_NODNIC_TX_POLL_USLEEP );
        } while ( keep_polling && ( timeout-- > 0 ) );
    }
}

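/**
 * Disable port DMA, draining pending transmit completions first
 *
 * @v port      flexboot nodnic port
 */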
static void flexboot_nodnic_port_disable_dma ( struct flexboot_nodnic_port *port ) {
    nodnic_port_priv *port_priv = & ( port->port_priv );
    mlx_status status;

    if ( ! ( port_priv->port_state & NODNIC_PORT_OPENED ) )
        return;

    port_priv->port_state |= NODNIC_PORT_DISABLING_DMA;
    flexboot_nodnic_complete_all_tx ( port );
    if ( ( status = nodnic_port_disable_dma ( port_priv ) ) ) {
        MLX_DEBUG_WARN ( port, "Failed to disable DMA %d\n", status );
    }
    port_priv->port_state &= ~NODNIC_PORT_DISABLING_DMA;
}

/***************************************************************************
 *
 * Ethernet operation
 *
 ***************************************************************************
 */

/** Number of flexboot_nodnic Ethernet send work queue entries */
#define FLEXBOOT_NODNIC_ETH_NUM_SEND_WQES 64

/** Number of flexboot_nodnic Ethernet receive work queue entries */
#define FLEXBOOT_NODNIC_ETH_NUM_RECV_WQES 64

/** flexboot nodnic Ethernet queue pair operations */
static struct ib_queue_pair_operations flexboot_nodnic_eth_qp_op = {
    .alloc_iob = alloc_iob,
};

/**
 * Transmit packet via flexboot_nodnic Ethernet device
 *
 * @v netdev    Network device
 * @v iobuf     I/O buffer
 * @ret rc      Return status code
 */
static int flexboot_nodnic_eth_transmit ( struct net_device *netdev,
        struct io_buffer *iobuf ) {
    struct flexboot_nodnic_port *port = netdev->priv;
    struct ib_device *ibdev = port->ibdev;
    struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
    int rc;

    /* Transmit packet */
    rc = ib_post_send ( ibdev, port->eth_qp, NULL, iobuf );
    if ( rc != 0 ) {
        DBGC ( flexboot_nodnic, "NODNIC %p port %d could not transmit: %s\n",
                flexboot_nodnic, ibdev->port, strerror ( rc ) );
        return rc;
    }
    return 0;
}

/**
 * Handle flexboot_nodnic Ethernet device send completion
 *
 * @v ibdev     Infiniband device
 * @v qp        Queue pair
 * @v iobuf     I/O buffer
 * @v rc        Completion status code
 */
static void flexboot_nodnic_eth_complete_send ( struct ib_device *ibdev __unused,
        struct ib_queue_pair *qp,
        struct io_buffer *iobuf,
        int rc ) {
    struct net_device *netdev = ib_qp_get_ownerdata ( qp );

    netdev_tx_complete_err ( netdev, iobuf, rc );
}

/**
 * Handle flexboot_nodnic Ethernet device receive completion
 *
 * @v ibdev     Infiniband device
 * @v qp        Queue pair
 * @v dest      Destination address vector, or NULL
 * @v source    Source address vector, or NULL
 * @v iobuf     I/O buffer
 * @v rc        Completion status code
 */
static void flexboot_nodnic_eth_complete_recv ( struct ib_device *ibdev __unused,
        struct ib_queue_pair *qp,
        struct ib_address_vector *dest __unused,
        struct ib_address_vector *source,
        struct io_buffer *iobuf,
        int rc ) {
    struct net_device *netdev = ib_qp_get_ownerdata ( qp );

    if ( rc != 0 ) {
        DBG ( "Received packet with error\n" );
        netdev_rx_err ( netdev, iobuf, rc );
        return;
    }
    if ( source == NULL ) {
        DBG ( "Received packet without address vector\n" );
        netdev_rx_err ( netdev, iobuf, -ENOTTY );
        return;
    }
    netdev_rx ( netdev, iobuf );
}

/** flexboot_nodnic Ethernet device completion operations */
static struct ib_completion_queue_operations flexboot_nodnic_eth_cq_op = {
    .complete_send = flexboot_nodnic_eth_complete_send,
    .complete_recv = flexboot_nodnic_eth_complete_recv,
};

/**
 * Poll flexboot_nodnic Ethernet device
 *
 * @v netdev    Network device
 */
static void flexboot_nodnic_eth_poll ( struct net_device *netdev ) {
    struct flexboot_nodnic_port *port = netdev->priv;
    struct ib_device *ibdev = port->ibdev;

    ib_poll_eq ( ibdev );
}

/**
 * Open flexboot_nodnic Ethernet device
 *
 * @v netdev    Network device
 * @ret rc      Return status code
 */
static int flexboot_nodnic_eth_open ( struct net_device *netdev ) {
    struct flexboot_nodnic_port *port = netdev->priv;
    struct ib_device *ibdev = port->ibdev;
    struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
    mlx_status status = MLX_SUCCESS;
    struct ib_completion_queue *dummy_cq = NULL;
    struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp = NULL;
    mlx_uint64 cq_size = 0;
    mlx_uint32 qpn = 0;
    nodnic_port_state state = nodnic_port_state_down;
    int rc;

    if ( port->port_priv.port_state & NODNIC_PORT_OPENED ) {
        DBGC ( flexboot_nodnic, "%s: port %d is already opened\n",
                __FUNCTION__, port->ibdev->port );
        return 0;
    }

    port->port_priv.port_state |= NODNIC_PORT_OPENED;

    dummy_cq = zalloc ( sizeof ( struct ib_completion_queue ) );
    if ( dummy_cq == NULL ) {
        DBGC ( flexboot_nodnic, "%s: Failed to allocate dummy CQ\n", __FUNCTION__ );
        status = MLX_OUT_OF_RESOURCES;
        goto err_create_dummy_cq;
    }
    INIT_LIST_HEAD ( &dummy_cq->work_queues );

    if ( ( rc = ib_create_qp ( ibdev, IB_QPT_ETH,
            FLEXBOOT_NODNIC_ETH_NUM_SEND_WQES, dummy_cq,
            FLEXBOOT_NODNIC_ETH_NUM_RECV_WQES, dummy_cq,
            &flexboot_nodnic_eth_qp_op, netdev->name,
            &port->eth_qp ) ) != 0 ) {
        DBGC ( flexboot_nodnic, "flexboot_nodnic %p port %d could not create queue pair\n",
                flexboot_nodnic, ibdev->port );
        status = MLX_OUT_OF_RESOURCES;
        goto err_create_qp;
    }
    ib_qp_set_ownerdata ( port->eth_qp, netdev );

    status = nodnic_port_get_cq_size(&port->port_priv, &cq_size);
    MLX_FATAL_CHECK_STATUS(status, get_cq_size_err,
            "nodnic_port_get_cq_size failed");

    if ( ( rc = ib_create_cq ( ibdev, cq_size, &flexboot_nodnic_eth_cq_op,
            &port->eth_cq ) ) != 0 ) {
        DBGC ( flexboot_nodnic,
                "flexboot_nodnic %p port %d could not create completion queue\n",
                flexboot_nodnic, ibdev->port );
        status = MLX_OUT_OF_RESOURCES;
        goto err_create_cq;
    }

    port->eth_qp->send.cq = port->eth_cq;
    list_del(&port->eth_qp->send.list);
    list_add ( &port->eth_qp->send.list, &port->eth_cq->work_queues );
    port->eth_qp->recv.cq = port->eth_cq;
    port->cmdsn = 0;
    list_del(&port->eth_qp->recv.list);
    list_add ( &port->eth_qp->recv.list, &port->eth_cq->work_queues );

    status = nodnic_port_allocate_eq(&port->port_priv,
            flexboot_nodnic->device_priv.device_cap.log_working_buffer_size);
    MLX_FATAL_CHECK_STATUS(status, eq_alloc_err,
            "nodnic_port_allocate_eq failed");

    status = nodnic_port_init(&port->port_priv);
    MLX_FATAL_CHECK_STATUS(status, init_err,
            "nodnic_port_init failed");

    /* update qp - qpn */
    flexboot_nodnic_qp = ib_qp_get_drvdata ( port->eth_qp );
    status = nodnic_port_get_qpn(&port->port_priv,
            &flexboot_nodnic_qp->nodnic_queue_pair->send.nodnic_ring,
            &qpn);
    MLX_FATAL_CHECK_STATUS(status, qpn_err,
            "nodnic_port_get_qpn failed");
    port->eth_qp->qpn = qpn;

    /* Fill receive rings */
    ib_refill_recv ( ibdev, port->eth_qp );

    status = nodnic_port_enable_dma(&port->port_priv);
    MLX_FATAL_CHECK_STATUS(status, dma_err,
            "nodnic_port_enable_dma failed");

    if ( flexboot_nodnic->device_priv.device_cap.support_promisc_filter ) {
        status = nodnic_port_set_promisc(&port->port_priv, TRUE);
        MLX_FATAL_CHECK_STATUS(status, promisc_err,
                "nodnic_port_set_promisc failed");
    }

    status = nodnic_port_get_state(&port->port_priv, &state);
    MLX_FATAL_CHECK_STATUS(status, state_err,
            "nodnic_port_get_state failed");

    port->type->state_change (
            flexboot_nodnic, port, state == nodnic_port_state_active );

    DBGC ( flexboot_nodnic, "%s: port %d opened (link is %s)\n",
            __FUNCTION__, port->ibdev->port,
            ( ( state == nodnic_port_state_active ) ? "Up" : "Down" ) );

    free(dummy_cq);
    return 0;

state_err:
promisc_err:
dma_err:
qpn_err:
    nodnic_port_close(&port->port_priv);
init_err:
    nodnic_port_free_eq(&port->port_priv);
eq_alloc_err:
err_create_cq:
get_cq_size_err:
    ib_destroy_qp(ibdev, port->eth_qp );
err_create_qp:
    free(dummy_cq);
err_create_dummy_cq:
    port->port_priv.port_state &= ~NODNIC_PORT_OPENED;
    return status;
}

/**
 * Close flexboot_nodnic Ethernet device
 *
 * @v netdev    Network device
 */
static void flexboot_nodnic_eth_close ( struct net_device *netdev ) {
    struct flexboot_nodnic_port *port = netdev->priv;
    struct ib_device *ibdev = port->ibdev;
    struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
    mlx_status status = MLX_SUCCESS;

    if ( ! ( port->port_priv.port_state & NODNIC_PORT_OPENED ) ) {
        DBGC ( flexboot_nodnic, "%s: port %d is already closed\n",
                __FUNCTION__, port->ibdev->port );
        return;
    }

    if ( flexboot_nodnic->device_priv.device_cap.support_promisc_filter ) {
        if ( ( status = nodnic_port_set_promisc( &port->port_priv, FALSE ) ) ) {
            DBGC ( flexboot_nodnic,
                    "nodnic_port_set_promisc failed (status = %d)\n", status );
        }
    }

    flexboot_nodnic_port_disable_dma ( port );
    port->port_priv.port_state &= ~NODNIC_PORT_OPENED;
    port->type->state_change ( flexboot_nodnic, port, FALSE );

    /* Close port */
    status = nodnic_port_close(&port->port_priv);
    if ( status != MLX_SUCCESS ) {
        DBGC ( flexboot_nodnic, "flexboot_nodnic %p port %d could not close port: %s\n",
                flexboot_nodnic, ibdev->port, strerror ( status ) );
        /* Nothing we can do about this */
    }

    ib_destroy_qp ( ibdev, port->eth_qp );
    port->eth_qp = NULL;
    ib_destroy_cq ( ibdev, port->eth_cq );
    port->eth_cq = NULL;
    nodnic_port_free_eq(&port->port_priv);

    DBGC ( flexboot_nodnic, "%s: port %d closed\n", __FUNCTION__, port->ibdev->port );
}

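/**
 * Enable or disable flexboot_nodnic Ethernet device interrupts
 *
 * @v netdev    Network device
 * @v enable    Interrupts should be enabled
 */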
void flexboot_nodnic_eth_irq ( struct net_device *netdev, int enable ) {
    struct flexboot_nodnic_port *port = netdev->priv;

    if ( enable ) {
        if ( ( port->port_priv.port_state & NODNIC_PORT_OPENED ) &&
                ! ( port->port_priv.port_state & NODNIC_PORT_DISABLING_DMA ) ) {
            flexboot_nodnic_arm_cq ( port );
        } else {
            /* do nothing */
        }
    } else {
        nodnic_device_clear_int( port->port_priv.device );
    }
}

/** flexboot_nodnic Ethernet network device operations */
static struct net_device_operations flexboot_nodnic_eth_operations = {
    .open = flexboot_nodnic_eth_open,
    .close = flexboot_nodnic_eth_close,
    .transmit = flexboot_nodnic_eth_transmit,
    .poll = flexboot_nodnic_eth_poll,
};

/**
 * Register flexboot_nodnic Ethernet device
 */
static int flexboot_nodnic_register_netdev ( struct flexboot_nodnic *flexboot_nodnic,
        struct flexboot_nodnic_port *port ) {
    mlx_status status = MLX_SUCCESS;
    struct net_device *netdev;
    struct ib_device *ibdev = port->ibdev;
    union {
        uint8_t bytes[8];
        uint32_t dwords[2];
    } mac;

    /* Allocate network device */
    netdev = alloc_etherdev ( 0 );
    if ( netdev == NULL ) {
        DBGC ( flexboot_nodnic, "flexboot_nodnic %p port %d could not allocate net device\n",
                flexboot_nodnic, ibdev->port );
        status = MLX_OUT_OF_RESOURCES;
        goto alloc_err;
    }
    port->netdev = netdev;
    netdev_init ( netdev, &flexboot_nodnic_eth_operations );
    netdev->dev = ibdev->dev;
    netdev->priv = port;

    status = nodnic_port_query(&port->port_priv,
            nodnic_port_option_mac_high,
            &mac.dwords[0]);
    MLX_FATAL_CHECK_STATUS(status, mac_err,
            "failed to query mac high");
    status = nodnic_port_query(&port->port_priv,
            nodnic_port_option_mac_low,
            &mac.dwords[1]);
    MLX_FATAL_CHECK_STATUS(status, mac_err,
            "failed to query mac low");
    mac.dwords[0] = htonl(mac.dwords[0]);
    mac.dwords[1] = htonl(mac.dwords[1]);
    memcpy ( netdev->hw_addr, &mac.bytes[2], ETH_ALEN );

    /* Register network device */
    status = register_netdev ( netdev );
    if ( status != MLX_SUCCESS ) {
        DBGC ( flexboot_nodnic,
                "flexboot_nodnic %p port %d could not register network device: %s\n",
                flexboot_nodnic, ibdev->port, strerror ( status ) );
        goto reg_err;
    }
    return status;

reg_err:
mac_err:
    netdev_put ( netdev );
alloc_err:
    return status;
}

/**
 * Handle flexboot_nodnic Ethernet device port state change
 */
static void flexboot_nodnic_state_change_netdev ( struct flexboot_nodnic *flexboot_nodnic __unused,
        struct flexboot_nodnic_port *port,
        int link_up ) {
    struct net_device *netdev = port->netdev;

    if ( link_up )
        netdev_link_up ( netdev );
    else
        netdev_link_down ( netdev );
}

/**
 * Unregister flexboot_nodnic Ethernet device
 */
static void flexboot_nodnic_unregister_netdev ( struct flexboot_nodnic *flexboot_nodnic __unused,
        struct flexboot_nodnic_port *port ) {
    struct net_device *netdev = port->netdev;

    unregister_netdev ( netdev );
    netdev_nullify ( netdev );
    netdev_put ( netdev );
}

/** flexboot_nodnic Ethernet port type */
static struct flexboot_nodnic_port_type flexboot_nodnic_port_type_eth = {
    .register_dev = flexboot_nodnic_register_netdev,
    .state_change = flexboot_nodnic_state_change_netdev,
    .unregister_dev = flexboot_nodnic_unregister_netdev,
};

/***************************************************************************
 *
 * PCI interface helper functions
 *
 ***************************************************************************
 */

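/**
 * Allocate an Infiniband device for each enabled port
 *
 * @v flexboot_nodnic_priv      nodnic device
 * @ret status  Return status code
 */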
static
mlx_status
flexboot_nodnic_allocate_infiniband_devices ( struct flexboot_nodnic *flexboot_nodnic_priv ) {
    mlx_status status = MLX_SUCCESS;
    nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv;
    struct pci_device *pci = flexboot_nodnic_priv->pci;
    struct ib_device *ibdev = NULL;
    unsigned int i = 0;

    /* Allocate Infiniband devices */
    for ( ; i < device_priv->device_cap.num_ports; i++ ) {
        if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) )
            continue;
        ibdev = alloc_ibdev(0);
        if ( ibdev == NULL ) {
            status = MLX_OUT_OF_RESOURCES;
            goto err_alloc_ibdev;
        }
        flexboot_nodnic_priv->port[i].ibdev = ibdev;
        ibdev->op = &flexboot_nodnic_ib_operations;
        ibdev->dev = &pci->dev;
        ibdev->port = ( FLEXBOOT_NODNIC_PORT_BASE + i );
        ib_set_drvdata(ibdev, flexboot_nodnic_priv);
    }
    return status;

err_alloc_ibdev:
    for ( i-- ; ( signed int ) i >= 0 ; i-- )
        ibdev_put ( flexboot_nodnic_priv->port[i].ibdev );
    return status;
}

static
mlx_status
flexboot_nodnic_thin_init_ports ( struct flexboot_nodnic *flexboot_nodnic_priv ) {
    mlx_status status = MLX_SUCCESS;
    nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv;
    nodnic_port_priv *port_priv = NULL;
    unsigned int i = 0;

    for ( i = 0; i < device_priv->device_cap.num_ports; i++ ) {
        if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) )
            continue;
        port_priv = &flexboot_nodnic_priv->port[i].port_priv;
        status = nodnic_port_thin_init( device_priv, port_priv, i );
        MLX_FATAL_CHECK_STATUS(status, thin_init_err,
                "flexboot_nodnic_thin_init_ports failed");
    }
thin_init_err:
    return status;
}

static
mlx_status
flexboot_nodnic_set_ports_type ( struct flexboot_nodnic *flexboot_nodnic_priv ) {
    mlx_status status = MLX_SUCCESS;
    nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv;
    nodnic_port_priv *port_priv = NULL;
    nodnic_port_type type = NODNIC_PORT_TYPE_UNKNOWN;
    unsigned int i = 0;

    for ( i = 0 ; i < device_priv->device_cap.num_ports ; i++ ) {
        if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) )
            continue;
        port_priv = &flexboot_nodnic_priv->port[i].port_priv;
        status = nodnic_port_get_type(port_priv, &type);
        MLX_FATAL_CHECK_STATUS(status, type_err,
                "nodnic_port_get_type failed");
        switch ( type ) {
        case NODNIC_PORT_TYPE_ETH:
            DBGC ( flexboot_nodnic_priv, "Port %d type is Ethernet\n", i );
            flexboot_nodnic_priv->port[i].type = &flexboot_nodnic_port_type_eth;
            break;
        case NODNIC_PORT_TYPE_IB:
            DBGC ( flexboot_nodnic_priv, "Port %d type is Infiniband\n", i );
            status = MLX_UNSUPPORTED;
            goto type_err;
        default:
            DBGC ( flexboot_nodnic_priv, "Port %d type is unknown\n", i );
            status = MLX_UNSUPPORTED;
            goto type_err;
        }
    }
type_err:
    return status;
}

static
mlx_status
flexboot_nodnic_ports_register_dev ( struct flexboot_nodnic *flexboot_nodnic_priv ) {
    mlx_status status = MLX_SUCCESS;
    nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv;
    struct flexboot_nodnic_port *port = NULL;
    unsigned int i = 0;

    for ( ; i < device_priv->device_cap.num_ports; i++ ) {
        if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) )
            continue;
        port = &flexboot_nodnic_priv->port[i];
        status = port->type->register_dev ( flexboot_nodnic_priv, port );
        MLX_FATAL_CHECK_STATUS(status, reg_err,
                "port register_dev failed");
    }
reg_err:
    return status;
}

static
mlx_status
flexboot_nodnic_ports_unregister_dev ( struct flexboot_nodnic *flexboot_nodnic_priv ) {
    struct flexboot_nodnic_port *port;
    nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv;
    int i = ( device_priv->device_cap.num_ports - 1 );

    for ( ; i >= 0; i-- ) {
        if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) )
            continue;
        port = &flexboot_nodnic_priv->port[i];
        port->type->unregister_dev(flexboot_nodnic_priv, port);
        ibdev_put(flexboot_nodnic_priv->port[i].ibdev);
    }
    return MLX_SUCCESS;
}

/***************************************************************************
 *
 * flexboot nodnic interface
 *
 ***************************************************************************
 */

__unused static void flexboot_nodnic_enable_dma ( struct flexboot_nodnic *nodnic ) {
    nodnic_port_priv *port_priv;
    mlx_status status;
    int i;

    for ( i = 0; i < nodnic->device_priv.device_cap.num_ports; i++ ) {
        if ( ! ( nodnic->port_mask & ( i + 1 ) ) )
            continue;
        port_priv = & ( nodnic->port[i].port_priv );
        if ( ! ( port_priv->port_state & NODNIC_PORT_OPENED ) )
            continue;
        if ( ( status = nodnic_port_enable_dma ( port_priv ) ) ) {
            MLX_DEBUG_WARN ( nodnic, "Failed to enable DMA %d\n", status );
        }
    }
}

__unused static void flexboot_nodnic_disable_dma ( struct flexboot_nodnic *nodnic ) {
    int i;

    for ( i = 0; i < nodnic->device_priv.device_cap.num_ports; i++ ) {
        if ( ! ( nodnic->port_mask & ( i + 1 ) ) )
            continue;
        flexboot_nodnic_port_disable_dma ( & ( nodnic->port[i] ) );
    }
}

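/**
 * Check whether the device exposes the NODNIC interface
 *
 * @v pci       PCI device
 * @ret is_supported    Non-zero if NODNIC is supported
 */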
int flexboot_nodnic_is_supported ( struct pci_device *pci ) {
    mlx_utils utils;
    mlx_pci_gw_buffer buffer;
    mlx_status status;
    int is_supported = 0;

    DBG ( "%s: start\n", __FUNCTION__ );

    memset ( &utils, 0, sizeof ( utils ) );
    status = mlx_utils_init ( &utils, pci );
    MLX_CHECK_STATUS ( pci, status, utils_init_err, "mlx_utils_init failed" );

    status = mlx_pci_gw_init ( &utils );
    MLX_CHECK_STATUS ( pci, status, pci_gw_init_err, "mlx_pci_gw_init failed" );

    status = mlx_pci_gw_read ( &utils, PCI_GW_SPACE_NODNIC,
            NODNIC_NIC_INTERFACE_SUPPORTED_OFFSET, &buffer );
    if ( status == MLX_SUCCESS ) {
        buffer >>= NODNIC_NIC_INTERFACE_SUPPORTED_BIT;
        is_supported = ( buffer & 0x1 );
    }

    mlx_pci_gw_teardown( &utils );
pci_gw_init_err:
    mlx_utils_teardown(&utils);
utils_init_err:
    DBG ( "%s: NODNIC is %s supported (status = %d)\n",
            __FUNCTION__, ( is_supported ? "" : "not" ), status );
    return is_supported;
}

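/**
 * Copy a MAC address given as high/low register values into a byte array
 *
 * @v mac_addr  Destination MAC address buffer
 * @v low_byte  Low 32 bits of the MAC address
 * @v high_byte High 16 bits of the MAC address
 */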
void flexboot_nodnic_copy_mac ( uint8_t mac_addr[], uint32_t low_byte,
        uint16_t high_byte ) {
    union mac_addr {
        struct {
            uint32_t low_byte;
            uint16_t high_byte;
        };
        uint8_t mac_addr[ETH_ALEN];
    } mac_addr_aux;

    mac_addr_aux.high_byte = high_byte;
    mac_addr_aux.low_byte = low_byte;

    mac_addr[0] = mac_addr_aux.mac_addr[5];
    mac_addr[1] = mac_addr_aux.mac_addr[4];
    mac_addr[2] = mac_addr_aux.mac_addr[3];
    mac_addr[3] = mac_addr_aux.mac_addr[2];
    mac_addr[4] = mac_addr_aux.mac_addr[1];
    mac_addr[5] = mac_addr_aux.mac_addr[0];
}

static mlx_status flexboot_nodnic_get_factory_mac (
        struct flexboot_nodnic *flexboot_nodnic_priv, uint8_t port __unused ) {
    struct mlx_vmac_query_virt_mac virt_mac;
    mlx_status status;

    memset ( &virt_mac, 0, sizeof ( virt_mac ) );
    status = mlx_vmac_query_virt_mac ( flexboot_nodnic_priv->device_priv.utils,
            &virt_mac );
    if ( ! status ) {
        DBGC ( flexboot_nodnic_priv, "NODNIC %p Failed to set the virtual MAC\n",
                flexboot_nodnic_priv );
    }

    return status;
}

/**
 * Set port masking
 *
 * @v flexboot_nodnic   nodnic device
 * @ret rc      Return status code
 */
static int flexboot_nodnic_set_port_masking ( struct flexboot_nodnic *flexboot_nodnic ) {
    unsigned int i;
    nodnic_device_priv *device_priv = &flexboot_nodnic->device_priv;

    flexboot_nodnic->port_mask = 0;
    for ( i = 0; i < device_priv->device_cap.num_ports; i++ ) {
        flexboot_nodnic->port_mask |= ( i + 1 );
    }

    if ( ! flexboot_nodnic->port_mask ) {
        /* No port was enabled */
        DBGC ( flexboot_nodnic, "NODNIC %p No port was enabled for "
                "booting\n", flexboot_nodnic );
        return -ENETUNREACH;
    }
    return 0;
}

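/**
 * Allocate and initialise an mlx_utils instance for a PCI device
 *
 * @v utils     mlx_utils instance to allocate and initialise
 * @v pci       PCI device
 * @ret rc      Return status code
 */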
int init_mlx_utils ( mlx_utils **utils, struct pci_device *pci ) {
    int rc = 0;

    *utils = ( mlx_utils * ) zalloc ( sizeof ( mlx_utils ) );
    if ( *utils == NULL ) {
        DBGC ( utils, "%s: Failed to allocate utils\n", __FUNCTION__ );
        rc = -1;
        goto err_utils_alloc;
    }
    if ( mlx_utils_init ( *utils, pci ) ) {
        DBGC ( utils, "%s: mlx_utils_init failed\n", __FUNCTION__ );
        rc = -1;
        goto err_utils_init;
    }
    if ( mlx_pci_gw_init ( *utils ) ) {
        DBGC ( utils, "%s: mlx_pci_gw_init failed\n", __FUNCTION__ );
        rc = -1;
        goto err_cmd_init;
    }

    return 0;

    mlx_pci_gw_teardown ( *utils );
err_cmd_init:
    mlx_utils_teardown ( *utils );
err_utils_init:
    free ( *utils );
err_utils_alloc:
    *utils = NULL;
    return rc;
}

void free_mlx_utils ( mlx_utils **utils ) {
    mlx_pci_gw_teardown ( *utils );
    mlx_utils_teardown ( *utils );
    free ( *utils );
    *utils = NULL;
}

/**
 * Allocate and map the UAR used for TX doorbells
 *
 * @v flexboot_nodnic   nodnic device
 * @ret rc      Return status code
 */
static int flexboot_nodnic_alloc_uar ( struct flexboot_nodnic *flexboot_nodnic ) {
    mlx_status status = MLX_SUCCESS;
    struct pci_device *pci = flexboot_nodnic->pci;
    nodnic_uar *uar = &flexboot_nodnic->port[0].port_priv.device->uar;

    if ( ! flexboot_nodnic->device_priv.device_cap.support_uar_tx_db ) {
        DBGC ( flexboot_nodnic, "%s: tx db using uar is not supported\n", __FUNCTION__ );
        return -ENOTSUP;
    }
    /* Read the UAR offset, then map it */
    if ( ( status = nodnic_port_set_send_uar_offset ( &flexboot_nodnic->port[0].port_priv ) ) ) {
        DBGC ( flexboot_nodnic, "%s: nodnic_port_set_send_uar_offset failed, "
                "status = %d\n", __FUNCTION__, status );
        return -EINVAL;
    }
    uar->phys = ( pci_bar_start ( pci, FLEXBOOT_NODNIC_HCA_BAR ) + (mlx_uint32)uar->offset );
    uar->virt = ( void * )( ioremap ( uar->phys, FLEXBOOT_NODNIC_PAGE_SIZE ) );

    return status;
}

static int flexboot_nodnic_dealloc_uar ( struct flexboot_nodnic *flexboot_nodnic ) {
    nodnic_uar *uar = &flexboot_nodnic->port[0].port_priv.device->uar;

    if ( uar->virt ) {
        iounmap( uar->virt );
        uar->virt = NULL;
    }
    return MLX_SUCCESS;
}

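/**
 * Probe PCI device
 *
 * @v pci       PCI device
 * @v callbacks Device-specific callbacks
 * @v drv_priv  Driver private data (unused)
 * @ret rc      Return status code
 */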
int flexboot_nodnic_probe ( struct pci_device *pci,
        struct flexboot_nodnic_callbacks *callbacks,
        void *drv_priv __unused ) {
    mlx_status status = MLX_SUCCESS;
    struct flexboot_nodnic *flexboot_nodnic_priv = NULL;
    nodnic_device_priv *device_priv = NULL;
    int i = 0;

    if ( ( pci == NULL ) || ( callbacks == NULL ) ) {
        DBGC ( flexboot_nodnic_priv, "%s: Bad Parameter\n", __FUNCTION__ );
        return -EINVAL;
    }

    flexboot_nodnic_priv = zalloc( sizeof ( *flexboot_nodnic_priv ) );
    if ( flexboot_nodnic_priv == NULL ) {
        DBGC ( flexboot_nodnic_priv, "%s: Failed to allocate priv data\n", __FUNCTION__ );
        status = MLX_OUT_OF_RESOURCES;
        goto device_err_alloc;
    }

    /* Register settings
     * Note that pci->priv will be the device private data */
    flexboot_nodnic_priv->pci = pci;
    flexboot_nodnic_priv->callbacks = callbacks;
    pci_set_drvdata ( pci, flexboot_nodnic_priv );
    device_priv = &flexboot_nodnic_priv->device_priv;

    /* init mlx utils */
    status = init_mlx_utils ( &device_priv->utils, pci );
    MLX_FATAL_CHECK_STATUS(status, err_utils_init,
            "init_mlx_utils failed");

    /* init device */
    status = nodnic_device_init( device_priv );
    MLX_FATAL_CHECK_STATUS(status, device_init_err,
            "nodnic_device_init failed");

    status = nodnic_device_get_cap( device_priv );
    MLX_FATAL_CHECK_STATUS(status, get_cap_err,
            "nodnic_device_get_cap failed");

    if ( mlx_set_admin_mtu ( device_priv->utils, 1, EN_DEFAULT_ADMIN_MTU ) ) {
        MLX_DEBUG_ERROR( device_priv->utils, "Failed to set admin mtu\n" );
    }

    status = flexboot_nodnic_set_port_masking ( flexboot_nodnic_priv );
    MLX_FATAL_CHECK_STATUS(status, err_set_masking,
            "flexboot_nodnic_set_port_masking failed");

    status = flexboot_nodnic_allocate_infiniband_devices( flexboot_nodnic_priv );
    MLX_FATAL_CHECK_STATUS(status, err_alloc_ibdev,
            "flexboot_nodnic_allocate_infiniband_devices failed");

    /* port init */
    status = flexboot_nodnic_thin_init_ports( flexboot_nodnic_priv );
    MLX_FATAL_CHECK_STATUS(status, err_thin_init_ports,
            "flexboot_nodnic_thin_init_ports failed");

    if ( ( status = flexboot_nodnic_alloc_uar ( flexboot_nodnic_priv ) ) ) {
        DBGC(flexboot_nodnic_priv, "%s: flexboot_nodnic_alloc_uar failed"
                " ( status = %d )\n", __FUNCTION__, status );
    }

    /* device reg */
    status = flexboot_nodnic_set_ports_type( flexboot_nodnic_priv );
    MLX_CHECK_STATUS( flexboot_nodnic_priv, status, err_set_ports_types,
            "flexboot_nodnic_set_ports_type failed");

    status = flexboot_nodnic_ports_register_dev( flexboot_nodnic_priv );
    MLX_FATAL_CHECK_STATUS(status, reg_err,
            "flexboot_nodnic_ports_register_dev failed");

    for ( i = 0; i < device_priv->device_cap.num_ports; i++ ) {
        if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) )
            continue;
        flexboot_nodnic_get_factory_mac ( flexboot_nodnic_priv, i );
    }

    /* Update ETH operations with IRQ function if supported */
    DBGC ( flexboot_nodnic_priv, "%s: %s IRQ function\n",
            __FUNCTION__, ( callbacks->irq ? "Valid" : "No" ) );
    flexboot_nodnic_eth_operations.irq = callbacks->irq;
    return 0;

    flexboot_nodnic_ports_unregister_dev ( flexboot_nodnic_priv );
reg_err:
err_set_ports_types:
    flexboot_nodnic_dealloc_uar ( flexboot_nodnic_priv );
err_thin_init_ports:
err_alloc_ibdev:
err_set_masking:
get_cap_err:
    nodnic_device_teardown ( device_priv );
device_init_err:
    free_mlx_utils ( &device_priv->utils );
err_utils_init:
    free ( flexboot_nodnic_priv );
device_err_alloc:
    return status;
}

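/**
 * Remove PCI device
 *
 * @v pci       PCI device
 */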
void flexboot_nodnic_remove ( struct pci_device *pci ) {
    struct flexboot_nodnic *flexboot_nodnic_priv = pci_get_drvdata ( pci );
    nodnic_device_priv *device_priv = & ( flexboot_nodnic_priv->device_priv );

    flexboot_nodnic_dealloc_uar ( flexboot_nodnic_priv );
    flexboot_nodnic_ports_unregister_dev ( flexboot_nodnic_priv );
    nodnic_device_teardown( device_priv );
    free_mlx_utils ( &device_priv->utils );
    free( flexboot_nodnic_priv );
}