
vxge_main.c 18KB

/*
 * vxge-main.c: iPXE driver for Neterion Inc's X3100 Series 10GbE
 * PCIe I/O Virtualized Server Adapter.
 *
 * Copyright(c) 2002-2010 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by
 * reference. Drivers based on or derived from this code fall under
 * the GPL and must retain the authorship, copyright and license
 * notice.
 *
 */

FILE_LICENCE(GPL2_ONLY);

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <ipxe/io.h>
#include <errno.h>
#include <byteswap.h>
#include <ipxe/pci.h>
#include <ipxe/malloc.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>
#include <ipxe/timer.h>
#include <nic.h>

#include "vxge_main.h"
#include "vxge_reg.h"

/* Function mode strings */
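/* Indexed by the function_mode value reported by the adapter in
 * struct vxge_hw_device_hw_info and printed from vxge_probe(). */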
static char *vxge_func_mode_names[] = {
        "Single Function - 1 func, 17 vpath",
        "Multi Function 8 - 8 func, 2 vpath per func",
        "SRIOV 17 - 17 VF, 1 vpath per VF",
        "WLPEX/SharedIO 17 - 17 VH, 1 vpath/func/hierarchy",
        "WLPEX/SharedIO 8 - 8 VH, 2 vpath/func/hierarchy",
        "Multi Function 17 - 17 func, 1 vpath per func",
        "SRIOV 8 - 1 PF, 7 VF, 2 vpath per VF",
        "SRIOV 4 - 1 PF, 3 VF, 4 vpath per VF",
        "Multi Function 2 - 2 func, 8 vpath per func",
        "Multi Function 4 - 4 func, 4 vpath per func",
        "WLPEX/SharedIO 4 - 17 func, 1 vpath per func (PCIe ARI)",
        "Multi Function 8 - For ESX DirectIO - 8 func, 2 vpath per func",
};

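/* Test the __VXGE_STATE_CARD_UP bit, which is set by vxge_open() and
 * cleared by vxge_close(). */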
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
        return test_bit(__VXGE_STATE_CARD_UP, vdev->state);
}

/*
 * vxge_xmit_compl
 *
 * If an interrupt was raised to indicate DMA completion of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all I/O buffers whose data have already been DMA'ed into
 * the NIC's internal memory.
 */
enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw,
                struct vxge_hw_fifo_txd *txdp, enum vxge_hw_fifo_tcode tcode)
{
        struct net_device *netdev;
        struct io_buffer *tx_iob = NULL;

        vxge_trace();

        netdev = fifo_hw->vpathh->hldev->ndev;

        tx_iob = (struct io_buffer *)(intptr_t)txdp->host_control;

        if (tcode == VXGE_HW_FIFO_T_CODE_OK) {
                netdev_tx_complete(netdev, tx_iob);
        } else {
                netdev_tx_complete_err(netdev, tx_iob, -EINVAL);
                vxge_debug(VXGE_ERR, "%s: transmit failed, tcode %d\n",
                                netdev->name, tcode);
        }

        memset(txdp, 0, sizeof(struct vxge_hw_fifo_txd));

        return VXGE_HW_OK;
}

/* reset vpaths */
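/* The iPXE driver uses a single vpath; reset it and, if the card is up,
 * recover it from the reset. */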
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_virtualpath *vpath;

        vxge_trace();

        vpath = vdev->vpath.vpathh;

        if (vpath) {
                if ((status = vxge_hw_vpath_reset(vpath)) == VXGE_HW_OK) {
                        if (is_vxge_card_up(vdev) &&
                                (status = vxge_hw_vpath_recover_from_reset(
                                        vpath)) != VXGE_HW_OK) {
                                vxge_debug(VXGE_ERR, "vxge_hw_vpath_recover_"
                                        "from_reset failed\n");
                                return status;
                        } else {
                                status = __vxge_hw_vpath_reset_check(vpath);
                                if (status != VXGE_HW_OK) {
                                        vxge_debug(VXGE_ERR,
                                        "__vxge_hw_vpath_reset_check error\n");
                                        return status;
                                }
                        }
                } else {
                        vxge_debug(VXGE_ERR, "vxge_hw_vpath_reset failed\n");
                        return status;
                }
        }

        return status;
}

/* close vpaths */
void vxge_close_vpaths(struct vxgedev *vdev)
{
        if (vdev->vpath.vpathh && vdev->vpath.is_open)
                vxge_hw_vpath_close(vdev->vpath.vpathh);

        vdev->vpath.is_open = 0;
        vdev->vpath.vpathh = NULL;
}

/* open vpaths */
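/* Attach the driver's fifo and ring to the hardware's single virtual path
 * and open the vpath. */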
int vxge_open_vpaths(struct vxgedev *vdev)
{
        enum vxge_hw_status status;
        struct __vxge_hw_device *hldev;

        hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

        vdev->vpath.vpathh = &hldev->virtual_path;
        vdev->vpath.fifo.ndev = vdev->ndev;
        vdev->vpath.fifo.pdev = vdev->pdev;
        vdev->vpath.fifo.fifoh = &hldev->virtual_path.fifoh;
        vdev->vpath.ring.ndev = vdev->ndev;
        vdev->vpath.ring.pdev = vdev->pdev;
        vdev->vpath.ring.ringh = &hldev->virtual_path.ringh;

        status = vxge_hw_vpath_open(vdev->devh, &vdev->vpath);
        if (status == VXGE_HW_OK) {
                vdev->vpath.is_open = 1;
        } else {
                vxge_debug(VXGE_ERR,
                        "%s: vpath: %d failed to open "
                        "with status: %d\n",
                        vdev->ndev->name, vdev->vpath.device_id,
                        status);
                vxge_close_vpaths(vdev);
                return status;
        }

        hldev->vpaths_deployed |= vxge_mBIT(vdev->vpath.vpathh->vp_id);

        return VXGE_HW_OK;
}

/** Functions that implement the iPXE driver API **/

/**
 * vxge_xmit
 * @dev : device pointer.
 * @iobuf : the I/O buffer containing the Tx data.
 *
 * This function is the Tx entry point of the driver. The Neterion NIC
 * supports certain protocol assist features on the Tx side, namely CSO,
 * S/G and LSO.
 */
static int
vxge_xmit(struct net_device *dev, struct io_buffer *iobuf)
{
        struct vxge_fifo *fifo = NULL;
        struct vxgedev *vdev = NULL;
        struct __vxge_hw_fifo *fifoh;
        struct vxge_hw_fifo_txd *txdp;

        vxge_trace();

        vdev = (struct vxgedev *)netdev_priv(dev);

        if (!is_vxge_card_up(vdev)) {
                vxge_debug(VXGE_ERR,
                        "%s: vdev not initialized\n", dev->name);
                return -EIO;
        }

        if (!netdev_link_ok(dev)) {
                vxge_debug(VXGE_ERR,
                        "%s: Link down, transmit failed\n", dev->name);
                return -ENETDOWN;
        }

        fifo = &vdev->vpath.fifo;
        fifoh = fifo->fifoh;

        txdp = vxge_hw_fifo_free_txdl_get(fifoh);
        if (!txdp) {
                vxge_debug(VXGE_ERR,
                        "%s: Out of tx descriptors\n", dev->name);
                return -ENOBUFS;
        }

        vxge_debug(VXGE_XMIT, "%s: %s:%d fifoh offset= %d\n",
                dev->name, __func__, __LINE__, fifoh->sw_offset);

        vxge_hw_fifo_txdl_buffer_set(fifoh, txdp, iobuf);

        vxge_hw_fifo_txdl_post(fifoh, txdp);

        return 0;
}

/*
 * vxge_poll
 * @ndev: net device pointer
 *
 * This function acknowledges the interrupt. It polls for received packets
 * and passes them to the upper layer. It also checks for Tx completions
 * and frees the associated I/O buffers.
 */
static void vxge_poll(struct net_device *ndev)
{
        struct __vxge_hw_device *hldev;
        struct vxgedev *vdev;

        vxge_debug(VXGE_POLL, "%s:%d \n", __func__, __LINE__);

        vdev = (struct vxgedev *)netdev_priv(ndev);
        hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

        if (!is_vxge_card_up(vdev))
                return;

        /* process alarm and acknowledge the interrupts */
        vxge_hw_device_begin_irq(hldev);

        vxge_hw_vpath_poll_tx(&hldev->virtual_path.fifoh);

        vxge_hw_vpath_poll_rx(&hldev->virtual_path.ringh);
}

/*
 * vxge_irq - enable or disable interrupts
 *
 * @netdev: netdevice structure reference
 * @action: requested interrupt action
 */
static void vxge_irq(struct net_device *netdev __unused, int action)
{
        struct __vxge_hw_device *hldev;
        struct vxgedev *vdev;

        vxge_debug(VXGE_INFO,
                "%s:%d action(%d)\n", __func__, __LINE__, action);

        vdev = (struct vxgedev *)netdev_priv(netdev);
        hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

        switch (action) {
        case DISABLE:
                vxge_hw_device_mask_all(hldev);
                break;
        default:
                vxge_hw_device_unmask_all(hldev);
                break;
        }
}

/**
 * vxge_open
 * @dev: pointer to the device structure.
 *
 * This function is the open entry point of the driver. It mainly calls a
 * function to allocate Rx buffers, inserts them into the buffer
 * descriptors and then enables the Rx part of the NIC.
 * Return value: '0' on success and an appropriate (-)ve integer as
 * defined in errno.h file on failure.
 */
int
vxge_open(struct net_device *dev)
{
        enum vxge_hw_status status;
        struct vxgedev *vdev;
        struct __vxge_hw_device *hldev;
        int ret = 0;

        vxge_debug(VXGE_INFO, "%s: %s:%d\n",
                VXGE_DRIVER_NAME, __func__, __LINE__);

        vdev = (struct vxgedev *)netdev_priv(dev);
        hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

        /* make sure the link is down by default every time the NIC is
         * initialized */
        netdev_link_down(dev);

        /* Open VPATHs */
        status = vxge_open_vpaths(vdev);
        if (status != VXGE_HW_OK) {
                vxge_debug(VXGE_ERR, "%s: fatal: Vpath open failed\n",
                        VXGE_DRIVER_NAME);
                ret = -EPERM;
                goto out0;
        }

        vdev->mtu = VXGE_HW_DEFAULT_MTU;
        /* set initial mtu before enabling the device */
        status = vxge_hw_vpath_mtu_set(vdev->vpath.vpathh, vdev->mtu);
        if (status != VXGE_HW_OK) {
                vxge_debug(VXGE_ERR,
                        "%s: fatal: can not set new MTU\n", dev->name);
                ret = -EPERM;
                goto out2;
        }
        vxge_debug(VXGE_INFO,
                "%s: MTU is %d\n", vdev->ndev->name, vdev->mtu);

        set_bit(__VXGE_STATE_CARD_UP, vdev->state);

        wmb();

        if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
                netdev_link_up(vdev->ndev);
                vxge_debug(VXGE_INFO, "%s: Link Up\n", vdev->ndev->name);
        }

        vxge_hw_device_intr_enable(hldev);

        vxge_hw_vpath_enable(vdev->vpath.vpathh);

        wmb();

        vxge_hw_vpath_rx_doorbell_init(vdev->vpath.vpathh);

        goto out0;

out2:
        vxge_close_vpaths(vdev);
out0:
        vxge_debug(VXGE_INFO, "%s: %s:%d Exiting...\n",
                dev->name, __func__, __LINE__);
        return ret;
}

/**
 * vxge_close
 * @dev: device pointer.
 *
 * This is the stop entry point of the driver. It needs to undo exactly
 * whatever was done by the open entry point, thus it's usually referred to
 * as the close function. Among other things this function mainly stops the
 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 */
static void vxge_close(struct net_device *dev)
{
        struct vxgedev *vdev;
        struct __vxge_hw_device *hldev;

        vxge_debug(VXGE_INFO, "%s: %s:%d\n",
                dev->name, __func__, __LINE__);

        vdev = (struct vxgedev *)netdev_priv(dev);
        hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

        if (!is_vxge_card_up(vdev))
                return;

        clear_bit(__VXGE_STATE_CARD_UP, vdev->state);

        vxge_hw_vpath_set_zero_rx_frm_len(hldev);

        netdev_link_down(vdev->ndev);
        vxge_debug(VXGE_INFO, "%s: Link Down\n", vdev->ndev->name);

        /* Note that at this point xmit() is stopped by upper layer */
        vxge_hw_device_intr_disable(hldev);

        /* Multi function shares INTA, hence we should
         * leave it in enabled state
         */
        if (is_mf(hldev->hw_info.function_mode))
                vxge_hw_device_unmask_all(hldev);

        vxge_reset_all_vpaths(vdev);

        vxge_close_vpaths(vdev);

        vxge_debug(VXGE_INFO,
                "%s: %s:%d Exiting...\n", dev->name, __func__, __LINE__);
}

static struct net_device_operations vxge_operations;

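/*
 * vxge_device_register
 *
 * Allocate an iPXE net device, initialize the driver private data and
 * MAC address, and register the device with the network stack.
 */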
int vxge_device_register(struct __vxge_hw_device *hldev,
                        struct vxgedev **vdev_out)
{
        struct net_device *ndev;
        struct vxgedev *vdev;
        int ret = 0;

        *vdev_out = NULL;

        ndev = alloc_etherdev(sizeof(struct vxgedev));
        if (ndev == NULL) {
                vxge_debug(VXGE_ERR, "%s : device allocation failed\n",
                        __func__);
                ret = -ENODEV;
                goto _out0;
        }

        vxge_debug(VXGE_INFO, "%s:%d netdev registering\n",
                __func__, __LINE__);

        vdev = netdev_priv(ndev);
        memset(vdev, 0, sizeof(struct vxgedev));

        vdev->ndev = ndev;
        vdev->devh = hldev;
        vdev->pdev = hldev->pdev;

        ndev->dev = &vdev->pdev->dev;

        /* Associate vxge-specific network operations with the
         * generic network device layer */
        netdev_init(ndev, &vxge_operations);

        memcpy(ndev->hw_addr,
                (u8 *)hldev->hw_info.mac_addrs[hldev->first_vp_id], ETH_ALEN);

        if (register_netdev(ndev)) {
                vxge_debug(VXGE_ERR, "%s : device registration failed!\n",
                        __func__);
                ret = -ENODEV;
                goto _out2;
        }

        /* Leave link state as off at this point, when the link change
         * interrupt comes the state will be automatically changed to
         * the right state.
         */

        vxge_debug(VXGE_INFO, "%s: Ethernet device registered\n",
                VXGE_DRIVER_NAME);

        *vdev_out = vdev;

        return ret;

_out2:
        netdev_put(ndev);
_out0:
        return ret;
}

/*
 * vxge_device_unregister
 *
 * This function unregisters and frees the network device.
 */
void
vxge_device_unregister(struct __vxge_hw_device *hldev)
{
        struct net_device *ndev;

        ndev = hldev->ndev;

        unregister_netdev(ndev);
        netdev_nullify(ndev);
        netdev_put(ndev);

        vxge_debug(VXGE_INFO, "%s: ethernet device unregistered\n",
                VXGE_DRIVER_NAME);
}

/**
 * vxge_probe
 * @pdev: structure containing the PCI related information of the device.
 *
 * Description:
 * This function is called when a new PCI device gets detected and
 * initializes it.
 * Return value:
 * returns 0 on success and negative on failure.
 *
 */
static int
vxge_probe(struct pci_device *pdev)
{
        struct __vxge_hw_device *hldev;
        enum vxge_hw_status status;
        int ret = 0;
        u64 vpath_mask = 0;
        struct vxgedev *vdev;
        int i;
        u8 revision, titan1;
        u32 function_mode;
        unsigned long mmio_start, mmio_len;
        void *bar0;
        struct vxge_hw_device_hw_info hw_info;
        struct vxge_hw_device_version *fw_version;

        vxge_debug(VXGE_INFO, "vxge_probe for device " PCI_FMT "\n",
                PCI_ARGS(pdev));

        pci_read_config_byte(pdev, PCI_REVISION, &revision);
        titan1 = is_titan1(pdev->device, revision);

        mmio_start = pci_bar_start(pdev, PCI_BASE_ADDRESS_0);
        mmio_len = pci_bar_size(pdev, PCI_BASE_ADDRESS_0);

        vxge_debug(VXGE_INFO, "mmio_start: %#08lx, mmio_len: %#08lx\n",
                mmio_start, mmio_len);

        /* sets the bus master */
        adjust_pci_device(pdev);

        bar0 = ioremap(mmio_start, mmio_len);
        if (!bar0) {
                vxge_debug(VXGE_ERR,
                        "%s : cannot remap io memory bar0\n", __func__);
                ret = -ENODEV;
                goto _exit0;
        }

        status = vxge_hw_device_hw_info_get(pdev, bar0, &hw_info);
        if (status != VXGE_HW_OK) {
                vxge_debug(VXGE_ERR,
                        "%s: Reading of hardware info failed.\n",
                        VXGE_DRIVER_NAME);
                ret = -EINVAL;
                goto _exit1;
        }

        if (hw_info.func_id != 0) {
                /* Non-zero function, so do not load the driver */
                iounmap(bar0);
                pci_set_drvdata(pdev, NULL);
                return -EINVAL;
        }

        vpath_mask = hw_info.vpath_mask;
        if (vpath_mask == 0) {
                vxge_debug(VXGE_ERR,
                        "%s: No vpaths available in device\n",
                        VXGE_DRIVER_NAME);
                ret = -EINVAL;
                goto _exit1;
        }

        vxge_debug(VXGE_INFO,
                "%s:%d Vpath mask = %llx\n", __func__, __LINE__,
                (unsigned long long)vpath_mask);

        fw_version = &hw_info.fw_version;
        /* fail the driver loading if firmware is incompatible */
        if ((fw_version->major != VXGE_CERT_FW_VER_MAJOR) ||
                (fw_version->minor < VXGE_CERT_FW_VER_MINOR)) {
                printf("%s: Adapter's current firmware version: %d.%d.%d\n",
                        VXGE_DRIVER_NAME, fw_version->major,
                        fw_version->minor, fw_version->build);
                printf("%s: Upgrade firmware to version %d.%d.%d\n",
                        VXGE_DRIVER_NAME, VXGE_CERT_FW_VER_MAJOR,
                        VXGE_CERT_FW_VER_MINOR, VXGE_CERT_FW_VER_BUILD);
                ret = -EACCES;
                goto _exit1;
        }

        status = vxge_hw_device_initialize(&hldev, bar0, pdev, titan1);
        if (status != VXGE_HW_OK) {
                vxge_debug(VXGE_ERR,
                        "Failed to initialize device (%d)\n", status);
                ret = -EINVAL;
                goto _exit1;
        }

        memcpy(&hldev->hw_info, &hw_info,
                sizeof(struct vxge_hw_device_hw_info));

        /* find the vpath id of the first available one */
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
                if (vpath_mask & vxge_mBIT(i)) {
                        hldev->first_vp_id = i;
                        break;
                }

        /* if FCS stripping is not disabled in MAC fail driver load */
        if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
                vxge_debug(VXGE_ERR,
                        "%s: FCS stripping is not disabled in MAC"
                        " failing driver load\n", VXGE_DRIVER_NAME);
                ret = -EINVAL;
                goto _exit2;
        }

        /* Read function mode */
        status = vxge_hw_get_func_mode(hldev, &function_mode);
        if (status != VXGE_HW_OK)
                goto _exit2;

        hldev->hw_info.function_mode = function_mode;

        /* set private device info */
        pci_set_drvdata(pdev, hldev);

        if (vxge_device_register(hldev, &vdev)) {
                ret = -EINVAL;
                goto _exit2;
        }

        /* set private HW device info */
        hldev->ndev = vdev->ndev;
        hldev->vdev = vdev;
        hldev->pdev = pdev;

        vdev->mtu = VXGE_HW_DEFAULT_MTU;
        vdev->bar0 = bar0;
        vdev->titan1 = titan1;

        /* Virtual Path count */
        vdev->vpath.device_id = hldev->first_vp_id;
        vdev->vpath.vdev = vdev;
        memcpy((u8 *)vdev->vpath.macaddr,
                (u8 *)hldev->hw_info.mac_addrs[hldev->first_vp_id],
                ETH_ALEN);

        hldev->hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
        hldev->hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
        hldev->hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';

        vxge_debug(VXGE_INFO, "%s: Neterion %s Server Adapter\n",
                VXGE_DRIVER_NAME, hldev->hw_info.product_desc);
        vxge_debug(VXGE_INFO, "%s: SERIAL NUMBER: %s\n",
                VXGE_DRIVER_NAME, hldev->hw_info.serial_number);
        vxge_debug(VXGE_INFO, "%s: PART NUMBER: %s\n",
                VXGE_DRIVER_NAME, hldev->hw_info.part_number);
        vxge_debug(VXGE_INFO, "%s: MAC ADDR: %s\n",
                VXGE_DRIVER_NAME, eth_ntoa(vdev->vpath.macaddr));
        vxge_debug(VXGE_INFO,
                "%s: Firmware version : %s Date : %s\n", VXGE_DRIVER_NAME,
                hldev->hw_info.fw_version.version,
                hldev->hw_info.fw_date.date);
        vxge_debug(VXGE_INFO, "%s: %s Enabled\n",
                VXGE_DRIVER_NAME, vxge_func_mode_names[function_mode]);

        vxge_debug(VXGE_INFO, "%s: %s:%d Probe Exiting...\n",
                VXGE_DRIVER_NAME, __func__, __LINE__);

        return 0;

_exit2:
        vxge_hw_device_terminate(hldev);
_exit1:
        iounmap(bar0);
_exit0:
        pci_set_drvdata(pdev, NULL);
        printf("%s: WARNING!! Driver loading failed!!\n",
                VXGE_DRIVER_NAME);

        return ret;
}

/**
 * vxge_remove - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 *
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device.
 */
static void
vxge_remove(struct pci_device *pdev)
{
        struct __vxge_hw_device *hldev;
        struct vxgedev *vdev = NULL;
        struct net_device *ndev;

        vxge_debug(VXGE_INFO,
                "%s:%d\n", __func__, __LINE__);

        hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev);
        if (hldev == NULL)
                return;

        ndev = hldev->ndev;
        vdev = netdev_priv(ndev);

        iounmap(vdev->bar0);

        vxge_device_unregister(hldev);

        vxge_debug(VXGE_INFO,
                "%s:%d Device unregistered\n", __func__, __LINE__);

        vxge_hw_device_terminate(hldev);

        pci_set_drvdata(pdev, NULL);
}

/* vxge net device operations */
static struct net_device_operations vxge_operations = {
        .open = vxge_open,
        .close = vxge_close,
        .transmit = vxge_xmit,
        .poll = vxge_poll,
        .irq = vxge_irq,
};

static struct pci_device_id vxge_main_nics[] = {
        /* If you change this, also adjust vxge_nics[] in vxge.c */
        PCI_ID(0x17d5, 0x5833, "vxge-x3100", "Neterion X3100 Series", 0),
};

struct pci_driver vxge_driver __pci_driver = {
        .ids = vxge_main_nics,
        .id_count = (sizeof(vxge_main_nics) / sizeof(vxge_main_nics[0])),
        .probe = vxge_probe,
        .remove = vxge_remove,
};