/*
 * vxge-main.c: iPXE driver for Neterion Inc's X3100 Series 10GbE
 * PCIe I/O Virtualized Server Adapter.
 *
 * Copyright(c) 2002-2010 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by
 * reference. Drivers based on or derived from this code fall under
 * the GPL and must retain the authorship, copyright and license
 * notice.
 *
 */

FILE_LICENCE(GPL2_ONLY);

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <ipxe/io.h>
#include <errno.h>
#include <byteswap.h>
#include <ipxe/pci.h>
#include <ipxe/malloc.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>
#include <ipxe/timer.h>
#include <nic.h>

#include "vxge_main.h"
#include "vxge_reg.h"
  31. /* function modes strings */
  32. static char *vxge_func_mode_names[] = {
  33. "Single Function - 1 func, 17 vpath",
  34. "Multi Function 8 - 8 func, 2 vpath per func",
  35. "SRIOV 17 - 17 VF, 1 vpath per VF",
  36. "WLPEX/SharedIO 17 - 17 VH, 1 vpath/func/hierarchy",
  37. "WLPEX/SharedIO 8 - 8 VH, 2 vpath/func/hierarchy",
  38. "Multi Function 17 - 17 func, 1 vpath per func",
  39. "SRIOV 8 - 1 PF, 7 VF, 2 vpath per VF",
  40. "SRIOV 4 - 1 PF, 3 VF, 4 vpath per VF",
  41. "Multi Function 2 - 2 func, 8 vpath per func",
  42. "Multi Function 4 - 4 func, 4 vpath per func",
  43. "WLPEX/SharedIO 4 - 17 func, 1 vpath per func (PCIe ARI)",
  44. "Multi Function 8 - For ESX DirectIO - 8 func, 2 vpath per func",
  45. };
  46. static inline int is_vxge_card_up(struct vxgedev *vdev)
  47. {
  48. return test_bit(__VXGE_STATE_CARD_UP, vdev->state);
  49. }
  50. /*
  51. * vxge_xmit_compl
  52. *
  53. * If an interrupt was raised to indicate DMA complete of the Tx packet,
  54. * this function is called. It identifies the last TxD whose buffer was
  55. * freed and frees all skbs whose data have already DMA'ed into the NICs
  56. * internal memory.
  57. */
  58. enum vxge_hw_status
  59. vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw,
  60. struct vxge_hw_fifo_txd *txdp, enum vxge_hw_fifo_tcode tcode)
  61. {
  62. struct net_device *netdev;
  63. struct io_buffer *tx_iob = NULL;
  64. vxge_trace();
  65. netdev = fifo_hw->vpathh->hldev->ndev;
  66. tx_iob = (struct io_buffer *)(intptr_t)txdp->host_control;
  67. if (tcode == VXGE_HW_FIFO_T_CODE_OK) {
  68. netdev_tx_complete(netdev, tx_iob);
  69. } else {
  70. netdev_tx_complete_err(netdev, tx_iob, -EINVAL);
  71. vxge_debug(VXGE_ERR, "%s: transmit failed, tcode %d\n",
  72. netdev->name, tcode);
  73. }
  74. memset(txdp, 0, sizeof(struct vxge_hw_fifo_txd));
  75. return VXGE_HW_OK;
  76. }
  77. /* reset vpaths */
  78. enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
  79. {
  80. enum vxge_hw_status status = VXGE_HW_OK;
  81. struct __vxge_hw_virtualpath *vpath;
  82. vxge_trace();
  83. vpath = vdev->vpath.vpathh;
  84. if (vpath) {
  85. if ((status = vxge_hw_vpath_reset(vpath)) == VXGE_HW_OK) {
  86. if (is_vxge_card_up(vdev) &&
  87. (status = vxge_hw_vpath_recover_from_reset(
  88. vpath)) != VXGE_HW_OK) {
  89. vxge_debug(VXGE_ERR, "vxge_hw_vpath_recover_"
  90. "from_reset failed\n");
  91. return status;
  92. } else {
  93. status = __vxge_hw_vpath_reset_check(vpath);
  94. if (status != VXGE_HW_OK) {
  95. vxge_debug(VXGE_ERR,
  96. "__vxge_hw_vpath_reset_check error\n");
  97. return status;
  98. }
  99. }
  100. } else {
  101. vxge_debug(VXGE_ERR, "vxge_hw_vpath_reset failed\n");
  102. return status;
  103. }
  104. }
  105. return status;
  106. }
  107. /* close vpaths */
  108. void vxge_close_vpaths(struct vxgedev *vdev)
  109. {
  110. if (vdev->vpath.vpathh && vdev->vpath.is_open)
  111. vxge_hw_vpath_close(vdev->vpath.vpathh);
  112. vdev->vpath.is_open = 0;
  113. vdev->vpath.vpathh = NULL;
  114. }
  115. /* open vpaths */
  116. int vxge_open_vpaths(struct vxgedev *vdev)
  117. {
  118. enum vxge_hw_status status;
  119. struct __vxge_hw_device *hldev;
  120. hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
  121. vdev->vpath.vpathh = &hldev->virtual_path;
  122. vdev->vpath.fifo.ndev = vdev->ndev;
  123. vdev->vpath.fifo.pdev = vdev->pdev;
  124. vdev->vpath.fifo.fifoh = &hldev->virtual_path.fifoh;
  125. vdev->vpath.ring.ndev = vdev->ndev;
  126. vdev->vpath.ring.pdev = vdev->pdev;
  127. vdev->vpath.ring.ringh = &hldev->virtual_path.ringh;
  128. status = vxge_hw_vpath_open(vdev->devh, &vdev->vpath);
  129. if (status == VXGE_HW_OK) {
  130. vdev->vpath.is_open = 1;
  131. } else {
  132. vxge_debug(VXGE_ERR,
  133. "%s: vpath: %d failed to open "
  134. "with status: %d\n",
  135. vdev->ndev->name, vdev->vpath.device_id,
  136. status);
  137. vxge_close_vpaths(vdev);
  138. return status;
  139. }
  140. hldev->vpaths_deployed |= vxge_mBIT(vdev->vpath.vpathh->vp_id);
  141. return VXGE_HW_OK;
  142. }
  143. /** Functions that implement the iPXE driver API **/
  144. /**
  145. * vxge_xmit
  146. * @skb : the socket buffer containing the Tx data.
  147. * @dev : device pointer.
  148. *
  149. * This function is the Tx entry point of the driver. Neterion NIC supports
  150. * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
  151. */
  152. static int
  153. vxge_xmit(struct net_device *dev, struct io_buffer *iobuf)
  154. {
  155. struct vxge_fifo *fifo = NULL;
  156. struct vxgedev *vdev = NULL;
  157. struct __vxge_hw_fifo *fifoh;
  158. struct __vxge_hw_device *hldev;
  159. struct vxge_hw_fifo_txd *txdp;
  160. vxge_trace();
  161. vdev = (struct vxgedev *)netdev_priv(dev);
  162. hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
  163. if (!is_vxge_card_up(vdev)) {
  164. vxge_debug(VXGE_ERR,
  165. "%s: vdev not initialized\n", dev->name);
  166. return -EIO;
  167. }
  168. if (!netdev_link_ok(dev)) {
  169. vxge_debug(VXGE_ERR,
  170. "%s: Link down, transmit failed\n", dev->name);
  171. return -ENETDOWN;
  172. }
  173. fifo = &vdev->vpath.fifo;
  174. fifoh = fifo->fifoh;
  175. txdp = vxge_hw_fifo_free_txdl_get(fifoh);
  176. if (!txdp) {
  177. vxge_debug(VXGE_ERR,
  178. "%s: Out of tx descriptors\n", dev->name);
  179. return -ENOBUFS;
  180. }
  181. vxge_debug(VXGE_XMIT, "%s: %s:%d fifoh offset= %d\n",
  182. dev->name, __func__, __LINE__, fifoh->sw_offset);
  183. vxge_hw_fifo_txdl_buffer_set(fifoh, txdp, iobuf);
  184. vxge_hw_fifo_txdl_post(fifoh, txdp);
  185. return 0;
  186. }
  187. /*
  188. * vxge_poll
  189. * @ndev: net device pointer
  190. *
  191. * This function acks the interrupt. It polls for rx packets
  192. * and send to upper layer. It also checks for tx completion
  193. * and frees iobs.
  194. */
  195. static void vxge_poll(struct net_device *ndev)
  196. {
  197. struct __vxge_hw_device *hldev;
  198. struct vxgedev *vdev;
  199. vxge_debug(VXGE_POLL, "%s:%d \n", __func__, __LINE__);
  200. vdev = (struct vxgedev *)netdev_priv(ndev);
  201. hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
  202. if (!is_vxge_card_up(vdev))
  203. return;
  204. /* process alarm and acknowledge the interrupts */
  205. vxge_hw_device_begin_irq(hldev);
  206. vxge_hw_vpath_poll_tx(&hldev->virtual_path.fifoh);
  207. vxge_hw_vpath_poll_rx(&hldev->virtual_path.ringh);
  208. }
  209. /*
  210. * vxge_irq - enable or Disable interrupts
  211. *
  212. * @netdev netdevice sturcture reference
  213. * @action requested interrupt action
  214. */
  215. static void vxge_irq(struct net_device *netdev __unused, int action)
  216. {
  217. struct __vxge_hw_device *hldev;
  218. struct vxgedev *vdev;
  219. vxge_debug(VXGE_INFO,
  220. "%s:%d action(%d)\n", __func__, __LINE__, action);
  221. vdev = (struct vxgedev *)netdev_priv(netdev);
  222. hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
  223. switch (action) {
  224. case DISABLE:
  225. vxge_hw_device_mask_all(hldev);
  226. break;
  227. default:
  228. vxge_hw_device_unmask_all(hldev);
  229. break;
  230. }
  231. }
  232. /**
  233. * vxge_open
  234. * @dev: pointer to the device structure.
  235. *
  236. * This function is the open entry point of the driver. It mainly calls a
  237. * function to allocate Rx buffers and inserts them into the buffer
  238. * descriptors and then enables the Rx part of the NIC.
  239. * Return value: '0' on success and an appropriate (-)ve integer as
  240. * defined in errno.h file on failure.
  241. */
  242. int
  243. vxge_open(struct net_device *dev)
  244. {
  245. enum vxge_hw_status status;
  246. struct vxgedev *vdev;
  247. struct __vxge_hw_device *hldev;
  248. int ret = 0;
  249. vxge_debug(VXGE_INFO, "%s: %s:%d\n",
  250. VXGE_DRIVER_NAME, __func__, __LINE__);
  251. vdev = (struct vxgedev *)netdev_priv(dev);
  252. hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
  253. /* make sure you have link off by default every time Nic is
  254. * initialized */
  255. netdev_link_down(dev);
  256. /* Open VPATHs */
  257. status = vxge_open_vpaths(vdev);
  258. if (status != VXGE_HW_OK) {
  259. vxge_debug(VXGE_ERR, "%s: fatal: Vpath open failed\n",
  260. VXGE_DRIVER_NAME);
  261. ret = -EPERM;
  262. goto out0;
  263. }
  264. vdev->mtu = VXGE_HW_DEFAULT_MTU;
  265. /* set initial mtu before enabling the device */
  266. status = vxge_hw_vpath_mtu_set(vdev->vpath.vpathh, vdev->mtu);
  267. if (status != VXGE_HW_OK) {
  268. vxge_debug(VXGE_ERR,
  269. "%s: fatal: can not set new MTU\n", dev->name);
  270. ret = -EPERM;
  271. goto out2;
  272. }
  273. vxge_debug(VXGE_INFO,
  274. "%s: MTU is %d\n", vdev->ndev->name, vdev->mtu);
  275. set_bit(__VXGE_STATE_CARD_UP, vdev->state);
  276. wmb();
  277. if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
  278. netdev_link_up(vdev->ndev);
  279. vxge_debug(VXGE_INFO, "%s: Link Up\n", vdev->ndev->name);
  280. }
  281. vxge_hw_device_intr_enable(hldev);
  282. vxge_hw_vpath_enable(vdev->vpath.vpathh);
  283. wmb();
  284. vxge_hw_vpath_rx_doorbell_init(vdev->vpath.vpathh);
  285. goto out0;
  286. out2:
  287. vxge_close_vpaths(vdev);
  288. out0:
  289. vxge_debug(VXGE_INFO, "%s: %s:%d Exiting...\n",
  290. dev->name, __func__, __LINE__);
  291. return ret;
  292. }
  293. /**
  294. * vxge_close
  295. * @dev: device pointer.
  296. *
  297. * This is the stop entry point of the driver. It needs to undo exactly
  298. * whatever was done by the open entry point, thus it's usually referred to
  299. * as the close function.Among other things this function mainly stops the
  300. * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
  301. * Return value: '0' on success and an appropriate (-)ve integer as
  302. * defined in errno.h file on failure.
  303. */
  304. static void vxge_close(struct net_device *dev)
  305. {
  306. struct vxgedev *vdev;
  307. struct __vxge_hw_device *hldev;
  308. vxge_debug(VXGE_INFO, "%s: %s:%d\n",
  309. dev->name, __func__, __LINE__);
  310. vdev = (struct vxgedev *)netdev_priv(dev);
  311. hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
  312. if (!is_vxge_card_up(vdev))
  313. return;
  314. clear_bit(__VXGE_STATE_CARD_UP, vdev->state);
  315. vxge_hw_vpath_set_zero_rx_frm_len(hldev);
  316. netdev_link_down(vdev->ndev);
  317. vxge_debug(VXGE_INFO, "%s: Link Down\n", vdev->ndev->name);
  318. /* Note that at this point xmit() is stopped by upper layer */
  319. vxge_hw_device_intr_disable(hldev);
  320. /* Multi function shares INTA, hence we should
  321. * leave it in enabled state
  322. */
  323. if (is_mf(hldev->hw_info.function_mode))
  324. vxge_hw_device_unmask_all(hldev);
  325. vxge_reset_all_vpaths(vdev);
  326. vxge_close_vpaths(vdev);
  327. vxge_debug(VXGE_INFO,
  328. "%s: %s:%d Exiting...\n", dev->name, __func__, __LINE__);
  329. }
  330. static struct net_device_operations vxge_operations;
  331. int vxge_device_register(struct __vxge_hw_device *hldev,
  332. struct vxgedev **vdev_out)
  333. {
  334. struct net_device *ndev;
  335. struct vxgedev *vdev;
  336. int ret = 0;
  337. *vdev_out = NULL;
  338. ndev = alloc_etherdev(sizeof(struct vxgedev));
  339. if (ndev == NULL) {
  340. vxge_debug(VXGE_ERR, "%s : device allocation failed\n",
  341. __func__);
  342. ret = -ENODEV;
  343. goto _out0;
  344. }
  345. vxge_debug(VXGE_INFO, "%s:%d netdev registering\n",
  346. __func__, __LINE__);
  347. vdev = netdev_priv(ndev);
  348. memset(vdev, 0, sizeof(struct vxgedev));
  349. vdev->ndev = ndev;
  350. vdev->devh = hldev;
  351. vdev->pdev = hldev->pdev;
  352. ndev->dev = &vdev->pdev->dev;
  353. /* Associate vxge-specific network operations operations with
  354. * generic network device layer */
  355. netdev_init(ndev, &vxge_operations);
  356. memcpy(ndev->hw_addr,
  357. (u8 *)hldev->hw_info.mac_addrs[hldev->first_vp_id], ETH_ALEN);
  358. if (register_netdev(ndev)) {
  359. vxge_debug(VXGE_ERR, "%s : device registration failed!\n",
  360. __func__);
  361. ret = -ENODEV;
  362. goto _out2;
  363. }
  364. /* Leave link state as off at this point, when the link change
  365. * interrupt comes the state will be automatically changed to
  366. * the right state.
  367. */
  368. vxge_debug(VXGE_INFO, "%s: Ethernet device registered\n",
  369. VXGE_DRIVER_NAME);
  370. *vdev_out = vdev;
  371. return ret;
  372. _out2:
  373. netdev_put(ndev);
  374. _out0:
  375. return ret;
  376. }
  377. /*
  378. * vxge_device_unregister
  379. *
  380. * This function will unregister and free network device
  381. */
  382. void
  383. vxge_device_unregister(struct __vxge_hw_device *hldev)
  384. {
  385. struct vxgedev *vdev;
  386. struct net_device *ndev;
  387. ndev = hldev->ndev;
  388. vdev = netdev_priv(ndev);
  389. unregister_netdev(ndev);
  390. netdev_nullify(ndev);
  391. netdev_put(ndev);
  392. vxge_debug(VXGE_INFO, "%s: ethernet device unregistered\n",
  393. VXGE_DRIVER_NAME);
  394. }
  395. /**
  396. * vxge_probe
  397. * @pdev : structure containing the PCI related information of the device.
  398. * @id: List of PCI devices supported by the driver listed in vxge_id_table.
  399. * Description:
  400. * This function is called when a new PCI device gets detected and initializes
  401. * it.
  402. * Return value:
  403. * returns 0 on success and negative on failure.
  404. *
  405. */
  406. static int
  407. vxge_probe(struct pci_device *pdev)
  408. {
  409. struct __vxge_hw_device *hldev;
  410. enum vxge_hw_status status;
  411. int ret = 0;
  412. u64 vpath_mask = 0;
  413. struct vxgedev *vdev;
  414. int i;
  415. u8 revision, titan1;
  416. u32 host_type;
  417. u32 function_mode;
  418. unsigned long mmio_start, mmio_len;
  419. void *bar0;
  420. struct vxge_hw_device_hw_info hw_info;
  421. struct vxge_hw_device_version *fw_version;
  422. vxge_debug(VXGE_INFO, "vxge_probe for device " PCI_FMT "\n",
  423. PCI_ARGS(pdev));
  424. pci_read_config_byte(pdev, PCI_REVISION_ID, &revision);
  425. titan1 = is_titan1(pdev->device, revision);
  426. mmio_start = pci_bar_start(pdev, PCI_BASE_ADDRESS_0);
  427. mmio_len = pci_bar_size(pdev, PCI_BASE_ADDRESS_0);
  428. vxge_debug(VXGE_INFO, "mmio_start: %#08lx, mmio_len: %#08lx\n",
  429. mmio_start, mmio_len);
  430. /* sets the bus master */
  431. adjust_pci_device(pdev);
  432. bar0 = ioremap(mmio_start, mmio_len);
  433. if (!bar0) {
  434. vxge_debug(VXGE_ERR,
  435. "%s : cannot remap io memory bar0\n", __func__);
  436. ret = -ENODEV;
  437. goto _exit0;
  438. }
  439. status = vxge_hw_device_hw_info_get(pdev, bar0, &hw_info);
  440. if (status != VXGE_HW_OK) {
  441. vxge_debug(VXGE_ERR,
  442. "%s: Reading of hardware info failed.\n",
  443. VXGE_DRIVER_NAME);
  444. ret = -EINVAL;
  445. goto _exit1;
  446. }
  447. if (hw_info.func_id != 0) {
  448. /* Non zero function, So do not load the driver */
  449. iounmap(bar0);
  450. pci_set_drvdata(pdev, NULL);
  451. return -EINVAL;
  452. }
  453. vpath_mask = hw_info.vpath_mask;
  454. if (vpath_mask == 0) {
  455. vxge_debug(VXGE_ERR,
  456. "%s: No vpaths available in device\n",
  457. VXGE_DRIVER_NAME);
  458. ret = -EINVAL;
  459. goto _exit1;
  460. }
  461. vxge_debug(VXGE_INFO,
  462. "%s:%d Vpath mask = %llx\n", __func__, __LINE__,
  463. (unsigned long long)vpath_mask);
  464. host_type = hw_info.host_type;
  465. fw_version = &hw_info.fw_version;
  466. /* fail the driver loading if firmware is incompatible */
  467. if ((fw_version->major != VXGE_CERT_FW_VER_MAJOR) ||
  468. (fw_version->minor < VXGE_CERT_FW_VER_MINOR)) {
  469. printf("%s: Adapter's current firmware version: %d.%d.%d\n",
  470. VXGE_DRIVER_NAME, fw_version->major,
  471. fw_version->minor, fw_version->build);
  472. printf("%s: Upgrade firmware to version %d.%d.%d\n",
  473. VXGE_DRIVER_NAME, VXGE_CERT_FW_VER_MAJOR,
  474. VXGE_CERT_FW_VER_MINOR, VXGE_CERT_FW_VER_BUILD);
  475. ret = -EACCES;
  476. goto _exit1;
  477. }
  478. status = vxge_hw_device_initialize(&hldev, bar0, pdev, titan1);
  479. if (status != VXGE_HW_OK) {
  480. vxge_debug(VXGE_ERR,
  481. "Failed to initialize device (%d)\n", status);
  482. ret = -EINVAL;
  483. goto _exit1;
  484. }
  485. memcpy(&hldev->hw_info, &hw_info,
  486. sizeof(struct vxge_hw_device_hw_info));
  487. /* find the vpath id of the first available one */
  488. for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
  489. if (vpath_mask & vxge_mBIT(i)) {
  490. hldev->first_vp_id = i;
  491. break;
  492. }
  493. /* if FCS stripping is not disabled in MAC fail driver load */
  494. if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
  495. vxge_debug(VXGE_ERR,
  496. "%s: FCS stripping is not disabled in MAC"
  497. " failing driver load\n", VXGE_DRIVER_NAME);
  498. ret = -EINVAL;
  499. goto _exit2;
  500. }
  501. /* Read function mode */
  502. status = vxge_hw_get_func_mode(hldev, &function_mode);
  503. if (status != VXGE_HW_OK)
  504. goto _exit2;
  505. hldev->hw_info.function_mode = function_mode;
  506. /* set private device info */
  507. pci_set_drvdata(pdev, hldev);
  508. if (vxge_device_register(hldev, &vdev)) {
  509. ret = -EINVAL;
  510. goto _exit2;
  511. }
  512. /* set private HW device info */
  513. hldev->ndev = vdev->ndev;
  514. hldev->vdev = vdev;
  515. hldev->pdev = pdev;
  516. vdev->mtu = VXGE_HW_DEFAULT_MTU;
  517. vdev->bar0 = bar0;
  518. vdev->titan1 = titan1;
  519. /* Virtual Path count */
  520. vdev->vpath.device_id = hldev->first_vp_id;
  521. vdev->vpath.vdev = vdev;
  522. memcpy((u8 *)vdev->vpath.macaddr,
  523. (u8 *)hldev->hw_info.mac_addrs[hldev->first_vp_id],
  524. ETH_ALEN);
  525. hldev->hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
  526. hldev->hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
  527. hldev->hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
  528. vxge_debug(VXGE_INFO, "%s: Neterion %s Server Adapter\n",
  529. VXGE_DRIVER_NAME, hldev->hw_info.product_desc);
  530. vxge_debug(VXGE_INFO, "%s: SERIAL NUMBER: %s\n",
  531. VXGE_DRIVER_NAME, hldev->hw_info.serial_number);
  532. vxge_debug(VXGE_INFO, "%s: PART NUMBER: %s\n",
  533. VXGE_DRIVER_NAME, hldev->hw_info.part_number);
  534. vxge_debug(VXGE_INFO, "%s: MAC ADDR: %s\n",
  535. VXGE_DRIVER_NAME, eth_ntoa(vdev->vpath.macaddr));
  536. vxge_debug(VXGE_INFO,
  537. "%s: Firmware version : %s Date : %s\n", VXGE_DRIVER_NAME,
  538. hldev->hw_info.fw_version.version,
  539. hldev->hw_info.fw_date.date);
  540. vxge_debug(VXGE_INFO, "%s: %s Enabled\n",
  541. VXGE_DRIVER_NAME, vxge_func_mode_names[function_mode]);
  542. vxge_debug(VXGE_INFO, "%s: %s:%d Probe Exiting...\n",
  543. VXGE_DRIVER_NAME, __func__, __LINE__);
  544. return 0;
  545. _exit2:
  546. vxge_hw_device_terminate(hldev);
  547. _exit1:
  548. iounmap(bar0);
  549. _exit0:
  550. pci_set_drvdata(pdev, NULL);
  551. printf("%s: WARNING!! Driver loading failed!!\n",
  552. VXGE_DRIVER_NAME);
  553. return ret;
  554. }
  555. /**
  556. * vxge_remove - Free the PCI device
  557. * @pdev: structure containing the PCI related information of the device.
  558. * Description: This function is called by the Pci subsystem to release a
  559. * PCI device and free up all resource held up by the device.
  560. */
  561. static void
  562. vxge_remove(struct pci_device *pdev)
  563. {
  564. struct __vxge_hw_device *hldev;
  565. struct vxgedev *vdev = NULL;
  566. struct net_device *ndev;
  567. vxge_debug(VXGE_INFO,
  568. "%s:%d\n", __func__, __LINE__);
  569. hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev);
  570. if (hldev == NULL)
  571. return;
  572. ndev = hldev->ndev;
  573. vdev = netdev_priv(ndev);
  574. iounmap(vdev->bar0);
  575. vxge_device_unregister(hldev);
  576. vxge_debug(VXGE_INFO,
  577. "%s:%d Device unregistered\n", __func__, __LINE__);
  578. vxge_hw_device_terminate(hldev);
  579. pci_set_drvdata(pdev, NULL);
  580. }
  581. /* vxge net device operations */
  582. static struct net_device_operations vxge_operations = {
  583. .open = vxge_open,
  584. .close = vxge_close,
  585. .transmit = vxge_xmit,
  586. .poll = vxge_poll,
  587. .irq = vxge_irq,
  588. };
  589. static struct pci_device_id vxge_main_nics[] = {
  590. /* If you change this, also adjust vxge_nics[] in vxge.c */
  591. PCI_ID(0x17d5, 0x5833, "vxge-x3100", "Neterion X3100 Series", 0),
  592. };
  593. struct pci_driver vxge_driver __pci_driver = {
  594. .ids = vxge_main_nics,
  595. .id_count = (sizeof(vxge_main_nics) / sizeof(vxge_main_nics[0])),
  596. .probe = vxge_probe,
  597. .remove = vxge_remove,
  598. };