You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669
/*
 * Copyright (C) 2011 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
  18. FILE_LICENCE ( GPL2_OR_LATER );
  19. #include <stdint.h>
  20. #include <errno.h>
  21. #include <assert.h>
  22. #include <byteswap.h>
  23. #include <ipxe/pci.h>
  24. #include <ipxe/io.h>
  25. #include <ipxe/malloc.h>
  26. #include <ipxe/iobuf.h>
  27. #include <ipxe/netdevice.h>
  28. #include <ipxe/if_ether.h>
  29. #include <ipxe/ethernet.h>
  30. #include "vmxnet3.h"
/**
 * @file
 *
 * VMware vmxnet3 virtual NIC driver
 *
 */
  37. /**
  38. * Issue command
  39. *
  40. * @v vmxnet vmxnet3 NIC
  41. * @v command Command to issue
  42. * @ret result Command result
  43. */
  44. static inline uint32_t vmxnet3_command ( struct vmxnet3_nic *vmxnet,
  45. uint32_t command ) {
  46. /* Issue command */
  47. writel ( command, ( vmxnet->vd + VMXNET3_VD_CMD ) );
  48. return readl ( vmxnet->vd + VMXNET3_VD_CMD );
  49. }
  50. /**
  51. * Transmit packet
  52. *
  53. * @v netdev Network device
  54. * @v iobuf I/O buffer
  55. * @ret rc Return status code
  56. */
  57. static int vmxnet3_transmit ( struct net_device *netdev,
  58. struct io_buffer *iobuf ) {
  59. struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
  60. struct vmxnet3_tx_desc *tx_desc;
  61. unsigned int desc_idx;
  62. unsigned int generation;
  63. /* Check that we have a free transmit descriptor */
  64. desc_idx = ( vmxnet->count.tx_prod % VMXNET3_NUM_TX_DESC );
  65. generation = ( ( vmxnet->count.tx_prod & VMXNET3_NUM_TX_DESC ) ?
  66. 0 : cpu_to_le32 ( VMXNET3_TXF_GEN ) );
  67. if ( vmxnet->tx_iobuf[desc_idx] ) {
  68. DBGC ( vmxnet, "VMXNET3 %p out of transmit descriptors\n",
  69. vmxnet );
  70. return -ENOBUFS;
  71. }
  72. /* Increment producer counter */
  73. vmxnet->count.tx_prod++;
  74. /* Store I/O buffer for later completion */
  75. vmxnet->tx_iobuf[desc_idx] = iobuf;
  76. /* Populate transmit descriptor */
  77. tx_desc = &vmxnet->dma->tx_desc[desc_idx];
  78. tx_desc->address = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
  79. tx_desc->flags[0] = ( generation | cpu_to_le32 ( iob_len ( iobuf ) ) );
  80. tx_desc->flags[1] = cpu_to_le32 ( VMXNET3_TXF_CQ | VMXNET3_TXF_EOP );
  81. /* Hand over descriptor to NIC */
  82. wmb();
  83. writel ( ( vmxnet->count.tx_prod % VMXNET3_NUM_TX_DESC ),
  84. ( vmxnet->pt + VMXNET3_PT_TXPROD ) );
  85. return 0;
  86. }
  87. /**
  88. * Poll for completed transmissions
  89. *
  90. * @v netdev Network device
  91. */
  92. static void vmxnet3_poll_tx ( struct net_device *netdev ) {
  93. struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
  94. struct vmxnet3_tx_comp *tx_comp;
  95. struct io_buffer *iobuf;
  96. unsigned int comp_idx;
  97. unsigned int desc_idx;
  98. unsigned int generation;
  99. while ( 1 ) {
  100. /* Look for completed descriptors */
  101. comp_idx = ( vmxnet->count.tx_cons % VMXNET3_NUM_TX_COMP );
  102. generation = ( ( vmxnet->count.tx_cons & VMXNET3_NUM_TX_COMP ) ?
  103. 0 : cpu_to_le32 ( VMXNET3_TXCF_GEN ) );
  104. tx_comp = &vmxnet->dma->tx_comp[comp_idx];
  105. if ( generation != ( tx_comp->flags &
  106. cpu_to_le32 ( VMXNET3_TXCF_GEN ) ) ) {
  107. break;
  108. }
  109. /* Increment consumer counter */
  110. vmxnet->count.tx_cons++;
  111. /* Locate corresponding transmit descriptor */
  112. desc_idx = ( le32_to_cpu ( tx_comp->index ) %
  113. VMXNET3_NUM_TX_DESC );
  114. iobuf = vmxnet->tx_iobuf[desc_idx];
  115. if ( ! iobuf ) {
  116. DBGC ( vmxnet, "VMXNET3 %p completed on empty transmit "
  117. "buffer %#x/%#x\n", vmxnet, comp_idx, desc_idx );
  118. netdev_tx_err ( netdev, NULL, -ENOTTY );
  119. continue;
  120. }
  121. /* Remove I/O buffer from transmit queue */
  122. vmxnet->tx_iobuf[desc_idx] = NULL;
  123. /* Report transmission completion to network layer */
  124. DBGC2 ( vmxnet, "VMXNET3 %p completed TX %#x/%#x (len %#zx)\n",
  125. vmxnet, comp_idx, desc_idx, iob_len ( iobuf ) );
  126. netdev_tx_complete ( netdev, iobuf );
  127. }
  128. }
  129. /**
  130. * Flush any uncompleted transmit buffers
  131. *
  132. * @v netdev Network device
  133. */
  134. static void vmxnet3_flush_tx ( struct net_device *netdev ) {
  135. struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
  136. unsigned int i;
  137. for ( i = 0 ; i < VMXNET3_NUM_TX_DESC ; i++ ) {
  138. if ( vmxnet->tx_iobuf[i] ) {
  139. netdev_tx_complete_err ( netdev, vmxnet->tx_iobuf[i],
  140. -ECANCELED );
  141. vmxnet->tx_iobuf[i] = NULL;
  142. }
  143. }
  144. }
  145. /**
  146. * Refill receive ring
  147. *
  148. * @v netdev Network device
  149. */
  150. static void vmxnet3_refill_rx ( struct net_device *netdev ) {
  151. struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
  152. struct vmxnet3_rx_desc *rx_desc;
  153. struct io_buffer *iobuf;
  154. unsigned int orig_rx_prod = vmxnet->count.rx_prod;
  155. unsigned int desc_idx;
  156. unsigned int generation;
  157. /* Fill receive ring to specified fill level */
  158. while ( vmxnet->count.rx_fill < VMXNET3_RX_FILL ) {
  159. /* Locate receive descriptor */
  160. desc_idx = ( vmxnet->count.rx_prod % VMXNET3_NUM_RX_DESC );
  161. generation = ( ( vmxnet->count.rx_prod & VMXNET3_NUM_RX_DESC ) ?
  162. 0 : cpu_to_le32 ( VMXNET3_RXF_GEN ) );
  163. assert ( vmxnet->rx_iobuf[desc_idx] == NULL );
  164. /* Allocate I/O buffer */
  165. iobuf = alloc_iob ( VMXNET3_MTU + NET_IP_ALIGN );
  166. if ( ! iobuf ) {
  167. /* Non-fatal low memory condition */
  168. break;
  169. }
  170. iob_reserve ( iobuf, NET_IP_ALIGN );
  171. /* Increment producer counter and fill level */
  172. vmxnet->count.rx_prod++;
  173. vmxnet->count.rx_fill++;
  174. /* Store I/O buffer for later completion */
  175. vmxnet->rx_iobuf[desc_idx] = iobuf;
  176. /* Populate receive descriptor */
  177. rx_desc = &vmxnet->dma->rx_desc[desc_idx];
  178. rx_desc->address = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
  179. rx_desc->flags = ( generation | cpu_to_le32 ( VMXNET3_MTU ) );
  180. }
  181. /* Hand over any new descriptors to NIC */
  182. if ( vmxnet->count.rx_prod != orig_rx_prod ) {
  183. wmb();
  184. writel ( ( vmxnet->count.rx_prod % VMXNET3_NUM_RX_DESC ),
  185. ( vmxnet->pt + VMXNET3_PT_RXPROD ) );
  186. }
  187. }
  188. /**
  189. * Poll for received packets
  190. *
  191. * @v netdev Network device
  192. */
  193. static void vmxnet3_poll_rx ( struct net_device *netdev ) {
  194. struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
  195. struct vmxnet3_rx_comp *rx_comp;
  196. struct io_buffer *iobuf;
  197. unsigned int comp_idx;
  198. unsigned int desc_idx;
  199. unsigned int generation;
  200. size_t len;
  201. while ( 1 ) {
  202. /* Look for completed descriptors */
  203. comp_idx = ( vmxnet->count.rx_cons % VMXNET3_NUM_RX_COMP );
  204. generation = ( ( vmxnet->count.rx_cons & VMXNET3_NUM_RX_COMP ) ?
  205. 0 : cpu_to_le32 ( VMXNET3_RXCF_GEN ) );
  206. rx_comp = &vmxnet->dma->rx_comp[comp_idx];
  207. if ( generation != ( rx_comp->flags &
  208. cpu_to_le32 ( VMXNET3_RXCF_GEN ) ) ) {
  209. break;
  210. }
  211. /* Increment consumer counter */
  212. vmxnet->count.rx_cons++;
  213. /* Locate corresponding receive descriptor */
  214. desc_idx = ( le32_to_cpu ( rx_comp->index ) %
  215. VMXNET3_NUM_RX_DESC );
  216. iobuf = vmxnet->rx_iobuf[desc_idx];
  217. if ( ! iobuf ) {
  218. DBGC ( vmxnet, "VMXNET3 %p completed on empty receive "
  219. "buffer %#x/%#x\n", vmxnet, comp_idx, desc_idx );
  220. netdev_rx_err ( netdev, NULL, -ENOTTY );
  221. continue;
  222. }
  223. /* Remove I/O buffer from receive queue */
  224. vmxnet->rx_iobuf[desc_idx] = NULL;
  225. vmxnet->count.rx_fill--;
  226. /* Deliver packet to network layer */
  227. len = ( le32_to_cpu ( rx_comp->len ) &
  228. ( VMXNET3_MAX_PACKET_LEN - 1 ) );
  229. DBGC2 ( vmxnet, "VMXNET3 %p completed RX %#x/%#x (len %#zx)\n",
  230. vmxnet, comp_idx, desc_idx, len );
  231. iob_put ( iobuf, len );
  232. netdev_rx ( netdev, iobuf );
  233. }
  234. }
  235. /**
  236. * Flush any uncompleted receive buffers
  237. *
  238. * @v netdev Network device
  239. */
  240. static void vmxnet3_flush_rx ( struct net_device *netdev ) {
  241. struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
  242. struct io_buffer *iobuf;
  243. unsigned int i;
  244. for ( i = 0 ; i < VMXNET3_NUM_RX_DESC ; i++ ) {
  245. if ( ( iobuf = vmxnet->rx_iobuf[i] ) != NULL ) {
  246. netdev_rx_err ( netdev, iobuf, -ECANCELED );
  247. vmxnet->rx_iobuf[i] = NULL;
  248. }
  249. }
  250. }
  251. /**
  252. * Check link state
  253. *
  254. * @v netdev Network device
  255. */
  256. static void vmxnet3_check_link ( struct net_device *netdev ) {
  257. struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
  258. uint32_t state;
  259. int link_up;
  260. unsigned int link_speed;
  261. /* Get link state */
  262. state = vmxnet3_command ( vmxnet, VMXNET3_CMD_GET_LINK );
  263. link_up = ( state & 1 );
  264. link_speed = ( state >> 16 );
  265. /* Report link state to network device */
  266. if ( link_up ) {
  267. DBGC ( vmxnet, "VMXNET3 %p link is up at %d Mbps\n",
  268. vmxnet, link_speed );
  269. netdev_link_up ( netdev );
  270. } else {
  271. DBGC ( vmxnet, "VMXNET3 %p link is down\n", vmxnet );
  272. netdev_link_down ( netdev );
  273. }
  274. }
  275. /**
  276. * Poll for events
  277. *
  278. * @v netdev Network device
  279. */
  280. static void vmxnet3_poll_events ( struct net_device *netdev ) {
  281. struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
  282. uint32_t events;
  283. /* Do nothing unless there are events to process */
  284. if ( ! vmxnet->dma->shared.ecr )
  285. return;
  286. events = le32_to_cpu ( vmxnet->dma->shared.ecr );
  287. /* Acknowledge these events */
  288. writel ( events, ( vmxnet->vd + VMXNET3_VD_ECR ) );
  289. /* Check for link state change */
  290. if ( events & VMXNET3_ECR_LINK ) {
  291. vmxnet3_check_link ( netdev );
  292. events &= ~VMXNET3_ECR_LINK;
  293. }
  294. /* Check for queue errors */
  295. if ( events & ( VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR ) ) {
  296. vmxnet3_command ( vmxnet, VMXNET3_CMD_GET_QUEUE_STATUS );
  297. DBGC ( vmxnet, "VMXNET3 %p queue error status (TX %08x, RX "
  298. "%08x)\n", vmxnet,
  299. le32_to_cpu ( vmxnet->dma->queues.tx.status.error ),
  300. le32_to_cpu ( vmxnet->dma->queues.rx.status.error ) );
  301. /* Report errors to allow for visibility via "ifstat" */
  302. if ( events & VMXNET3_ECR_TQERR )
  303. netdev_tx_err ( netdev, NULL, -EPIPE );
  304. if ( events & VMXNET3_ECR_RQERR )
  305. netdev_rx_err ( netdev, NULL, -EPIPE );
  306. events &= ~( VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR );
  307. }
  308. /* Check for unknown events */
  309. if ( events ) {
  310. DBGC ( vmxnet, "VMXNET3 %p unknown events %08x\n",
  311. vmxnet, events );
  312. /* Report error to allow for visibility via "ifstat" */
  313. netdev_rx_err ( netdev, NULL, -ENODEV );
  314. }
  315. }
  316. /**
  317. * Poll network device
  318. *
  319. * @v netdev Network device
  320. */
  321. static void vmxnet3_poll ( struct net_device *netdev ) {
  322. vmxnet3_poll_events ( netdev );
  323. vmxnet3_poll_tx ( netdev );
  324. vmxnet3_poll_rx ( netdev );
  325. vmxnet3_refill_rx ( netdev );
  326. }
  327. /**
  328. * Enable/disable interrupts
  329. *
  330. * @v netdev Network device
  331. * @v enable Interrupts should be enabled
  332. */
  333. static void vmxnet3_irq ( struct net_device *netdev, int enable ) {
  334. struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
  335. DBGC ( vmxnet, "VMXNET3 %p %s IRQ not implemented\n",
  336. vmxnet, ( enable ? "enable" : "disable" ) );
  337. }
  338. /**
  339. * Set MAC address
  340. *
  341. * @v vmxnet vmxnet3 NIC
  342. * @v ll_addr Link-layer address to set
  343. */
  344. static void vmxnet3_set_ll_addr ( struct vmxnet3_nic *vmxnet,
  345. const void *ll_addr ) {
  346. struct {
  347. uint32_t low;
  348. uint32_t high;
  349. } __attribute__ (( packed )) mac;
  350. memset ( &mac, 0, sizeof ( mac ) );
  351. memcpy ( &mac, ll_addr, ETH_ALEN );
  352. writel ( cpu_to_le32 ( mac.low ), ( vmxnet->vd + VMXNET3_VD_MACL ) );
  353. writel ( cpu_to_le32 ( mac.high ), ( vmxnet->vd + VMXNET3_VD_MACH ) );
  354. }
  355. /**
  356. * Open NIC
  357. *
  358. * @v netdev Network device
  359. * @ret rc Return status code
  360. */
  361. static int vmxnet3_open ( struct net_device *netdev ) {
  362. struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
  363. struct vmxnet3_shared *shared;
  364. struct vmxnet3_queues *queues;
  365. uint64_t shared_bus;
  366. uint64_t queues_bus;
  367. uint32_t status;
  368. int rc;
  369. /* Allocate DMA areas */
  370. vmxnet->dma = malloc_dma ( sizeof ( *vmxnet->dma ), VMXNET3_DMA_ALIGN );
  371. if ( ! vmxnet->dma ) {
  372. DBGC ( vmxnet, "VMXNET3 %p could not allocate DMA area\n",
  373. vmxnet );
  374. rc = -ENOMEM;
  375. goto err_alloc_dma;
  376. }
  377. memset ( vmxnet->dma, 0, sizeof ( *vmxnet->dma ) );
  378. /* Populate queue descriptors */
  379. queues = &vmxnet->dma->queues;
  380. queues->tx.cfg.desc_address =
  381. cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->tx_desc ) );
  382. queues->tx.cfg.comp_address =
  383. cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->tx_comp ) );
  384. queues->tx.cfg.num_desc = cpu_to_le32 ( VMXNET3_NUM_TX_DESC );
  385. queues->tx.cfg.num_comp = cpu_to_le32 ( VMXNET3_NUM_TX_COMP );
  386. queues->rx.cfg.desc_address[0] =
  387. cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->rx_desc ) );
  388. queues->rx.cfg.comp_address =
  389. cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->rx_comp ) );
  390. queues->rx.cfg.num_desc[0] = cpu_to_le32 ( VMXNET3_NUM_RX_DESC );
  391. queues->rx.cfg.num_comp = cpu_to_le32 ( VMXNET3_NUM_RX_COMP );
  392. queues_bus = virt_to_bus ( queues );
  393. DBGC ( vmxnet, "VMXNET3 %p queue descriptors at %08llx+%zx\n",
  394. vmxnet, queues_bus, sizeof ( *queues ) );
  395. /* Populate shared area */
  396. shared = &vmxnet->dma->shared;
  397. shared->magic = cpu_to_le32 ( VMXNET3_SHARED_MAGIC );
  398. shared->misc.version = cpu_to_le32 ( VMXNET3_VERSION_MAGIC );
  399. shared->misc.version_support = cpu_to_le32 ( VMXNET3_VERSION_SELECT );
  400. shared->misc.upt_version_support =
  401. cpu_to_le32 ( VMXNET3_UPT_VERSION_SELECT );
  402. shared->misc.queue_desc_address = cpu_to_le64 ( queues_bus );
  403. shared->misc.queue_desc_len = cpu_to_le32 ( sizeof ( *queues ) );
  404. shared->misc.mtu = cpu_to_le32 ( VMXNET3_MTU );
  405. shared->misc.num_tx_queues = 1;
  406. shared->misc.num_rx_queues = 1;
  407. shared->interrupt.num_intrs = 1;
  408. shared->interrupt.control = cpu_to_le32 ( VMXNET3_IC_DISABLE_ALL );
  409. shared->rx_filter.mode = cpu_to_le32 ( VMXNET3_RXM_UCAST |
  410. VMXNET3_RXM_BCAST |
  411. VMXNET3_RXM_ALL_MULTI );
  412. shared_bus = virt_to_bus ( shared );
  413. DBGC ( vmxnet, "VMXNET3 %p shared area at %08llx+%zx\n",
  414. vmxnet, shared_bus, sizeof ( *shared ) );
  415. /* Zero counters */
  416. memset ( &vmxnet->count, 0, sizeof ( vmxnet->count ) );
  417. /* Set MAC address */
  418. vmxnet3_set_ll_addr ( vmxnet, &netdev->ll_addr );
  419. /* Pass shared area to device */
  420. writel ( ( shared_bus >> 0 ), ( vmxnet->vd + VMXNET3_VD_DSAL ) );
  421. writel ( ( shared_bus >> 32 ), ( vmxnet->vd + VMXNET3_VD_DSAH ) );
  422. /* Activate device */
  423. if ( ( status = vmxnet3_command ( vmxnet,
  424. VMXNET3_CMD_ACTIVATE_DEV ) ) != 0 ) {
  425. DBGC ( vmxnet, "VMXNET3 %p could not activate (status %#x)\n",
  426. vmxnet, status );
  427. rc = -EIO;
  428. goto err_activate;
  429. }
  430. /* Fill receive ring */
  431. vmxnet3_refill_rx ( netdev );
  432. return 0;
  433. vmxnet3_command ( vmxnet, VMXNET3_CMD_QUIESCE_DEV );
  434. vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV );
  435. err_activate:
  436. vmxnet3_flush_tx ( netdev );
  437. vmxnet3_flush_rx ( netdev );
  438. free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
  439. err_alloc_dma:
  440. return rc;
  441. }
  442. /**
  443. * Close NIC
  444. *
  445. * @v netdev Network device
  446. */
  447. static void vmxnet3_close ( struct net_device *netdev ) {
  448. struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
  449. vmxnet3_command ( vmxnet, VMXNET3_CMD_QUIESCE_DEV );
  450. vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV );
  451. vmxnet3_flush_tx ( netdev );
  452. vmxnet3_flush_rx ( netdev );
  453. free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
  454. }
  455. /** vmxnet3 net device operations */
  456. static struct net_device_operations vmxnet3_operations = {
  457. .open = vmxnet3_open,
  458. .close = vmxnet3_close,
  459. .transmit = vmxnet3_transmit,
  460. .poll = vmxnet3_poll,
  461. .irq = vmxnet3_irq,
  462. };
  463. /**
  464. * Check version
  465. *
  466. * @v vmxnet vmxnet3 NIC
  467. * @ret rc Return status code
  468. */
  469. static int vmxnet3_check_version ( struct vmxnet3_nic *vmxnet ) {
  470. uint32_t version;
  471. uint32_t upt_version;
  472. /* Read version */
  473. version = readl ( vmxnet->vd + VMXNET3_VD_VRRS );
  474. upt_version = readl ( vmxnet->vd + VMXNET3_VD_UVRS );
  475. DBGC ( vmxnet, "VMXNET3 %p is version %d (UPT version %d)\n",
  476. vmxnet, version, upt_version );
  477. /* Inform NIC of driver version */
  478. writel ( VMXNET3_VERSION_SELECT, ( vmxnet->vd + VMXNET3_VD_VRRS ) );
  479. writel ( VMXNET3_UPT_VERSION_SELECT, ( vmxnet->vd + VMXNET3_VD_UVRS ) );
  480. return 0;
  481. }
  482. /**
  483. * Get permanent MAC address
  484. *
  485. * @v vmxnet vmxnet3 NIC
  486. * @v hw_addr Hardware address to fill in
  487. */
  488. static void vmxnet3_get_hw_addr ( struct vmxnet3_nic *vmxnet, void *hw_addr ) {
  489. struct {
  490. uint32_t low;
  491. uint32_t high;
  492. } __attribute__ (( packed )) mac;
  493. mac.low = le32_to_cpu ( vmxnet3_command ( vmxnet,
  494. VMXNET3_CMD_GET_PERM_MAC_LO ) );
  495. mac.high = le32_to_cpu ( vmxnet3_command ( vmxnet,
  496. VMXNET3_CMD_GET_PERM_MAC_HI ) );
  497. memcpy ( hw_addr, &mac, ETH_ALEN );
  498. }
  499. /**
  500. * Probe PCI device
  501. *
  502. * @v pci PCI device
  503. * @v id PCI ID
  504. * @ret rc Return status code
  505. */
  506. static int vmxnet3_probe ( struct pci_device *pci ) {
  507. struct net_device *netdev;
  508. struct vmxnet3_nic *vmxnet;
  509. int rc;
  510. /* Allocate network device */
  511. netdev = alloc_etherdev ( sizeof ( *vmxnet ) );
  512. if ( ! netdev ) {
  513. rc = -ENOMEM;
  514. goto err_alloc_etherdev;
  515. }
  516. netdev_init ( netdev, &vmxnet3_operations );
  517. vmxnet = netdev_priv ( netdev );
  518. pci_set_drvdata ( pci, netdev );
  519. netdev->dev = &pci->dev;
  520. memset ( vmxnet, 0, sizeof ( *vmxnet ) );
  521. /* Fix up PCI device */
  522. adjust_pci_device ( pci );
  523. /* Map PCI BARs */
  524. vmxnet->pt = ioremap ( pci_bar_start ( pci, VMXNET3_PT_BAR ),
  525. VMXNET3_PT_LEN );
  526. vmxnet->vd = ioremap ( pci_bar_start ( pci, VMXNET3_VD_BAR ),
  527. VMXNET3_VD_LEN );
  528. /* Version check */
  529. if ( ( rc = vmxnet3_check_version ( vmxnet ) ) != 0 )
  530. goto err_check_version;
  531. /* Reset device */
  532. if ( ( rc = vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV ) ) != 0 )
  533. goto err_reset;
  534. /* Read initial MAC address */
  535. vmxnet3_get_hw_addr ( vmxnet, &netdev->hw_addr );
  536. /* Register network device */
  537. if ( ( rc = register_netdev ( netdev ) ) != 0 ) {
  538. DBGC ( vmxnet, "VMXNET3 %p could not register net device: "
  539. "%s\n", vmxnet, strerror ( rc ) );
  540. goto err_register_netdev;
  541. }
  542. /* Get initial link state */
  543. vmxnet3_check_link ( netdev );
  544. return 0;
  545. unregister_netdev ( netdev );
  546. err_register_netdev:
  547. err_reset:
  548. err_check_version:
  549. iounmap ( vmxnet->vd );
  550. iounmap ( vmxnet->pt );
  551. netdev_nullify ( netdev );
  552. netdev_put ( netdev );
  553. err_alloc_etherdev:
  554. return rc;
  555. }
  556. /**
  557. * Remove PCI device
  558. *
  559. * @v pci PCI device
  560. */
  561. static void vmxnet3_remove ( struct pci_device *pci ) {
  562. struct net_device *netdev = pci_get_drvdata ( pci );
  563. struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
  564. unregister_netdev ( netdev );
  565. iounmap ( vmxnet->vd );
  566. iounmap ( vmxnet->pt );
  567. netdev_nullify ( netdev );
  568. netdev_put ( netdev );
  569. }
  570. /** vmxnet3 PCI IDs */
  571. static struct pci_device_id vmxnet3_nics[] = {
  572. PCI_ROM ( 0x15ad, 0x07b0, "vmxnet3", "vmxnet3 virtual NIC", 0 ),
  573. };
  574. /** vmxnet3 PCI driver */
  575. struct pci_driver vmxnet3_driver __pci_driver = {
  576. .ids = vmxnet3_nics,
  577. .id_count = ( sizeof ( vmxnet3_nics ) / sizeof ( vmxnet3_nics[0] ) ),
  578. .probe = vmxnet3_probe,
  579. .remove = vmxnet3_remove,
  580. };