
vmxnet3.c

/*
 * Copyright (C) 2011 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

FILE_LICENCE ( GPL2_OR_LATER );

#include <stdint.h>
#include <errno.h>
#include <assert.h>
#include <byteswap.h>
#include <ipxe/pci.h>
#include <ipxe/io.h>
#include <ipxe/malloc.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include "vmxnet3.h"
/**
 * @file
 *
 * VMware vmxnet3 virtual NIC driver
 *
 */

/**
 * Issue command
 *
 * @v vmxnet		vmxnet3 NIC
 * @v command		Command to issue
 * @ret result		Command result
 */
static inline uint32_t vmxnet3_command ( struct vmxnet3_nic *vmxnet,
                                         uint32_t command ) {

        /* Issue command */
        writel ( command, ( vmxnet->vd + VMXNET3_VD_CMD ) );
        return readl ( vmxnet->vd + VMXNET3_VD_CMD );
}
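
/* Note: VMXNET3_VD_CMD acts as both the command doorbell and the
 * result register; the device latches the command written above, and
 * the subsequent readl() of the same register returns that command's
 * result.
 */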

/**
 * Transmit packet
 *
 * @v netdev		Network device
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int vmxnet3_transmit ( struct net_device *netdev,
                              struct io_buffer *iobuf ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_tx_desc *tx_desc;
        unsigned int desc_idx;
        unsigned int generation;

        /* Check that we have a free transmit descriptor */
        desc_idx = ( vmxnet->count.tx_prod % VMXNET3_NUM_TX_DESC );
        generation = ( ( vmxnet->count.tx_prod & VMXNET3_NUM_TX_DESC ) ?
                       0 : cpu_to_le32 ( VMXNET3_TXF_GEN ) );
        if ( vmxnet->tx_iobuf[desc_idx] ) {
                DBGC ( vmxnet, "VMXNET3 %p out of transmit descriptors\n",
                       vmxnet );
                return -ENOBUFS;
        }

        /* Increment producer counter */
        vmxnet->count.tx_prod++;

        /* Store I/O buffer for later completion */
        vmxnet->tx_iobuf[desc_idx] = iobuf;

        /* Populate transmit descriptor */
        tx_desc = &vmxnet->dma->tx_desc[desc_idx];
        tx_desc->address = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
        tx_desc->flags[0] = ( generation | cpu_to_le32 ( iob_len ( iobuf ) ) );
        tx_desc->flags[1] = cpu_to_le32 ( VMXNET3_TXF_CQ | VMXNET3_TXF_EOP );

        /* Hand over descriptor to NIC */
        wmb();
        writel ( ( vmxnet->count.tx_prod % VMXNET3_NUM_TX_DESC ),
                 ( vmxnet->pt + VMXNET3_PT_TXPROD ) );

        return 0;
}
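
/* Note on the generation bits: this scheme relies on the ring sizes
 * (VMXNET3_NUM_*_DESC/COMP, defined in vmxnet3.h) being powers of two,
 * so that "counter & VMXNET3_NUM_TX_DESC" isolates the bit just above
 * the ring index and therefore toggles on every wrap of the ring.  The
 * GEN flag written (or expected) by the driver alternates accordingly,
 * which is how driver and device agree on descriptor ownership without
 * a separate head/tail exchange.  The same pattern is used for the
 * completion rings and the receive ring below.
 */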

/**
 * Poll for completed transmissions
 *
 * @v netdev		Network device
 */
static void vmxnet3_poll_tx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_tx_comp *tx_comp;
        struct io_buffer *iobuf;
        unsigned int comp_idx;
        unsigned int desc_idx;
        unsigned int generation;

        while ( 1 ) {

                /* Look for completed descriptors */
                comp_idx = ( vmxnet->count.tx_cons % VMXNET3_NUM_TX_COMP );
                generation = ( ( vmxnet->count.tx_cons & VMXNET3_NUM_TX_COMP ) ?
                               0 : cpu_to_le32 ( VMXNET3_TXCF_GEN ) );
                tx_comp = &vmxnet->dma->tx_comp[comp_idx];
                if ( generation != ( tx_comp->flags &
                                     cpu_to_le32 ( VMXNET3_TXCF_GEN ) ) ) {
                        break;
                }

                /* Increment consumer counter */
                vmxnet->count.tx_cons++;

                /* Locate corresponding transmit descriptor */
                desc_idx = ( le32_to_cpu ( tx_comp->index ) %
                             VMXNET3_NUM_TX_DESC );
                iobuf = vmxnet->tx_iobuf[desc_idx];
                if ( ! iobuf ) {
                        DBGC ( vmxnet, "VMXNET3 %p completed on empty transmit "
                               "buffer %#x/%#x\n", vmxnet, comp_idx, desc_idx );
                        netdev_tx_err ( netdev, NULL, -ENOTTY );
                        continue;
                }

                /* Remove I/O buffer from transmit queue */
                vmxnet->tx_iobuf[desc_idx] = NULL;

                /* Report transmission completion to network layer */
                DBGC2 ( vmxnet, "VMXNET3 %p completed TX %#x/%#x (len %#zx)\n",
                        vmxnet, comp_idx, desc_idx, iob_len ( iobuf ) );
                netdev_tx_complete ( netdev, iobuf );
        }
}

/**
 * Flush any uncompleted transmit buffers
 *
 * @v netdev		Network device
 */
static void vmxnet3_flush_tx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        unsigned int i;

        for ( i = 0 ; i < VMXNET3_NUM_TX_DESC ; i++ ) {
                if ( vmxnet->tx_iobuf[i] ) {
                        netdev_tx_complete_err ( netdev, vmxnet->tx_iobuf[i],
                                                 -ECANCELED );
                        vmxnet->tx_iobuf[i] = NULL;
                }
        }
}

/**
 * Refill receive ring
 *
 * @v netdev		Network device
 */
static void vmxnet3_refill_rx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_rx_desc *rx_desc;
        struct io_buffer *iobuf;
        unsigned int orig_rx_prod = vmxnet->count.rx_prod;
        unsigned int desc_idx;
        unsigned int generation;

        /* Fill receive ring to specified fill level */
        while ( vmxnet->count.rx_fill < VMXNET3_RX_FILL ) {

                /* Locate receive descriptor */
                desc_idx = ( vmxnet->count.rx_prod % VMXNET3_NUM_RX_DESC );
                generation = ( ( vmxnet->count.rx_prod & VMXNET3_NUM_RX_DESC ) ?
                               0 : cpu_to_le32 ( VMXNET3_RXF_GEN ) );
                assert ( vmxnet->rx_iobuf[desc_idx] == NULL );

                /* Allocate I/O buffer */
                iobuf = alloc_iob ( VMXNET3_MTU + NET_IP_ALIGN );
                if ( ! iobuf ) {
                        /* Non-fatal low memory condition */
                        break;
                }
                iob_reserve ( iobuf, NET_IP_ALIGN );

                /* Increment producer counter and fill level */
                vmxnet->count.rx_prod++;
                vmxnet->count.rx_fill++;

                /* Store I/O buffer for later completion */
                vmxnet->rx_iobuf[desc_idx] = iobuf;

                /* Populate receive descriptor */
                rx_desc = &vmxnet->dma->rx_desc[desc_idx];
                rx_desc->address = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
                rx_desc->flags = ( generation | cpu_to_le32 ( VMXNET3_MTU ) );
        }

        /* Hand over any new descriptors to NIC */
        if ( vmxnet->count.rx_prod != orig_rx_prod ) {
                wmb();
                writel ( ( vmxnet->count.rx_prod % VMXNET3_NUM_RX_DESC ),
                         ( vmxnet->pt + VMXNET3_PT_RXPROD ) );
        }
}
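
/* Note: VMXNET3_RX_FILL (defined in vmxnet3.h) caps how many receive
 * buffers are kept posted at once, typically fewer than the full
 * VMXNET3_NUM_RX_DESC ring, which keeps the memory footprint of
 * full-MTU buffers small in a boot environment.  Each buffer reserves
 * NET_IP_ALIGN bytes of headroom so that the IP header lands naturally
 * aligned after the 14-byte Ethernet header.
 */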

/**
 * Poll for received packets
 *
 * @v netdev		Network device
 */
static void vmxnet3_poll_rx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_rx_comp *rx_comp;
        struct io_buffer *iobuf;
        unsigned int comp_idx;
        unsigned int desc_idx;
        unsigned int generation;
        size_t len;

        while ( 1 ) {

                /* Look for completed descriptors */
                comp_idx = ( vmxnet->count.rx_cons % VMXNET3_NUM_RX_COMP );
                generation = ( ( vmxnet->count.rx_cons & VMXNET3_NUM_RX_COMP ) ?
                               0 : cpu_to_le32 ( VMXNET3_RXCF_GEN ) );
                rx_comp = &vmxnet->dma->rx_comp[comp_idx];
                if ( generation != ( rx_comp->flags &
                                     cpu_to_le32 ( VMXNET3_RXCF_GEN ) ) ) {
                        break;
                }

                /* Increment consumer counter */
                vmxnet->count.rx_cons++;

                /* Locate corresponding receive descriptor */
                desc_idx = ( le32_to_cpu ( rx_comp->index ) %
                             VMXNET3_NUM_RX_DESC );
                iobuf = vmxnet->rx_iobuf[desc_idx];
                if ( ! iobuf ) {
                        DBGC ( vmxnet, "VMXNET3 %p completed on empty receive "
                               "buffer %#x/%#x\n", vmxnet, comp_idx, desc_idx );
                        netdev_rx_err ( netdev, NULL, -ENOTTY );
                        continue;
                }

                /* Remove I/O buffer from receive queue */
                vmxnet->rx_iobuf[desc_idx] = NULL;
                vmxnet->count.rx_fill--;

                /* Deliver packet to network layer */
                len = ( le32_to_cpu ( rx_comp->len ) &
                        ( VMXNET3_MAX_PACKET_LEN - 1 ) );
                DBGC2 ( vmxnet, "VMXNET3 %p completed RX %#x/%#x (len %#zx)\n",
                        vmxnet, comp_idx, desc_idx, len );
                iob_put ( iobuf, len );
                netdev_rx ( netdev, iobuf );
        }
}
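
/* Note: only the low-order bits of rx_comp->len carry the packet
 * length, hence the mask against ( VMXNET3_MAX_PACKET_LEN - 1 ) before
 * the buffer is extended with iob_put() and handed to netdev_rx().
 */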

/**
 * Flush any uncompleted receive buffers
 *
 * @v netdev		Network device
 */
static void vmxnet3_flush_rx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct io_buffer *iobuf;
        unsigned int i;

        for ( i = 0 ; i < VMXNET3_NUM_RX_DESC ; i++ ) {
                if ( ( iobuf = vmxnet->rx_iobuf[i] ) != NULL ) {
                        netdev_rx_err ( netdev, iobuf, -ECANCELED );
                        vmxnet->rx_iobuf[i] = NULL;
                }
        }
}

/**
 * Check link state
 *
 * @v netdev		Network device
 */
static void vmxnet3_check_link ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        uint32_t state;
        int link_up;
        unsigned int link_speed;

        /* Get link state */
        state = vmxnet3_command ( vmxnet, VMXNET3_CMD_GET_LINK );
        link_up = ( state & 1 );
        link_speed = ( state >> 16 );

        /* Report link state to network device */
        if ( link_up ) {
                DBGC ( vmxnet, "VMXNET3 %p link is up at %d Mbps\n",
                       vmxnet, link_speed );
                netdev_link_up ( netdev );
        } else {
                DBGC ( vmxnet, "VMXNET3 %p link is down\n", vmxnet );
                netdev_link_down ( netdev );
        }
}
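
/* Note: as decoded above, the VMXNET3_CMD_GET_LINK result carries the
 * link-up status in bit 0 and the link speed in Mbps in the upper
 * sixteen bits.
 */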

/**
 * Poll for events
 *
 * @v netdev		Network device
 */
static void vmxnet3_poll_events ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        uint32_t events;

        /* Do nothing unless there are events to process */
        if ( ! vmxnet->dma->shared.ecr )
                return;
        events = le32_to_cpu ( vmxnet->dma->shared.ecr );

        /* Acknowledge these events */
        writel ( events, ( vmxnet->vd + VMXNET3_VD_ECR ) );

        /* Check for link state change */
        if ( events & VMXNET3_ECR_LINK ) {
                vmxnet3_check_link ( netdev );
                events &= ~VMXNET3_ECR_LINK;
        }

        /* Check for queue errors */
        if ( events & ( VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR ) ) {
                vmxnet3_command ( vmxnet, VMXNET3_CMD_GET_QUEUE_STATUS );
                DBGC ( vmxnet, "VMXNET3 %p queue error status (TX %08x, RX "
                       "%08x)\n", vmxnet,
                       le32_to_cpu ( vmxnet->dma->queues.tx.status.error ),
                       le32_to_cpu ( vmxnet->dma->queues.rx.status.error ) );
                /* Report errors to allow for visibility via "ifstat" */
                if ( events & VMXNET3_ECR_TQERR )
                        netdev_tx_err ( netdev, NULL, -EPIPE );
                if ( events & VMXNET3_ECR_RQERR )
                        netdev_rx_err ( netdev, NULL, -EPIPE );
                events &= ~( VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR );
        }

        /* Check for unknown events */
        if ( events ) {
                DBGC ( vmxnet, "VMXNET3 %p unknown events %08x\n",
                       vmxnet, events );
                /* Report error to allow for visibility via "ifstat" */
                netdev_rx_err ( netdev, NULL, -ENODEV );
        }
}
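
/* Note: pending events are published by the device in the shared
 * area's ECR field and are acknowledged by writing the same bit mask
 * back to the VMXNET3_VD_ECR register, as done at the top of
 * vmxnet3_poll_events() above; each recognised bit is then cleared
 * from the local copy so that anything left over is flagged as an
 * unknown event.
 */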

/**
 * Poll network device
 *
 * @v netdev		Network device
 */
static void vmxnet3_poll ( struct net_device *netdev ) {
        vmxnet3_poll_events ( netdev );
        vmxnet3_poll_tx ( netdev );
        vmxnet3_poll_rx ( netdev );
        vmxnet3_refill_rx ( netdev );
}

/**
 * Enable/disable interrupts
 *
 * @v netdev		Network device
 * @v enable		Interrupts should be enabled
 */
static void vmxnet3_irq ( struct net_device *netdev, int enable ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );

        DBGC ( vmxnet, "VMXNET3 %p %s IRQ not implemented\n",
               vmxnet, ( enable ? "enable" : "disable" ) );
}

/**
 * Set MAC address
 *
 * @v vmxnet		vmxnet3 NIC
 * @v ll_addr		Link-layer address to set
 */
static void vmxnet3_set_ll_addr ( struct vmxnet3_nic *vmxnet,
                                  const void *ll_addr ) {
        struct {
                uint32_t low;
                uint32_t high;
        } __attribute__ (( packed )) mac;

        memset ( &mac, 0, sizeof ( mac ) );
        memcpy ( &mac, ll_addr, ETH_ALEN );
        writel ( cpu_to_le32 ( mac.low ), ( vmxnet->vd + VMXNET3_VD_MACL ) );
        writel ( cpu_to_le32 ( mac.high ), ( vmxnet->vd + VMXNET3_VD_MACH ) );
}
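
/* Note: the six-byte Ethernet address is split across two 32-bit
 * registers: the first four bytes land in mac.low (VMXNET3_VD_MACL)
 * and the remaining two in the low half of mac.high (VMXNET3_VD_MACH),
 * whose upper half is zeroed by the memset above.
 */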

/**
 * Open NIC
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int vmxnet3_open ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_shared *shared;
        struct vmxnet3_queues *queues;
        uint64_t shared_bus;
        uint64_t queues_bus;
        uint32_t status;
        int rc;

        /* Allocate DMA areas */
        vmxnet->dma = malloc_dma ( sizeof ( *vmxnet->dma ), VMXNET3_DMA_ALIGN );
        if ( ! vmxnet->dma ) {
                DBGC ( vmxnet, "VMXNET3 %p could not allocate DMA area\n",
                       vmxnet );
                rc = -ENOMEM;
                goto err_alloc_dma;
        }
        memset ( vmxnet->dma, 0, sizeof ( *vmxnet->dma ) );

        /* Populate queue descriptors */
        queues = &vmxnet->dma->queues;
        queues->tx.cfg.desc_address =
                cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->tx_desc ) );
        queues->tx.cfg.comp_address =
                cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->tx_comp ) );
        queues->tx.cfg.num_desc = cpu_to_le32 ( VMXNET3_NUM_TX_DESC );
        queues->tx.cfg.num_comp = cpu_to_le32 ( VMXNET3_NUM_TX_COMP );
        queues->rx.cfg.desc_address[0] =
                cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->rx_desc ) );
        queues->rx.cfg.comp_address =
                cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->rx_comp ) );
        queues->rx.cfg.num_desc[0] = cpu_to_le32 ( VMXNET3_NUM_RX_DESC );
        queues->rx.cfg.num_comp = cpu_to_le32 ( VMXNET3_NUM_RX_COMP );
        queues_bus = virt_to_bus ( queues );
        DBGC ( vmxnet, "VMXNET3 %p queue descriptors at %08llx+%zx\n",
               vmxnet, queues_bus, sizeof ( *queues ) );

        /* Populate shared area */
        shared = &vmxnet->dma->shared;
        shared->magic = cpu_to_le32 ( VMXNET3_SHARED_MAGIC );
        shared->misc.version = cpu_to_le32 ( VMXNET3_VERSION_MAGIC );
        shared->misc.version_support = cpu_to_le32 ( VMXNET3_VERSION_SELECT );
        shared->misc.upt_version_support =
                cpu_to_le32 ( VMXNET3_UPT_VERSION_SELECT );
        shared->misc.queue_desc_address = cpu_to_le64 ( queues_bus );
        shared->misc.queue_desc_len = cpu_to_le32 ( sizeof ( *queues ) );
        shared->misc.mtu = cpu_to_le32 ( VMXNET3_MTU );
        shared->misc.num_tx_queues = 1;
        shared->misc.num_rx_queues = 1;
        shared->interrupt.num_intrs = 1;
        shared->interrupt.control = cpu_to_le32 ( VMXNET3_IC_DISABLE_ALL );
        shared->rx_filter.mode = cpu_to_le32 ( VMXNET3_RXM_UCAST |
                                               VMXNET3_RXM_BCAST |
                                               VMXNET3_RXM_ALL_MULTI );
        shared_bus = virt_to_bus ( shared );
        DBGC ( vmxnet, "VMXNET3 %p shared area at %08llx+%zx\n",
               vmxnet, shared_bus, sizeof ( *shared ) );

        /* Zero counters */
        memset ( &vmxnet->count, 0, sizeof ( vmxnet->count ) );

        /* Set MAC address */
        vmxnet3_set_ll_addr ( vmxnet, &netdev->ll_addr );

        /* Pass shared area to device */
        writel ( ( shared_bus >> 0 ), ( vmxnet->vd + VMXNET3_VD_DSAL ) );
        writel ( ( shared_bus >> 32 ), ( vmxnet->vd + VMXNET3_VD_DSAH ) );

        /* Activate device */
        if ( ( status = vmxnet3_command ( vmxnet,
                                          VMXNET3_CMD_ACTIVATE_DEV ) ) != 0 ) {
                DBGC ( vmxnet, "VMXNET3 %p could not activate (status %#x)\n",
                       vmxnet, status );
                rc = -EIO;
                goto err_activate;
        }

        /* Fill receive ring */
        vmxnet3_refill_rx ( netdev );

        return 0;

        vmxnet3_command ( vmxnet, VMXNET3_CMD_QUIESCE_DEV );
        vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV );
 err_activate:
        vmxnet3_flush_tx ( netdev );
        vmxnet3_flush_rx ( netdev );
        free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
 err_alloc_dma:
        return rc;
}
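
/* Note on the error path above: the statements between "return 0;" and
 * the first error label are deliberately unreachable.  This follows
 * the usual iPXE convention of writing the error path as the mirror
 * image of the setup path, so that each "goto err_xxx" unwinds exactly
 * the steps completed before the failure; vmxnet3_close() below
 * performs the same teardown in full.
 */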

/**
 * Close NIC
 *
 * @v netdev		Network device
 */
static void vmxnet3_close ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );

        vmxnet3_command ( vmxnet, VMXNET3_CMD_QUIESCE_DEV );
        vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV );
        vmxnet3_flush_tx ( netdev );
        vmxnet3_flush_rx ( netdev );
        free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
}

/** vmxnet3 net device operations */
static struct net_device_operations vmxnet3_operations = {
        .open		= vmxnet3_open,
        .close		= vmxnet3_close,
        .transmit	= vmxnet3_transmit,
        .poll		= vmxnet3_poll,
        .irq		= vmxnet3_irq,
};

/**
 * Check version
 *
 * @v vmxnet		vmxnet3 NIC
 * @ret rc		Return status code
 */
static int vmxnet3_check_version ( struct vmxnet3_nic *vmxnet ) {
        uint32_t version;
        uint32_t upt_version;

        /* Read version */
        version = readl ( vmxnet->vd + VMXNET3_VD_VRRS );
        upt_version = readl ( vmxnet->vd + VMXNET3_VD_UVRS );
        DBGC ( vmxnet, "VMXNET3 %p is version %d (UPT version %d)\n",
               vmxnet, version, upt_version );

        /* Inform NIC of driver version */
        writel ( VMXNET3_VERSION_SELECT, ( vmxnet->vd + VMXNET3_VD_VRRS ) );
        writel ( VMXNET3_UPT_VERSION_SELECT, ( vmxnet->vd + VMXNET3_VD_UVRS ) );

        return 0;
}
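
/* Note: the version handshake is two-way: the driver reads the
 * device-reported revision values from VMXNET3_VD_VRRS and
 * VMXNET3_VD_UVRS, then writes back the revision it selects
 * (VMXNET3_VERSION_SELECT / VMXNET3_UPT_VERSION_SELECT).  As written,
 * this routine only logs the values and always succeeds.
 */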

/**
 * Get permanent MAC address
 *
 * @v vmxnet		vmxnet3 NIC
 * @v hw_addr		Hardware address to fill in
 */
static void vmxnet3_get_hw_addr ( struct vmxnet3_nic *vmxnet, void *hw_addr ) {
        struct {
                uint32_t low;
                uint32_t high;
        } __attribute__ (( packed )) mac;

        mac.low = le32_to_cpu ( vmxnet3_command ( vmxnet,
                                        VMXNET3_CMD_GET_PERM_MAC_LO ) );
        mac.high = le32_to_cpu ( vmxnet3_command ( vmxnet,
                                        VMXNET3_CMD_GET_PERM_MAC_HI ) );
        memcpy ( hw_addr, &mac, ETH_ALEN );
}

/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int vmxnet3_probe ( struct pci_device *pci ) {
        struct net_device *netdev;
        struct vmxnet3_nic *vmxnet;
        int rc;

        /* Allocate network device */
        netdev = alloc_etherdev ( sizeof ( *vmxnet ) );
        if ( ! netdev ) {
                rc = -ENOMEM;
                goto err_alloc_etherdev;
        }
        netdev_init ( netdev, &vmxnet3_operations );
        vmxnet = netdev_priv ( netdev );
        pci_set_drvdata ( pci, netdev );
        netdev->dev = &pci->dev;
        memset ( vmxnet, 0, sizeof ( *vmxnet ) );

        /* Fix up PCI device */
        adjust_pci_device ( pci );

        /* Map PCI BARs */
        vmxnet->pt = ioremap ( pci_bar_start ( pci, VMXNET3_PT_BAR ),
                               VMXNET3_PT_LEN );
        if ( ! vmxnet->pt ) {
                rc = -ENODEV;
                goto err_ioremap_pt;
        }
        vmxnet->vd = ioremap ( pci_bar_start ( pci, VMXNET3_VD_BAR ),
                               VMXNET3_VD_LEN );
        if ( ! vmxnet->vd ) {
                rc = -ENODEV;
                goto err_ioremap_vd;
        }

        /* Version check */
        if ( ( rc = vmxnet3_check_version ( vmxnet ) ) != 0 )
                goto err_check_version;

        /* Reset device */
        if ( ( rc = vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV ) ) != 0 )
                goto err_reset;

        /* Read initial MAC address */
        vmxnet3_get_hw_addr ( vmxnet, &netdev->hw_addr );

        /* Register network device */
        if ( ( rc = register_netdev ( netdev ) ) != 0 ) {
                DBGC ( vmxnet, "VMXNET3 %p could not register net device: "
                       "%s\n", vmxnet, strerror ( rc ) );
                goto err_register_netdev;
        }

        /* Get initial link state */
        vmxnet3_check_link ( netdev );

        return 0;

        unregister_netdev ( netdev );
 err_register_netdev:
 err_reset:
 err_check_version:
        iounmap ( vmxnet->vd );
 err_ioremap_vd:
        iounmap ( vmxnet->pt );
 err_ioremap_pt:
        netdev_nullify ( netdev );
        netdev_put ( netdev );
 err_alloc_etherdev:
        return rc;
}
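
/* Note: the two BARs mapped in vmxnet3_probe() serve different roles:
 * vmxnet->pt points at the BAR holding the producer ("doorbell")
 * registers such as VMXNET3_PT_TXPROD and VMXNET3_PT_RXPROD, while
 * vmxnet->vd points at the BAR holding the command, MAC address and
 * event registers (VMXNET3_VD_*) used throughout this driver.
 */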

/**
 * Remove PCI device
 *
 * @v pci		PCI device
 */
static void vmxnet3_remove ( struct pci_device *pci ) {
        struct net_device *netdev = pci_get_drvdata ( pci );
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );

        unregister_netdev ( netdev );
        iounmap ( vmxnet->vd );
        iounmap ( vmxnet->pt );
        netdev_nullify ( netdev );
        netdev_put ( netdev );
}

/** vmxnet3 PCI IDs */
static struct pci_device_id vmxnet3_nics[] = {
        PCI_ROM ( 0x15ad, 0x07b0, "vmxnet3", "vmxnet3 virtual NIC", 0 ),
};

/** vmxnet3 PCI driver */
struct pci_driver vmxnet3_driver __pci_driver = {
        .ids = vmxnet3_nics,
        .id_count = ( sizeof ( vmxnet3_nics ) / sizeof ( vmxnet3_nics[0] ) ),
        .probe = vmxnet3_probe,
        .remove = vmxnet3_remove,
};