
vmxnet3.c

/*
 * Copyright (C) 2011 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );

#include <stdint.h>
#include <errno.h>
#include <assert.h>
#include <byteswap.h>
#include <ipxe/pci.h>
#include <ipxe/io.h>
#include <ipxe/malloc.h>
#include <ipxe/profile.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include "vmxnet3.h"

/**
 * @file
 *
 * VMware vmxnet3 virtual NIC driver
 *
 */

/** VM command profiler */
static struct profiler vmxnet3_vm_command_profiler __profiler =
        { .name = "vmxnet3.vm_command" };

/** VM transmit profiler */
static struct profiler vmxnet3_vm_tx_profiler __profiler =
        { .name = "vmxnet3.vm_tx" };

/** VM receive refill profiler */
static struct profiler vmxnet3_vm_refill_profiler __profiler =
        { .name = "vmxnet3.vm_refill" };

/** VM event profiler */
static struct profiler vmxnet3_vm_event_profiler __profiler =
        { .name = "vmxnet3.vm_event" };

/**
 * Issue command
 *
 * @v vmxnet    vmxnet3 NIC
 * @v command   Command to issue
 * @ret result  Command result
 */
static inline uint32_t vmxnet3_command ( struct vmxnet3_nic *vmxnet,
                                         uint32_t command ) {
        uint32_t result;

        /* Issue command */
        profile_start ( &vmxnet3_vm_command_profiler );
        writel ( command, ( vmxnet->vd + VMXNET3_VD_CMD ) );
        result = readl ( vmxnet->vd + VMXNET3_VD_CMD );
        profile_stop ( &vmxnet3_vm_command_profiler );
        profile_exclude ( &vmxnet3_vm_command_profiler );

        return result;
}

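/*
 * Ring bookkeeping note: the producer and consumer counts used below
 * are free-running counters.  A descriptor index is derived by taking
 * the counter modulo the ring size, and the generation flag by ANDing
 * the counter with the ring size; this arithmetic assumes the ring
 * sizes defined in vmxnet3.h are powers of two, so that the ANDed bit
 * toggles on every wrap of the ring and lets driver and device tell
 * freshly written descriptors apart from stale ones.
 */
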
/**
 * Transmit packet
 *
 * @v netdev    Network device
 * @v iobuf     I/O buffer
 * @ret rc      Return status code
 */
static int vmxnet3_transmit ( struct net_device *netdev,
                              struct io_buffer *iobuf ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_tx_desc *tx_desc;
        unsigned int fill;
        unsigned int desc_idx;
        unsigned int generation;

        /* Check that we have a free transmit descriptor */
        fill = ( vmxnet->count.tx_prod - vmxnet->count.tx_cons );
        if ( fill >= VMXNET3_TX_FILL ) {
                DBGC ( vmxnet, "VMXNET3 %p out of transmit descriptors\n",
                       vmxnet );
                return -ENOBUFS;
        }

        /* Locate transmit descriptor */
        desc_idx = ( vmxnet->count.tx_prod % VMXNET3_NUM_TX_DESC );
        generation = ( ( vmxnet->count.tx_prod & VMXNET3_NUM_TX_DESC ) ?
                       0 : cpu_to_le32 ( VMXNET3_TXF_GEN ) );
        assert ( vmxnet->tx_iobuf[desc_idx] == NULL );

        /* Increment producer counter */
        vmxnet->count.tx_prod++;

        /* Store I/O buffer for later completion */
        vmxnet->tx_iobuf[desc_idx] = iobuf;

        /* Populate transmit descriptor */
        tx_desc = &vmxnet->dma->tx_desc[desc_idx];
        tx_desc->address = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
        tx_desc->flags[0] = ( generation | cpu_to_le32 ( iob_len ( iobuf ) ) );
        tx_desc->flags[1] = cpu_to_le32 ( VMXNET3_TXF_CQ | VMXNET3_TXF_EOP );

        /* Hand over descriptor to NIC */
        wmb();
        profile_start ( &vmxnet3_vm_tx_profiler );
        writel ( ( vmxnet->count.tx_prod % VMXNET3_NUM_TX_DESC ),
                 ( vmxnet->pt + VMXNET3_PT_TXPROD ) );
        profile_stop ( &vmxnet3_vm_tx_profiler );
        profile_exclude ( &vmxnet3_vm_tx_profiler );

        return 0;
}

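/*
 * Transmit completions arrive on a separate completion ring.  Each
 * completion entry carries the index of the transmit descriptor that
 * has finished, which is used below to look up and release the
 * corresponding I/O buffer.
 */
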
/**
 * Poll for completed transmissions
 *
 * @v netdev    Network device
 */
static void vmxnet3_poll_tx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_tx_comp *tx_comp;
        struct io_buffer *iobuf;
        unsigned int comp_idx;
        unsigned int desc_idx;
        unsigned int generation;

        while ( 1 ) {

                /* Look for completed descriptors */
                comp_idx = ( vmxnet->count.tx_cons % VMXNET3_NUM_TX_COMP );
                generation = ( ( vmxnet->count.tx_cons & VMXNET3_NUM_TX_COMP ) ?
                               0 : cpu_to_le32 ( VMXNET3_TXCF_GEN ) );
                tx_comp = &vmxnet->dma->tx_comp[comp_idx];
                if ( generation != ( tx_comp->flags &
                                     cpu_to_le32 ( VMXNET3_TXCF_GEN ) ) ) {
                        break;
                }

                /* Increment consumer counter */
                vmxnet->count.tx_cons++;

                /* Locate corresponding transmit descriptor */
                desc_idx = ( le32_to_cpu ( tx_comp->index ) %
                             VMXNET3_NUM_TX_DESC );
                iobuf = vmxnet->tx_iobuf[desc_idx];
                if ( ! iobuf ) {
                        DBGC ( vmxnet, "VMXNET3 %p completed on empty transmit "
                               "buffer %#x/%#x\n", vmxnet, comp_idx, desc_idx );
                        netdev_tx_err ( netdev, NULL, -ENOTTY );
                        continue;
                }

                /* Remove I/O buffer from transmit queue */
                vmxnet->tx_iobuf[desc_idx] = NULL;

                /* Report transmission completion to network layer */
                DBGC2 ( vmxnet, "VMXNET3 %p completed TX %#x/%#x (len %#zx)\n",
                        vmxnet, comp_idx, desc_idx, iob_len ( iobuf ) );
                netdev_tx_complete ( netdev, iobuf );
        }
}

/**
 * Flush any uncompleted transmit buffers
 *
 * @v netdev    Network device
 */
static void vmxnet3_flush_tx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        unsigned int i;

        for ( i = 0 ; i < VMXNET3_NUM_TX_DESC ; i++ ) {
                if ( vmxnet->tx_iobuf[i] ) {
                        netdev_tx_complete_err ( netdev, vmxnet->tx_iobuf[i],
                                                 -ECANCELED );
                        vmxnet->tx_iobuf[i] = NULL;
                }
        }
}

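/*
 * The receive ring is kept topped up to VMXNET3_RX_FILL buffers.
 * Each buffer is sized for a full-MTU frame, with NET_IP_ALIGN bytes
 * reserved at the start so that the IP header ends up naturally
 * aligned once the Ethernet header has been stripped.  Allocation
 * failure is non-fatal; the ring is simply refilled on a later poll.
 */
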
/**
 * Refill receive ring
 *
 * @v netdev    Network device
 */
static void vmxnet3_refill_rx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_rx_desc *rx_desc;
        struct io_buffer *iobuf;
        unsigned int orig_rx_prod = vmxnet->count.rx_prod;
        unsigned int desc_idx;
        unsigned int generation;

        /* Fill receive ring to specified fill level */
        while ( vmxnet->count.rx_fill < VMXNET3_RX_FILL ) {

                /* Locate receive descriptor */
                desc_idx = ( vmxnet->count.rx_prod % VMXNET3_NUM_RX_DESC );
                generation = ( ( vmxnet->count.rx_prod & VMXNET3_NUM_RX_DESC ) ?
                               0 : cpu_to_le32 ( VMXNET3_RXF_GEN ) );
                assert ( vmxnet->rx_iobuf[desc_idx] == NULL );

                /* Allocate I/O buffer */
                iobuf = alloc_iob ( VMXNET3_MTU + NET_IP_ALIGN );
                if ( ! iobuf ) {
                        /* Non-fatal low memory condition */
                        break;
                }
                iob_reserve ( iobuf, NET_IP_ALIGN );

                /* Increment producer counter and fill level */
                vmxnet->count.rx_prod++;
                vmxnet->count.rx_fill++;

                /* Store I/O buffer for later completion */
                vmxnet->rx_iobuf[desc_idx] = iobuf;

                /* Populate receive descriptor */
                rx_desc = &vmxnet->dma->rx_desc[desc_idx];
                rx_desc->address = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
                rx_desc->flags = ( generation | cpu_to_le32 ( VMXNET3_MTU ) );
        }

        /* Hand over any new descriptors to NIC */
        if ( vmxnet->count.rx_prod != orig_rx_prod ) {
                wmb();
                profile_start ( &vmxnet3_vm_refill_profiler );
                writel ( ( vmxnet->count.rx_prod % VMXNET3_NUM_RX_DESC ),
                         ( vmxnet->pt + VMXNET3_PT_RXPROD ) );
                profile_stop ( &vmxnet3_vm_refill_profiler );
                profile_exclude ( &vmxnet3_vm_refill_profiler );
        }
}

/**
 * Poll for received packets
 *
 * @v netdev    Network device
 */
static void vmxnet3_poll_rx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_rx_comp *rx_comp;
        struct io_buffer *iobuf;
        unsigned int comp_idx;
        unsigned int desc_idx;
        unsigned int generation;
        size_t len;

        while ( 1 ) {

                /* Look for completed descriptors */
                comp_idx = ( vmxnet->count.rx_cons % VMXNET3_NUM_RX_COMP );
                generation = ( ( vmxnet->count.rx_cons & VMXNET3_NUM_RX_COMP ) ?
                               0 : cpu_to_le32 ( VMXNET3_RXCF_GEN ) );
                rx_comp = &vmxnet->dma->rx_comp[comp_idx];
                if ( generation != ( rx_comp->flags &
                                     cpu_to_le32 ( VMXNET3_RXCF_GEN ) ) ) {
                        break;
                }

                /* Increment consumer counter */
                vmxnet->count.rx_cons++;

                /* Locate corresponding receive descriptor */
                desc_idx = ( le32_to_cpu ( rx_comp->index ) %
                             VMXNET3_NUM_RX_DESC );
                iobuf = vmxnet->rx_iobuf[desc_idx];
                if ( ! iobuf ) {
                        DBGC ( vmxnet, "VMXNET3 %p completed on empty receive "
                               "buffer %#x/%#x\n", vmxnet, comp_idx, desc_idx );
                        netdev_rx_err ( netdev, NULL, -ENOTTY );
                        continue;
                }

                /* Remove I/O buffer from receive queue */
                vmxnet->rx_iobuf[desc_idx] = NULL;
                vmxnet->count.rx_fill--;

                /* Deliver packet to network layer */
                len = ( le32_to_cpu ( rx_comp->len ) &
                        ( VMXNET3_MAX_PACKET_LEN - 1 ) );
                DBGC2 ( vmxnet, "VMXNET3 %p completed RX %#x/%#x (len %#zx)\n",
                        vmxnet, comp_idx, desc_idx, len );
                iob_put ( iobuf, len );
                netdev_rx ( netdev, iobuf );
        }
}

/**
 * Flush any uncompleted receive buffers
 *
 * @v netdev    Network device
 */
static void vmxnet3_flush_rx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct io_buffer *iobuf;
        unsigned int i;

        for ( i = 0 ; i < VMXNET3_NUM_RX_DESC ; i++ ) {
                if ( ( iobuf = vmxnet->rx_iobuf[i] ) != NULL ) {
                        netdev_rx_err ( netdev, iobuf, -ECANCELED );
                        vmxnet->rx_iobuf[i] = NULL;
                }
        }
}

/**
 * Check link state
 *
 * @v netdev    Network device
 */
static void vmxnet3_check_link ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        uint32_t state;
        int link_up;
        unsigned int link_speed;

        /* Get link state */
        state = vmxnet3_command ( vmxnet, VMXNET3_CMD_GET_LINK );
        link_up = ( state & 1 );
        link_speed = ( state >> 16 );

        /* Report link state to network device */
        if ( link_up ) {
                DBGC ( vmxnet, "VMXNET3 %p link is up at %d Mbps\n",
                       vmxnet, link_speed );
                netdev_link_up ( netdev );
        } else {
                DBGC ( vmxnet, "VMXNET3 %p link is down\n", vmxnet );
                netdev_link_down ( netdev );
        }
}

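/*
 * Device events are reported via the event cause register (ECR),
 * whose value is mirrored into the shared memory area.  The poll
 * routine checks the shared copy first and touches the register only
 * to acknowledge pending events, so the comparatively expensive
 * device register access happens only when something has occurred.
 */
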
/**
 * Poll for events
 *
 * @v netdev    Network device
 */
static void vmxnet3_poll_events ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        uint32_t events;

        /* Do nothing unless there are events to process */
        if ( ! vmxnet->dma->shared.ecr )
                return;
        events = le32_to_cpu ( vmxnet->dma->shared.ecr );

        /* Acknowledge these events */
        profile_start ( &vmxnet3_vm_event_profiler );
        writel ( events, ( vmxnet->vd + VMXNET3_VD_ECR ) );
        profile_stop ( &vmxnet3_vm_event_profiler );
        profile_exclude ( &vmxnet3_vm_event_profiler );

        /* Check for link state change */
        if ( events & VMXNET3_ECR_LINK ) {
                vmxnet3_check_link ( netdev );
                events &= ~VMXNET3_ECR_LINK;
        }

        /* Check for queue errors */
        if ( events & ( VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR ) ) {
                vmxnet3_command ( vmxnet, VMXNET3_CMD_GET_QUEUE_STATUS );
                DBGC ( vmxnet, "VMXNET3 %p queue error status (TX %08x, RX "
                       "%08x)\n", vmxnet,
                       le32_to_cpu ( vmxnet->dma->queues.tx.status.error ),
                       le32_to_cpu ( vmxnet->dma->queues.rx.status.error ) );

                /* Report errors to allow for visibility via "ifstat" */
                if ( events & VMXNET3_ECR_TQERR )
                        netdev_tx_err ( netdev, NULL, -EPIPE );
                if ( events & VMXNET3_ECR_RQERR )
                        netdev_rx_err ( netdev, NULL, -EPIPE );

                events &= ~( VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR );
        }

        /* Check for unknown events */
        if ( events ) {
                DBGC ( vmxnet, "VMXNET3 %p unknown events %08x\n",
                       vmxnet, events );

                /* Report error to allow for visibility via "ifstat" */
                netdev_rx_err ( netdev, NULL, -ENODEV );
        }
}

/**
 * Poll network device
 *
 * @v netdev    Network device
 */
static void vmxnet3_poll ( struct net_device *netdev ) {

        vmxnet3_poll_events ( netdev );
        vmxnet3_poll_tx ( netdev );
        vmxnet3_poll_rx ( netdev );
        vmxnet3_refill_rx ( netdev );
}

/**
 * Enable/disable interrupts
 *
 * @v netdev    Network device
 * @v enable    Interrupts should be enabled
 */
static void vmxnet3_irq ( struct net_device *netdev, int enable ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );

        DBGC ( vmxnet, "VMXNET3 %p %s IRQ not implemented\n",
               vmxnet, ( enable ? "enable" : "disable" ) );
}

/**
 * Set MAC address
 *
 * @v vmxnet    vmxnet3 NIC
 * @v ll_addr   Link-layer address to set
 */
static void vmxnet3_set_ll_addr ( struct vmxnet3_nic *vmxnet,
                                  const void *ll_addr ) {
        struct {
                uint32_t low;
                uint32_t high;
        } __attribute__ (( packed )) mac;

        memset ( &mac, 0, sizeof ( mac ) );
        memcpy ( &mac, ll_addr, ETH_ALEN );
        writel ( cpu_to_le32 ( mac.low ), ( vmxnet->vd + VMXNET3_VD_MACL ) );
        writel ( cpu_to_le32 ( mac.high ), ( vmxnet->vd + VMXNET3_VD_MACH ) );
}

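/*
 * Opening the device builds two DMA-visible structures: the queue
 * descriptors (transmit and receive ring configuration) and the
 * "driver shared" area that points at them.  The physical address of
 * the shared area is handed to the device via the DSAL/DSAH
 * registers, after which ACTIVATE_DEV makes the configuration live.
 */
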
/**
 * Open NIC
 *
 * @v netdev    Network device
 * @ret rc      Return status code
 */
static int vmxnet3_open ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_shared *shared;
        struct vmxnet3_queues *queues;
        uint64_t shared_bus;
        uint64_t queues_bus;
        uint32_t status;
        int rc;

        /* Allocate DMA areas */
        vmxnet->dma = malloc_dma ( sizeof ( *vmxnet->dma ), VMXNET3_DMA_ALIGN );
        if ( ! vmxnet->dma ) {
                DBGC ( vmxnet, "VMXNET3 %p could not allocate DMA area\n",
                       vmxnet );
                rc = -ENOMEM;
                goto err_alloc_dma;
        }
        memset ( vmxnet->dma, 0, sizeof ( *vmxnet->dma ) );

        /* Populate queue descriptors */
        queues = &vmxnet->dma->queues;
        queues->tx.cfg.desc_address =
                cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->tx_desc ) );
        queues->tx.cfg.comp_address =
                cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->tx_comp ) );
        queues->tx.cfg.num_desc = cpu_to_le32 ( VMXNET3_NUM_TX_DESC );
        queues->tx.cfg.num_comp = cpu_to_le32 ( VMXNET3_NUM_TX_COMP );
        queues->rx.cfg.desc_address[0] =
                cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->rx_desc ) );
        queues->rx.cfg.comp_address =
                cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->rx_comp ) );
        queues->rx.cfg.num_desc[0] = cpu_to_le32 ( VMXNET3_NUM_RX_DESC );
        queues->rx.cfg.num_comp = cpu_to_le32 ( VMXNET3_NUM_RX_COMP );
        queues_bus = virt_to_bus ( queues );
        DBGC ( vmxnet, "VMXNET3 %p queue descriptors at %08llx+%zx\n",
               vmxnet, queues_bus, sizeof ( *queues ) );

        /* Populate shared area */
        shared = &vmxnet->dma->shared;
        shared->magic = cpu_to_le32 ( VMXNET3_SHARED_MAGIC );
        shared->misc.version = cpu_to_le32 ( VMXNET3_VERSION_MAGIC );
        shared->misc.version_support = cpu_to_le32 ( VMXNET3_VERSION_SELECT );
        shared->misc.upt_version_support =
                cpu_to_le32 ( VMXNET3_UPT_VERSION_SELECT );
        shared->misc.queue_desc_address = cpu_to_le64 ( queues_bus );
        shared->misc.queue_desc_len = cpu_to_le32 ( sizeof ( *queues ) );
        shared->misc.mtu = cpu_to_le32 ( VMXNET3_MTU );
        shared->misc.num_tx_queues = 1;
        shared->misc.num_rx_queues = 1;
        shared->interrupt.num_intrs = 1;
        shared->interrupt.control = cpu_to_le32 ( VMXNET3_IC_DISABLE_ALL );
        shared->rx_filter.mode = cpu_to_le32 ( VMXNET3_RXM_UCAST |
                                               VMXNET3_RXM_BCAST |
                                               VMXNET3_RXM_ALL_MULTI );
        shared_bus = virt_to_bus ( shared );
        DBGC ( vmxnet, "VMXNET3 %p shared area at %08llx+%zx\n",
               vmxnet, shared_bus, sizeof ( *shared ) );

        /* Zero counters */
        memset ( &vmxnet->count, 0, sizeof ( vmxnet->count ) );

        /* Set MAC address */
        vmxnet3_set_ll_addr ( vmxnet, &netdev->ll_addr );

        /* Pass shared area to device */
        writel ( ( shared_bus >> 0 ), ( vmxnet->vd + VMXNET3_VD_DSAL ) );
        writel ( ( shared_bus >> 32 ), ( vmxnet->vd + VMXNET3_VD_DSAH ) );

        /* Activate device */
        if ( ( status = vmxnet3_command ( vmxnet,
                                          VMXNET3_CMD_ACTIVATE_DEV ) ) != 0 ) {
                DBGC ( vmxnet, "VMXNET3 %p could not activate (status %#x)\n",
                       vmxnet, status );
                rc = -EIO;
                goto err_activate;
        }

        /* Fill receive ring */
        vmxnet3_refill_rx ( netdev );

        return 0;

        vmxnet3_command ( vmxnet, VMXNET3_CMD_QUIESCE_DEV );
        vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV );
 err_activate:
        vmxnet3_flush_tx ( netdev );
        vmxnet3_flush_rx ( netdev );
        free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
 err_alloc_dma:
        return rc;
}

/**
 * Close NIC
 *
 * @v netdev    Network device
 */
static void vmxnet3_close ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );

        vmxnet3_command ( vmxnet, VMXNET3_CMD_QUIESCE_DEV );
        vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV );
        vmxnet3_flush_tx ( netdev );
        vmxnet3_flush_rx ( netdev );
        free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
}

/** vmxnet3 net device operations */
static struct net_device_operations vmxnet3_operations = {
        .open     = vmxnet3_open,
        .close    = vmxnet3_close,
        .transmit = vmxnet3_transmit,
        .poll     = vmxnet3_poll,
        .irq      = vmxnet3_irq,
};

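/*
 * The version handshake is one-way: the driver reads the supported
 * revision registers for diagnostic output and then writes back the
 * revision it implements (VMXNET3_VERSION_SELECT and its UPT
 * counterpart).  No failure case is checked here, hence the
 * unconditional success return.
 */
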
/**
 * Check version
 *
 * @v vmxnet    vmxnet3 NIC
 * @ret rc      Return status code
 */
static int vmxnet3_check_version ( struct vmxnet3_nic *vmxnet ) {
        uint32_t version;
        uint32_t upt_version;

        /* Read version */
        version = readl ( vmxnet->vd + VMXNET3_VD_VRRS );
        upt_version = readl ( vmxnet->vd + VMXNET3_VD_UVRS );
        DBGC ( vmxnet, "VMXNET3 %p is version %d (UPT version %d)\n",
               vmxnet, version, upt_version );

        /* Inform NIC of driver version */
        writel ( VMXNET3_VERSION_SELECT, ( vmxnet->vd + VMXNET3_VD_VRRS ) );
        writel ( VMXNET3_UPT_VERSION_SELECT, ( vmxnet->vd + VMXNET3_VD_UVRS ) );

        return 0;
}

/**
 * Get permanent MAC address
 *
 * @v vmxnet    vmxnet3 NIC
 * @v hw_addr   Hardware address to fill in
 */
static void vmxnet3_get_hw_addr ( struct vmxnet3_nic *vmxnet, void *hw_addr ) {
        struct {
                uint32_t low;
                uint32_t high;
        } __attribute__ (( packed )) mac;

        mac.low = le32_to_cpu ( vmxnet3_command ( vmxnet,
                                                  VMXNET3_CMD_GET_PERM_MAC_LO ) );
        mac.high = le32_to_cpu ( vmxnet3_command ( vmxnet,
                                                   VMXNET3_CMD_GET_PERM_MAC_HI ) );
        memcpy ( hw_addr, &mac, ETH_ALEN );
}

/**
 * Probe PCI device
 *
 * @v pci       PCI device
 * @ret rc      Return status code
 */
static int vmxnet3_probe ( struct pci_device *pci ) {
        struct net_device *netdev;
        struct vmxnet3_nic *vmxnet;
        int rc;

        /* Allocate network device */
        netdev = alloc_etherdev ( sizeof ( *vmxnet ) );
        if ( ! netdev ) {
                rc = -ENOMEM;
                goto err_alloc_etherdev;
        }
        netdev_init ( netdev, &vmxnet3_operations );
        vmxnet = netdev_priv ( netdev );
        pci_set_drvdata ( pci, netdev );
        netdev->dev = &pci->dev;
        memset ( vmxnet, 0, sizeof ( *vmxnet ) );

        /* Fix up PCI device */
        adjust_pci_device ( pci );

        /* Map PCI BARs */
        vmxnet->pt = ioremap ( pci_bar_start ( pci, VMXNET3_PT_BAR ),
                               VMXNET3_PT_LEN );
        if ( ! vmxnet->pt ) {
                rc = -ENODEV;
                goto err_ioremap_pt;
        }
        vmxnet->vd = ioremap ( pci_bar_start ( pci, VMXNET3_VD_BAR ),
                               VMXNET3_VD_LEN );
        if ( ! vmxnet->vd ) {
                rc = -ENODEV;
                goto err_ioremap_vd;
        }

        /* Version check */
        if ( ( rc = vmxnet3_check_version ( vmxnet ) ) != 0 )
                goto err_check_version;

        /* Reset device */
        if ( ( rc = vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV ) ) != 0 )
                goto err_reset;

        /* Read initial MAC address */
        vmxnet3_get_hw_addr ( vmxnet, &netdev->hw_addr );

        /* Register network device */
        if ( ( rc = register_netdev ( netdev ) ) != 0 ) {
                DBGC ( vmxnet, "VMXNET3 %p could not register net device: "
                       "%s\n", vmxnet, strerror ( rc ) );
                goto err_register_netdev;
        }

        /* Get initial link state */
        vmxnet3_check_link ( netdev );

        return 0;

        unregister_netdev ( netdev );
 err_register_netdev:
 err_reset:
 err_check_version:
        iounmap ( vmxnet->vd );
 err_ioremap_vd:
        iounmap ( vmxnet->pt );
 err_ioremap_pt:
        netdev_nullify ( netdev );
        netdev_put ( netdev );
 err_alloc_etherdev:
        return rc;
}

/**
 * Remove PCI device
 *
 * @v pci       PCI device
 */
static void vmxnet3_remove ( struct pci_device *pci ) {
        struct net_device *netdev = pci_get_drvdata ( pci );
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );

        unregister_netdev ( netdev );
        iounmap ( vmxnet->vd );
        iounmap ( vmxnet->pt );
        netdev_nullify ( netdev );
        netdev_put ( netdev );
}

/** vmxnet3 PCI IDs */
static struct pci_device_id vmxnet3_nics[] = {
        PCI_ROM ( 0x15ad, 0x07b0, "vmxnet3", "vmxnet3 virtual NIC", 0 ),
};

/** vmxnet3 PCI driver */
struct pci_driver vmxnet3_driver __pci_driver = {
        .ids = vmxnet3_nics,
        .id_count = ( sizeof ( vmxnet3_nics ) / sizeof ( vmxnet3_nics[0] ) ),
        .probe = vmxnet3_probe,
        .remove = vmxnet3_remove,
};