
vmxnet3.c

/*
 * Copyright (C) 2011 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <byteswap.h>
#include <ipxe/pci.h>
#include <ipxe/io.h>
#include <ipxe/malloc.h>
#include <ipxe/profile.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include "vmxnet3.h"

/**
 * @file
 *
 * VMware vmxnet3 virtual NIC driver
 *
 */

/** VM command profiler */
static struct profiler vmxnet3_vm_command_profiler __profiler =
        { .name = "vmxnet3.vm_command" };

/** VM transmit profiler */
static struct profiler vmxnet3_vm_tx_profiler __profiler =
        { .name = "vmxnet3.vm_tx" };

/** VM receive refill profiler */
static struct profiler vmxnet3_vm_refill_profiler __profiler =
        { .name = "vmxnet3.vm_refill" };

/** VM event profiler */
static struct profiler vmxnet3_vm_event_profiler __profiler =
        { .name = "vmxnet3.vm_event" };

/**
 * Issue command
 *
 * @v vmxnet		vmxnet3 NIC
 * @v command		Command to issue
 * @ret result		Command result
 */
static inline uint32_t vmxnet3_command ( struct vmxnet3_nic *vmxnet,
                                         uint32_t command ) {
        uint32_t result;

        /* Issue command */
        profile_start ( &vmxnet3_vm_command_profiler );
        writel ( command, ( vmxnet->vd + VMXNET3_VD_CMD ) );
        result = readl ( vmxnet->vd + VMXNET3_VD_CMD );
        profile_stop ( &vmxnet3_vm_command_profiler );
        profile_exclude ( &vmxnet3_vm_command_profiler );

        return result;
}

/**
 * Transmit packet
 *
 * @v netdev		Network device
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int vmxnet3_transmit ( struct net_device *netdev,
                              struct io_buffer *iobuf ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_tx_desc *tx_desc;
        unsigned int desc_idx;
        unsigned int generation;

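        /* Ring indexing note: the producer counter counts up freely.
         * Taking it modulo the ring size gives the descriptor slot,
         * and ANDing it with the ring size (a power of two) yields a
         * bit that toggles each time the ring wraps; that bit becomes
         * the descriptor generation flag, which lets the device tell
         * freshly posted descriptors from stale ones.  The same scheme
         * is used for the receive and completion rings below. */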
        /* Check that we have a free transmit descriptor */
        desc_idx = ( vmxnet->count.tx_prod % VMXNET3_NUM_TX_DESC );
        generation = ( ( vmxnet->count.tx_prod & VMXNET3_NUM_TX_DESC ) ?
                       0 : cpu_to_le32 ( VMXNET3_TXF_GEN ) );
        if ( vmxnet->tx_iobuf[desc_idx] ) {
                DBGC ( vmxnet, "VMXNET3 %p out of transmit descriptors\n",
                       vmxnet );
                return -ENOBUFS;
        }

        /* Increment producer counter */
        vmxnet->count.tx_prod++;

        /* Store I/O buffer for later completion */
        vmxnet->tx_iobuf[desc_idx] = iobuf;

        /* Populate transmit descriptor */
        tx_desc = &vmxnet->dma->tx_desc[desc_idx];
        tx_desc->address = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
        tx_desc->flags[0] = ( generation | cpu_to_le32 ( iob_len ( iobuf ) ) );
        tx_desc->flags[1] = cpu_to_le32 ( VMXNET3_TXF_CQ | VMXNET3_TXF_EOP );

        /* Hand over descriptor to NIC */
        wmb();
        profile_start ( &vmxnet3_vm_tx_profiler );
        writel ( ( vmxnet->count.tx_prod % VMXNET3_NUM_TX_DESC ),
                 ( vmxnet->pt + VMXNET3_PT_TXPROD ) );
        profile_stop ( &vmxnet3_vm_tx_profiler );
        profile_exclude ( &vmxnet3_vm_tx_profiler );

        return 0;
}

/**
 * Poll for completed transmissions
 *
 * @v netdev		Network device
 */
static void vmxnet3_poll_tx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_tx_comp *tx_comp;
        struct io_buffer *iobuf;
        unsigned int comp_idx;
        unsigned int desc_idx;
        unsigned int generation;

        while ( 1 ) {

                /* Look for completed descriptors */
                comp_idx = ( vmxnet->count.tx_cons % VMXNET3_NUM_TX_COMP );
                generation = ( ( vmxnet->count.tx_cons & VMXNET3_NUM_TX_COMP ) ?
                               0 : cpu_to_le32 ( VMXNET3_TXCF_GEN ) );
                tx_comp = &vmxnet->dma->tx_comp[comp_idx];
                if ( generation != ( tx_comp->flags &
                                     cpu_to_le32 ( VMXNET3_TXCF_GEN ) ) ) {
                        break;
                }

                /* Increment consumer counter */
                vmxnet->count.tx_cons++;

                /* Locate corresponding transmit descriptor */
                desc_idx = ( le32_to_cpu ( tx_comp->index ) %
                             VMXNET3_NUM_TX_DESC );
                iobuf = vmxnet->tx_iobuf[desc_idx];
                if ( ! iobuf ) {
                        DBGC ( vmxnet, "VMXNET3 %p completed on empty transmit "
                               "buffer %#x/%#x\n", vmxnet, comp_idx, desc_idx );
                        netdev_tx_err ( netdev, NULL, -ENOTTY );
                        continue;
                }

                /* Remove I/O buffer from transmit queue */
                vmxnet->tx_iobuf[desc_idx] = NULL;

                /* Report transmission completion to network layer */
                DBGC2 ( vmxnet, "VMXNET3 %p completed TX %#x/%#x (len %#zx)\n",
                        vmxnet, comp_idx, desc_idx, iob_len ( iobuf ) );
                netdev_tx_complete ( netdev, iobuf );
        }
}

/**
 * Flush any uncompleted transmit buffers
 *
 * @v netdev		Network device
 */
static void vmxnet3_flush_tx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        unsigned int i;

        for ( i = 0 ; i < VMXNET3_NUM_TX_DESC ; i++ ) {
                if ( vmxnet->tx_iobuf[i] ) {
                        netdev_tx_complete_err ( netdev, vmxnet->tx_iobuf[i],
                                                 -ECANCELED );
                        vmxnet->tx_iobuf[i] = NULL;
                }
        }
}

/**
 * Refill receive ring
 *
 * @v netdev		Network device
 */
static void vmxnet3_refill_rx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_rx_desc *rx_desc;
        struct io_buffer *iobuf;
        unsigned int orig_rx_prod = vmxnet->count.rx_prod;
        unsigned int desc_idx;
        unsigned int generation;

        /* Fill receive ring to specified fill level */
        while ( vmxnet->count.rx_fill < VMXNET3_RX_FILL ) {

                /* Locate receive descriptor */
                desc_idx = ( vmxnet->count.rx_prod % VMXNET3_NUM_RX_DESC );
                generation = ( ( vmxnet->count.rx_prod & VMXNET3_NUM_RX_DESC ) ?
                               0 : cpu_to_le32 ( VMXNET3_RXF_GEN ) );
                assert ( vmxnet->rx_iobuf[desc_idx] == NULL );

                /* Allocate I/O buffer */
                iobuf = alloc_iob ( VMXNET3_MTU + NET_IP_ALIGN );
                if ( ! iobuf ) {
                        /* Non-fatal low memory condition */
                        break;
                }
                iob_reserve ( iobuf, NET_IP_ALIGN );
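                /* The NET_IP_ALIGN bytes of headroom reserved above
                 * offset the Ethernet header so that the IP header
                 * following it lands on a natural alignment boundary. */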

                /* Increment producer counter and fill level */
                vmxnet->count.rx_prod++;
                vmxnet->count.rx_fill++;

                /* Store I/O buffer for later completion */
                vmxnet->rx_iobuf[desc_idx] = iobuf;

                /* Populate receive descriptor */
                rx_desc = &vmxnet->dma->rx_desc[desc_idx];
                rx_desc->address = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
                rx_desc->flags = ( generation | cpu_to_le32 ( VMXNET3_MTU ) );
        }

        /* Hand over any new descriptors to NIC */
        if ( vmxnet->count.rx_prod != orig_rx_prod ) {
                wmb();
                profile_start ( &vmxnet3_vm_refill_profiler );
                writel ( ( vmxnet->count.rx_prod % VMXNET3_NUM_RX_DESC ),
                         ( vmxnet->pt + VMXNET3_PT_RXPROD ) );
                profile_stop ( &vmxnet3_vm_refill_profiler );
                profile_exclude ( &vmxnet3_vm_refill_profiler );
        }
}

/**
 * Poll for received packets
 *
 * @v netdev		Network device
 */
static void vmxnet3_poll_rx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_rx_comp *rx_comp;
        struct io_buffer *iobuf;
        unsigned int comp_idx;
        unsigned int desc_idx;
        unsigned int generation;
        size_t len;

        while ( 1 ) {

                /* Look for completed descriptors */
                comp_idx = ( vmxnet->count.rx_cons % VMXNET3_NUM_RX_COMP );
                generation = ( ( vmxnet->count.rx_cons & VMXNET3_NUM_RX_COMP ) ?
                               0 : cpu_to_le32 ( VMXNET3_RXCF_GEN ) );
                rx_comp = &vmxnet->dma->rx_comp[comp_idx];
                if ( generation != ( rx_comp->flags &
                                     cpu_to_le32 ( VMXNET3_RXCF_GEN ) ) ) {
                        break;
                }

                /* Increment consumer counter */
                vmxnet->count.rx_cons++;

                /* Locate corresponding receive descriptor */
                desc_idx = ( le32_to_cpu ( rx_comp->index ) %
                             VMXNET3_NUM_RX_DESC );
                iobuf = vmxnet->rx_iobuf[desc_idx];
                if ( ! iobuf ) {
                        DBGC ( vmxnet, "VMXNET3 %p completed on empty receive "
                               "buffer %#x/%#x\n", vmxnet, comp_idx, desc_idx );
                        netdev_rx_err ( netdev, NULL, -ENOTTY );
                        continue;
                }

                /* Remove I/O buffer from receive queue */
                vmxnet->rx_iobuf[desc_idx] = NULL;
                vmxnet->count.rx_fill--;

                /* Deliver packet to network layer */
                len = ( le32_to_cpu ( rx_comp->len ) &
                        ( VMXNET3_MAX_PACKET_LEN - 1 ) );
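                /* The completion's len dword also carries flag bits;
                 * masking with ( VMXNET3_MAX_PACKET_LEN - 1 ) keeps
                 * only the packet length. */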
                DBGC2 ( vmxnet, "VMXNET3 %p completed RX %#x/%#x (len %#zx)\n",
                        vmxnet, comp_idx, desc_idx, len );
                iob_put ( iobuf, len );
                netdev_rx ( netdev, iobuf );
        }
}

/**
 * Flush any uncompleted receive buffers
 *
 * @v netdev		Network device
 */
static void vmxnet3_flush_rx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct io_buffer *iobuf;
        unsigned int i;

        for ( i = 0 ; i < VMXNET3_NUM_RX_DESC ; i++ ) {
                if ( ( iobuf = vmxnet->rx_iobuf[i] ) != NULL ) {
                        netdev_rx_err ( netdev, iobuf, -ECANCELED );
                        vmxnet->rx_iobuf[i] = NULL;
                }
        }
}

/**
 * Check link state
 *
 * @v netdev		Network device
 */
static void vmxnet3_check_link ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        uint32_t state;
        int link_up;
        unsigned int link_speed;

        /* Get link state */
        state = vmxnet3_command ( vmxnet, VMXNET3_CMD_GET_LINK );
        link_up = ( state & 1 );
        link_speed = ( state >> 16 );

        /* Report link state to network device */
        if ( link_up ) {
                DBGC ( vmxnet, "VMXNET3 %p link is up at %d Mbps\n",
                       vmxnet, link_speed );
                netdev_link_up ( netdev );
        } else {
                DBGC ( vmxnet, "VMXNET3 %p link is down\n", vmxnet );
                netdev_link_down ( netdev );
        }
}

/**
 * Poll for events
 *
 * @v netdev		Network device
 */
static void vmxnet3_poll_events ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        uint32_t events;

        /* Do nothing unless there are events to process */
        if ( ! vmxnet->dma->shared.ecr )
                return;
        events = le32_to_cpu ( vmxnet->dma->shared.ecr );

        /* Acknowledge these events */
        profile_start ( &vmxnet3_vm_event_profiler );
        writel ( events, ( vmxnet->vd + VMXNET3_VD_ECR ) );
        profile_stop ( &vmxnet3_vm_event_profiler );
        profile_exclude ( &vmxnet3_vm_event_profiler );

        /* Check for link state change */
        if ( events & VMXNET3_ECR_LINK ) {
                vmxnet3_check_link ( netdev );
                events &= ~VMXNET3_ECR_LINK;
        }

        /* Check for queue errors */
        if ( events & ( VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR ) ) {
                vmxnet3_command ( vmxnet, VMXNET3_CMD_GET_QUEUE_STATUS );
                DBGC ( vmxnet, "VMXNET3 %p queue error status (TX %08x, RX "
                       "%08x)\n", vmxnet,
                       le32_to_cpu ( vmxnet->dma->queues.tx.status.error ),
                       le32_to_cpu ( vmxnet->dma->queues.rx.status.error ) );
                /* Report errors to allow for visibility via "ifstat" */
                if ( events & VMXNET3_ECR_TQERR )
                        netdev_tx_err ( netdev, NULL, -EPIPE );
                if ( events & VMXNET3_ECR_RQERR )
                        netdev_rx_err ( netdev, NULL, -EPIPE );
                events &= ~( VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR );
        }

        /* Check for unknown events */
        if ( events ) {
                DBGC ( vmxnet, "VMXNET3 %p unknown events %08x\n",
                       vmxnet, events );
                /* Report error to allow for visibility via "ifstat" */
                netdev_rx_err ( netdev, NULL, -ENODEV );
        }
}

/**
 * Poll network device
 *
 * @v netdev		Network device
 */
static void vmxnet3_poll ( struct net_device *netdev ) {
        vmxnet3_poll_events ( netdev );
        vmxnet3_poll_tx ( netdev );
        vmxnet3_poll_rx ( netdev );
        vmxnet3_refill_rx ( netdev );
}

/**
 * Enable/disable interrupts
 *
 * @v netdev		Network device
 * @v enable		Interrupts should be enabled
 */
static void vmxnet3_irq ( struct net_device *netdev, int enable ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );

        DBGC ( vmxnet, "VMXNET3 %p %s IRQ not implemented\n",
               vmxnet, ( enable ? "enable" : "disable" ) );
}

/**
 * Set MAC address
 *
 * @v vmxnet		vmxnet3 NIC
 * @v ll_addr		Link-layer address to set
 */
static void vmxnet3_set_ll_addr ( struct vmxnet3_nic *vmxnet,
                                  const void *ll_addr ) {
        struct {
                uint32_t low;
                uint32_t high;
        } __attribute__ (( packed )) mac;

        memset ( &mac, 0, sizeof ( mac ) );
        memcpy ( &mac, ll_addr, ETH_ALEN );
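        /* The six-byte MAC fills all of mac.low and the low 16 bits of
         * mac.high; the memset above leaves the remaining bits zero. */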
        writel ( cpu_to_le32 ( mac.low ), ( vmxnet->vd + VMXNET3_VD_MACL ) );
        writel ( cpu_to_le32 ( mac.high ), ( vmxnet->vd + VMXNET3_VD_MACH ) );
}

/**
 * Open NIC
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int vmxnet3_open ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_shared *shared;
        struct vmxnet3_queues *queues;
        uint64_t shared_bus;
        uint64_t queues_bus;
        uint32_t status;
        int rc;

        /* Allocate DMA areas */
        vmxnet->dma = malloc_dma ( sizeof ( *vmxnet->dma ), VMXNET3_DMA_ALIGN );
        if ( ! vmxnet->dma ) {
                DBGC ( vmxnet, "VMXNET3 %p could not allocate DMA area\n",
                       vmxnet );
                rc = -ENOMEM;
                goto err_alloc_dma;
        }
        memset ( vmxnet->dma, 0, sizeof ( *vmxnet->dma ) );

        /* Populate queue descriptors */
        queues = &vmxnet->dma->queues;
        queues->tx.cfg.desc_address =
                cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->tx_desc ) );
        queues->tx.cfg.comp_address =
                cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->tx_comp ) );
        queues->tx.cfg.num_desc = cpu_to_le32 ( VMXNET3_NUM_TX_DESC );
        queues->tx.cfg.num_comp = cpu_to_le32 ( VMXNET3_NUM_TX_COMP );
        queues->rx.cfg.desc_address[0] =
                cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->rx_desc ) );
        queues->rx.cfg.comp_address =
                cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->rx_comp ) );
        queues->rx.cfg.num_desc[0] = cpu_to_le32 ( VMXNET3_NUM_RX_DESC );
        queues->rx.cfg.num_comp = cpu_to_le32 ( VMXNET3_NUM_RX_COMP );
        queues_bus = virt_to_bus ( queues );
        DBGC ( vmxnet, "VMXNET3 %p queue descriptors at %08llx+%zx\n",
               vmxnet, queues_bus, sizeof ( *queues ) );

        /* Populate shared area */
        shared = &vmxnet->dma->shared;
        shared->magic = cpu_to_le32 ( VMXNET3_SHARED_MAGIC );
        shared->misc.version = cpu_to_le32 ( VMXNET3_VERSION_MAGIC );
        shared->misc.version_support = cpu_to_le32 ( VMXNET3_VERSION_SELECT );
        shared->misc.upt_version_support =
                cpu_to_le32 ( VMXNET3_UPT_VERSION_SELECT );
        shared->misc.queue_desc_address = cpu_to_le64 ( queues_bus );
        shared->misc.queue_desc_len = cpu_to_le32 ( sizeof ( *queues ) );
        shared->misc.mtu = cpu_to_le32 ( VMXNET3_MTU );
        shared->misc.num_tx_queues = 1;
        shared->misc.num_rx_queues = 1;
        shared->interrupt.num_intrs = 1;
        shared->interrupt.control = cpu_to_le32 ( VMXNET3_IC_DISABLE_ALL );
        shared->rx_filter.mode = cpu_to_le32 ( VMXNET3_RXM_UCAST |
                                               VMXNET3_RXM_BCAST |
                                               VMXNET3_RXM_ALL_MULTI );
        shared_bus = virt_to_bus ( shared );
        DBGC ( vmxnet, "VMXNET3 %p shared area at %08llx+%zx\n",
               vmxnet, shared_bus, sizeof ( *shared ) );

        /* Zero counters */
        memset ( &vmxnet->count, 0, sizeof ( vmxnet->count ) );

        /* Set MAC address */
        vmxnet3_set_ll_addr ( vmxnet, &netdev->ll_addr );

        /* Pass shared area to device */
        writel ( ( shared_bus >> 0 ), ( vmxnet->vd + VMXNET3_VD_DSAL ) );
        writel ( ( shared_bus >> 32 ), ( vmxnet->vd + VMXNET3_VD_DSAH ) );

        /* Activate device */
        if ( ( status = vmxnet3_command ( vmxnet,
                                          VMXNET3_CMD_ACTIVATE_DEV ) ) != 0 ) {
                DBGC ( vmxnet, "VMXNET3 %p could not activate (status %#x)\n",
                       vmxnet, status );
                rc = -EIO;
                goto err_activate;
        }

        /* Fill receive ring */
        vmxnet3_refill_rx ( netdev );

        return 0;

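        /* Error unwind: mirrors vmxnet3_close().  The quiesce/reset
         * pair below sits above the err_activate label and is not
         * reached by any current error path; it documents the teardown
         * that pairs with VMXNET3_CMD_ACTIVATE_DEV. */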
        vmxnet3_command ( vmxnet, VMXNET3_CMD_QUIESCE_DEV );
        vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV );
 err_activate:
        vmxnet3_flush_tx ( netdev );
        vmxnet3_flush_rx ( netdev );
        free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
 err_alloc_dma:
        return rc;
}

/**
 * Close NIC
 *
 * @v netdev		Network device
 */
static void vmxnet3_close ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );

        vmxnet3_command ( vmxnet, VMXNET3_CMD_QUIESCE_DEV );
        vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV );
        vmxnet3_flush_tx ( netdev );
        vmxnet3_flush_rx ( netdev );
        free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
}

/** vmxnet3 net device operations */
static struct net_device_operations vmxnet3_operations = {
        .open = vmxnet3_open,
        .close = vmxnet3_close,
        .transmit = vmxnet3_transmit,
        .poll = vmxnet3_poll,
        .irq = vmxnet3_irq,
};

/**
 * Check version
 *
 * @v vmxnet		vmxnet3 NIC
 * @ret rc		Return status code
 */
static int vmxnet3_check_version ( struct vmxnet3_nic *vmxnet ) {
        uint32_t version;
        uint32_t upt_version;

        /* Read version */
        version = readl ( vmxnet->vd + VMXNET3_VD_VRRS );
        upt_version = readl ( vmxnet->vd + VMXNET3_VD_UVRS );
        DBGC ( vmxnet, "VMXNET3 %p is version %d (UPT version %d)\n",
               vmxnet, version, upt_version );

        /* Inform NIC of driver version */
        writel ( VMXNET3_VERSION_SELECT, ( vmxnet->vd + VMXNET3_VD_VRRS ) );
        writel ( VMXNET3_UPT_VERSION_SELECT, ( vmxnet->vd + VMXNET3_VD_UVRS ) );

        return 0;
}

/**
 * Get permanent MAC address
 *
 * @v vmxnet		vmxnet3 NIC
 * @v hw_addr		Hardware address to fill in
 */
static void vmxnet3_get_hw_addr ( struct vmxnet3_nic *vmxnet, void *hw_addr ) {
        struct {
                uint32_t low;
                uint32_t high;
        } __attribute__ (( packed )) mac;

        mac.low = le32_to_cpu ( vmxnet3_command ( vmxnet,
                                                  VMXNET3_CMD_GET_PERM_MAC_LO ) );
        mac.high = le32_to_cpu ( vmxnet3_command ( vmxnet,
                                                   VMXNET3_CMD_GET_PERM_MAC_HI ) );
        memcpy ( hw_addr, &mac, ETH_ALEN );
}

/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int vmxnet3_probe ( struct pci_device *pci ) {
        struct net_device *netdev;
        struct vmxnet3_nic *vmxnet;
        int rc;

        /* Allocate network device */
        netdev = alloc_etherdev ( sizeof ( *vmxnet ) );
        if ( ! netdev ) {
                rc = -ENOMEM;
                goto err_alloc_etherdev;
        }
        netdev_init ( netdev, &vmxnet3_operations );
        vmxnet = netdev_priv ( netdev );
        pci_set_drvdata ( pci, netdev );
        netdev->dev = &pci->dev;
        memset ( vmxnet, 0, sizeof ( *vmxnet ) );

        /* Fix up PCI device */
        adjust_pci_device ( pci );

        /* Map PCI BARs */
        vmxnet->pt = ioremap ( pci_bar_start ( pci, VMXNET3_PT_BAR ),
                               VMXNET3_PT_LEN );
        if ( ! vmxnet->pt ) {
                rc = -ENODEV;
                goto err_ioremap_pt;
        }
        vmxnet->vd = ioremap ( pci_bar_start ( pci, VMXNET3_VD_BAR ),
                               VMXNET3_VD_LEN );
        if ( ! vmxnet->vd ) {
                rc = -ENODEV;
                goto err_ioremap_vd;
        }

        /* Version check */
        if ( ( rc = vmxnet3_check_version ( vmxnet ) ) != 0 )
                goto err_check_version;

        /* Reset device */
        if ( ( rc = vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV ) ) != 0 )
                goto err_reset;

        /* Read initial MAC address */
        vmxnet3_get_hw_addr ( vmxnet, &netdev->hw_addr );

        /* Register network device */
        if ( ( rc = register_netdev ( netdev ) ) != 0 ) {
                DBGC ( vmxnet, "VMXNET3 %p could not register net device: "
                       "%s\n", vmxnet, strerror ( rc ) );
                goto err_register_netdev;
        }

        /* Get initial link state */
        vmxnet3_check_link ( netdev );

        return 0;

        unregister_netdev ( netdev );
 err_register_netdev:
 err_reset:
 err_check_version:
        iounmap ( vmxnet->vd );
 err_ioremap_vd:
        iounmap ( vmxnet->pt );
 err_ioremap_pt:
        netdev_nullify ( netdev );
        netdev_put ( netdev );
 err_alloc_etherdev:
        return rc;
}

/**
 * Remove PCI device
 *
 * @v pci		PCI device
 */
static void vmxnet3_remove ( struct pci_device *pci ) {
        struct net_device *netdev = pci_get_drvdata ( pci );
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );

        unregister_netdev ( netdev );
        iounmap ( vmxnet->vd );
        iounmap ( vmxnet->pt );
        netdev_nullify ( netdev );
        netdev_put ( netdev );
}

/** vmxnet3 PCI IDs */
static struct pci_device_id vmxnet3_nics[] = {
        PCI_ROM ( 0x15ad, 0x07b0, "vmxnet3", "vmxnet3 virtual NIC", 0 ),
};

/** vmxnet3 PCI driver */
struct pci_driver vmxnet3_driver __pci_driver = {
        .ids = vmxnet3_nics,
        .id_count = ( sizeof ( vmxnet3_nics ) / sizeof ( vmxnet3_nics[0] ) ),
        .probe = vmxnet3_probe,
        .remove = vmxnet3_remove,
};