
vmxnet3.c

/*
 * Copyright (C) 2011 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

FILE_LICENCE ( GPL2_OR_LATER );
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <byteswap.h>
#include <ipxe/pci.h>
#include <ipxe/io.h>
#include <ipxe/malloc.h>
#include <ipxe/profile.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include "vmxnet3.h"
/**
 * @file
 *
 * VMware vmxnet3 virtual NIC driver
 *
 */

/** VM command profiler */
static struct profiler vmxnet3_vm_command_profiler __profiler =
        { .name = "vmxnet3.vm_command" };

/** VM transmit profiler */
static struct profiler vmxnet3_vm_tx_profiler __profiler =
        { .name = "vmxnet3.vm_tx" };

/** VM receive refill profiler */
static struct profiler vmxnet3_vm_refill_profiler __profiler =
        { .name = "vmxnet3.vm_refill" };

/** VM event profiler */
static struct profiler vmxnet3_vm_event_profiler __profiler =
        { .name = "vmxnet3.vm_event" };
/**
 * Issue command
 *
 * @v vmxnet            vmxnet3 NIC
 * @v command           Command to issue
 * @ret result          Command result
 */
static inline uint32_t vmxnet3_command ( struct vmxnet3_nic *vmxnet,
                                         uint32_t command ) {
        uint32_t result;

        /* Issue command */
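        /* Note: the command register doubles as the result register;
         * writing a command and then reading back the same register
         * returns that command's result.
         */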
        profile_start ( &vmxnet3_vm_command_profiler );
        writel ( command, ( vmxnet->vd + VMXNET3_VD_CMD ) );
        result = readl ( vmxnet->vd + VMXNET3_VD_CMD );
        profile_stop ( &vmxnet3_vm_command_profiler );
        profile_exclude ( &vmxnet3_vm_command_profiler );

        return result;
}
/**
 * Transmit packet
 *
 * @v netdev            Network device
 * @v iobuf             I/O buffer
 * @ret rc              Return status code
 */
static int vmxnet3_transmit ( struct net_device *netdev,
                              struct io_buffer *iobuf ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_tx_desc *tx_desc;
        unsigned int desc_idx;
        unsigned int generation;

        /* Check that we have a free transmit descriptor */
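        /* The ring slot and the generation (ownership) bit are both
         * derived from the free-running producer counter: the low bits
         * select the descriptor, and the counter bit equal to
         * VMXNET3_NUM_TX_DESC toggles each time the ring wraps.  This
         * relies on the ring size being a power of two.
         */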
        desc_idx = ( vmxnet->count.tx_prod % VMXNET3_NUM_TX_DESC );
        generation = ( ( vmxnet->count.tx_prod & VMXNET3_NUM_TX_DESC ) ?
                       0 : cpu_to_le32 ( VMXNET3_TXF_GEN ) );
        if ( vmxnet->tx_iobuf[desc_idx] ) {
                DBGC ( vmxnet, "VMXNET3 %p out of transmit descriptors\n",
                       vmxnet );
                return -ENOBUFS;
        }

        /* Increment producer counter */
        vmxnet->count.tx_prod++;

        /* Store I/O buffer for later completion */
        vmxnet->tx_iobuf[desc_idx] = iobuf;

        /* Populate transmit descriptor */
        tx_desc = &vmxnet->dma->tx_desc[desc_idx];
        tx_desc->address = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
        tx_desc->flags[0] = ( generation | cpu_to_le32 ( iob_len ( iobuf ) ) );
        tx_desc->flags[1] = cpu_to_le32 ( VMXNET3_TXF_CQ | VMXNET3_TXF_EOP );

        /* Hand over descriptor to NIC */
        wmb();
        profile_start ( &vmxnet3_vm_tx_profiler );
        writel ( ( vmxnet->count.tx_prod % VMXNET3_NUM_TX_DESC ),
                 ( vmxnet->pt + VMXNET3_PT_TXPROD ) );
        profile_stop ( &vmxnet3_vm_tx_profiler );
        profile_exclude ( &vmxnet3_vm_tx_profiler );

        return 0;
}
/**
 * Poll for completed transmissions
 *
 * @v netdev            Network device
 */
static void vmxnet3_poll_tx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_tx_comp *tx_comp;
        struct io_buffer *iobuf;
        unsigned int comp_idx;
        unsigned int desc_idx;
        unsigned int generation;

        while ( 1 ) {

                /* Look for completed descriptors */
                comp_idx = ( vmxnet->count.tx_cons % VMXNET3_NUM_TX_COMP );
                generation = ( ( vmxnet->count.tx_cons & VMXNET3_NUM_TX_COMP ) ?
                               0 : cpu_to_le32 ( VMXNET3_TXCF_GEN ) );
                tx_comp = &vmxnet->dma->tx_comp[comp_idx];
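                /* A completion entry belongs to the driver only once its
                 * generation bit matches the expected value; a mismatch
                 * means the device has not yet written this entry.
                 */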
                if ( generation != ( tx_comp->flags &
                                     cpu_to_le32 ( VMXNET3_TXCF_GEN ) ) ) {
                        break;
                }

                /* Increment consumer counter */
                vmxnet->count.tx_cons++;

                /* Locate corresponding transmit descriptor */
                desc_idx = ( le32_to_cpu ( tx_comp->index ) %
                             VMXNET3_NUM_TX_DESC );
                iobuf = vmxnet->tx_iobuf[desc_idx];
                if ( ! iobuf ) {
                        DBGC ( vmxnet, "VMXNET3 %p completed on empty transmit "
                               "buffer %#x/%#x\n", vmxnet, comp_idx, desc_idx );
                        netdev_tx_err ( netdev, NULL, -ENOTTY );
                        continue;
                }

                /* Remove I/O buffer from transmit queue */
                vmxnet->tx_iobuf[desc_idx] = NULL;

                /* Report transmission completion to network layer */
                DBGC2 ( vmxnet, "VMXNET3 %p completed TX %#x/%#x (len %#zx)\n",
                        vmxnet, comp_idx, desc_idx, iob_len ( iobuf ) );
                netdev_tx_complete ( netdev, iobuf );
        }
}
/**
 * Flush any uncompleted transmit buffers
 *
 * @v netdev            Network device
 */
static void vmxnet3_flush_tx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        unsigned int i;

        for ( i = 0 ; i < VMXNET3_NUM_TX_DESC ; i++ ) {
                if ( vmxnet->tx_iobuf[i] ) {
                        netdev_tx_complete_err ( netdev, vmxnet->tx_iobuf[i],
                                                 -ECANCELED );
                        vmxnet->tx_iobuf[i] = NULL;
                }
        }
}
/**
 * Refill receive ring
 *
 * @v netdev            Network device
 */
static void vmxnet3_refill_rx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_rx_desc *rx_desc;
        struct io_buffer *iobuf;
        unsigned int orig_rx_prod = vmxnet->count.rx_prod;
        unsigned int desc_idx;
        unsigned int generation;

        /* Fill receive ring to specified fill level */
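        /* rx_fill counts the buffers currently posted to (and owned by)
         * the device; the loop below tops it back up to VMXNET3_RX_FILL
         * after completions have been consumed.
         */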
        while ( vmxnet->count.rx_fill < VMXNET3_RX_FILL ) {

                /* Locate receive descriptor */
                desc_idx = ( vmxnet->count.rx_prod % VMXNET3_NUM_RX_DESC );
                generation = ( ( vmxnet->count.rx_prod & VMXNET3_NUM_RX_DESC ) ?
                               0 : cpu_to_le32 ( VMXNET3_RXF_GEN ) );
                assert ( vmxnet->rx_iobuf[desc_idx] == NULL );

                /* Allocate I/O buffer */
                iobuf = alloc_iob ( VMXNET3_MTU + NET_IP_ALIGN );
                if ( ! iobuf ) {
                        /* Non-fatal low memory condition */
                        break;
                }
                iob_reserve ( iobuf, NET_IP_ALIGN );

                /* Increment producer counter and fill level */
                vmxnet->count.rx_prod++;
                vmxnet->count.rx_fill++;

                /* Store I/O buffer for later completion */
                vmxnet->rx_iobuf[desc_idx] = iobuf;

                /* Populate receive descriptor */
                rx_desc = &vmxnet->dma->rx_desc[desc_idx];
                rx_desc->address = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
                rx_desc->flags = ( generation | cpu_to_le32 ( VMXNET3_MTU ) );
        }

        /* Hand over any new descriptors to NIC */
        if ( vmxnet->count.rx_prod != orig_rx_prod ) {
                wmb();
                profile_start ( &vmxnet3_vm_refill_profiler );
                writel ( ( vmxnet->count.rx_prod % VMXNET3_NUM_RX_DESC ),
                         ( vmxnet->pt + VMXNET3_PT_RXPROD ) );
                profile_stop ( &vmxnet3_vm_refill_profiler );
                profile_exclude ( &vmxnet3_vm_refill_profiler );
        }
}
/**
 * Poll for received packets
 *
 * @v netdev            Network device
 */
static void vmxnet3_poll_rx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_rx_comp *rx_comp;
        struct io_buffer *iobuf;
        unsigned int comp_idx;
        unsigned int desc_idx;
        unsigned int generation;
        size_t len;

        while ( 1 ) {

                /* Look for completed descriptors */
                comp_idx = ( vmxnet->count.rx_cons % VMXNET3_NUM_RX_COMP );
                generation = ( ( vmxnet->count.rx_cons & VMXNET3_NUM_RX_COMP ) ?
                               0 : cpu_to_le32 ( VMXNET3_RXCF_GEN ) );
                rx_comp = &vmxnet->dma->rx_comp[comp_idx];
                if ( generation != ( rx_comp->flags &
                                     cpu_to_le32 ( VMXNET3_RXCF_GEN ) ) ) {
                        break;
                }

                /* Increment consumer counter */
                vmxnet->count.rx_cons++;

                /* Locate corresponding receive descriptor */
                desc_idx = ( le32_to_cpu ( rx_comp->index ) %
                             VMXNET3_NUM_RX_DESC );
                iobuf = vmxnet->rx_iobuf[desc_idx];
                if ( ! iobuf ) {
                        DBGC ( vmxnet, "VMXNET3 %p completed on empty receive "
                               "buffer %#x/%#x\n", vmxnet, comp_idx, desc_idx );
                        netdev_rx_err ( netdev, NULL, -ENOTTY );
                        continue;
                }

                /* Remove I/O buffer from receive queue */
                vmxnet->rx_iobuf[desc_idx] = NULL;
                vmxnet->count.rx_fill--;

                /* Deliver packet to network layer */
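                /* The completion descriptor carries the packet length in
                 * its low-order bits; masking with
                 * ( VMXNET3_MAX_PACKET_LEN - 1 ) extracts just that field
                 * (this assumes VMXNET3_MAX_PACKET_LEN is a power of two).
                 */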
                len = ( le32_to_cpu ( rx_comp->len ) &
                        ( VMXNET3_MAX_PACKET_LEN - 1 ) );
                DBGC2 ( vmxnet, "VMXNET3 %p completed RX %#x/%#x (len %#zx)\n",
                        vmxnet, comp_idx, desc_idx, len );
                iob_put ( iobuf, len );
                netdev_rx ( netdev, iobuf );
        }
}
/**
 * Flush any uncompleted receive buffers
 *
 * @v netdev            Network device
 */
static void vmxnet3_flush_rx ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct io_buffer *iobuf;
        unsigned int i;

        for ( i = 0 ; i < VMXNET3_NUM_RX_DESC ; i++ ) {
                if ( ( iobuf = vmxnet->rx_iobuf[i] ) != NULL ) {
                        netdev_rx_err ( netdev, iobuf, -ECANCELED );
                        vmxnet->rx_iobuf[i] = NULL;
                }
        }
}
/**
 * Check link state
 *
 * @v netdev            Network device
 */
static void vmxnet3_check_link ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        uint32_t state;
        int link_up;
        unsigned int link_speed;

        /* Get link state */
        state = vmxnet3_command ( vmxnet, VMXNET3_CMD_GET_LINK );
        link_up = ( state & 1 );
        link_speed = ( state >> 16 );

        /* Report link state to network device */
        if ( link_up ) {
                DBGC ( vmxnet, "VMXNET3 %p link is up at %d Mbps\n",
                       vmxnet, link_speed );
                netdev_link_up ( netdev );
        } else {
                DBGC ( vmxnet, "VMXNET3 %p link is down\n", vmxnet );
                netdev_link_down ( netdev );
        }
}
/**
 * Poll for events
 *
 * @v netdev            Network device
 */
static void vmxnet3_poll_events ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        uint32_t events;

        /* Do nothing unless there are events to process */
        if ( ! vmxnet->dma->shared.ecr )
                return;
        events = le32_to_cpu ( vmxnet->dma->shared.ecr );

        /* Acknowledge these events */
        profile_start ( &vmxnet3_vm_event_profiler );
        writel ( events, ( vmxnet->vd + VMXNET3_VD_ECR ) );
        profile_stop ( &vmxnet3_vm_event_profiler );
        profile_exclude ( &vmxnet3_vm_event_profiler );

        /* Check for link state change */
        if ( events & VMXNET3_ECR_LINK ) {
                vmxnet3_check_link ( netdev );
                events &= ~VMXNET3_ECR_LINK;
        }

        /* Check for queue errors */
        if ( events & ( VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR ) ) {
                vmxnet3_command ( vmxnet, VMXNET3_CMD_GET_QUEUE_STATUS );
                DBGC ( vmxnet, "VMXNET3 %p queue error status (TX %08x, RX "
                       "%08x)\n", vmxnet,
                       le32_to_cpu ( vmxnet->dma->queues.tx.status.error ),
                       le32_to_cpu ( vmxnet->dma->queues.rx.status.error ) );

                /* Report errors to allow for visibility via "ifstat" */
                if ( events & VMXNET3_ECR_TQERR )
                        netdev_tx_err ( netdev, NULL, -EPIPE );
                if ( events & VMXNET3_ECR_RQERR )
                        netdev_rx_err ( netdev, NULL, -EPIPE );
                events &= ~( VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR );
        }

        /* Check for unknown events */
        if ( events ) {
                DBGC ( vmxnet, "VMXNET3 %p unknown events %08x\n",
                       vmxnet, events );

                /* Report error to allow for visibility via "ifstat" */
                netdev_rx_err ( netdev, NULL, -ENODEV );
        }
}
/**
 * Poll network device
 *
 * @v netdev            Network device
 */
static void vmxnet3_poll ( struct net_device *netdev ) {

        vmxnet3_poll_events ( netdev );
        vmxnet3_poll_tx ( netdev );
        vmxnet3_poll_rx ( netdev );
        vmxnet3_refill_rx ( netdev );
}

/**
 * Enable/disable interrupts
 *
 * @v netdev            Network device
 * @v enable            Interrupts should be enabled
 */
static void vmxnet3_irq ( struct net_device *netdev, int enable ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );

        DBGC ( vmxnet, "VMXNET3 %p %s IRQ not implemented\n",
               vmxnet, ( enable ? "enable" : "disable" ) );
}
/**
 * Set MAC address
 *
 * @v vmxnet            vmxnet3 NIC
 * @v ll_addr           Link-layer address to set
 */
static void vmxnet3_set_ll_addr ( struct vmxnet3_nic *vmxnet,
                                  const void *ll_addr ) {
        struct {
                uint32_t low;
                uint32_t high;
        } __attribute__ (( packed )) mac;

        memset ( &mac, 0, sizeof ( mac ) );
        memcpy ( &mac, ll_addr, ETH_ALEN );
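        /* The six-byte Ethernet address is programmed as two 32-bit
         * writes: the first four bytes via the MACL register and the
         * remaining two via MACH.
         */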
        writel ( cpu_to_le32 ( mac.low ), ( vmxnet->vd + VMXNET3_VD_MACL ) );
        writel ( cpu_to_le32 ( mac.high ), ( vmxnet->vd + VMXNET3_VD_MACH ) );
}
/**
 * Open NIC
 *
 * @v netdev            Network device
 * @ret rc              Return status code
 */
static int vmxnet3_open ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
        struct vmxnet3_shared *shared;
        struct vmxnet3_queues *queues;
        uint64_t shared_bus;
        uint64_t queues_bus;
        uint32_t status;
        int rc;

        /* Allocate DMA areas */
        vmxnet->dma = malloc_dma ( sizeof ( *vmxnet->dma ), VMXNET3_DMA_ALIGN );
        if ( ! vmxnet->dma ) {
                DBGC ( vmxnet, "VMXNET3 %p could not allocate DMA area\n",
                       vmxnet );
                rc = -ENOMEM;
                goto err_alloc_dma;
        }
        memset ( vmxnet->dma, 0, sizeof ( *vmxnet->dma ) );

        /* Populate queue descriptors */
        queues = &vmxnet->dma->queues;
        queues->tx.cfg.desc_address =
                cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->tx_desc ) );
        queues->tx.cfg.comp_address =
                cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->tx_comp ) );
        queues->tx.cfg.num_desc = cpu_to_le32 ( VMXNET3_NUM_TX_DESC );
        queues->tx.cfg.num_comp = cpu_to_le32 ( VMXNET3_NUM_TX_COMP );
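        /* Only the first of the device's receive rings is configured
         * below; this driver leaves the optional second ring unused.
         */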
        queues->rx.cfg.desc_address[0] =
                cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->rx_desc ) );
        queues->rx.cfg.comp_address =
                cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->rx_comp ) );
        queues->rx.cfg.num_desc[0] = cpu_to_le32 ( VMXNET3_NUM_RX_DESC );
        queues->rx.cfg.num_comp = cpu_to_le32 ( VMXNET3_NUM_RX_COMP );
        queues_bus = virt_to_bus ( queues );
        DBGC ( vmxnet, "VMXNET3 %p queue descriptors at %08llx+%zx\n",
               vmxnet, queues_bus, sizeof ( *queues ) );

        /* Populate shared area */
        shared = &vmxnet->dma->shared;
        shared->magic = cpu_to_le32 ( VMXNET3_SHARED_MAGIC );
        shared->misc.version = cpu_to_le32 ( VMXNET3_VERSION_MAGIC );
        shared->misc.version_support = cpu_to_le32 ( VMXNET3_VERSION_SELECT );
        shared->misc.upt_version_support =
                cpu_to_le32 ( VMXNET3_UPT_VERSION_SELECT );
        shared->misc.queue_desc_address = cpu_to_le64 ( queues_bus );
        shared->misc.queue_desc_len = cpu_to_le32 ( sizeof ( *queues ) );
        shared->misc.mtu = cpu_to_le32 ( VMXNET3_MTU );
        shared->misc.num_tx_queues = 1;
        shared->misc.num_rx_queues = 1;
        shared->interrupt.num_intrs = 1;
        shared->interrupt.control = cpu_to_le32 ( VMXNET3_IC_DISABLE_ALL );
        shared->rx_filter.mode = cpu_to_le32 ( VMXNET3_RXM_UCAST |
                                               VMXNET3_RXM_BCAST |
                                               VMXNET3_RXM_ALL_MULTI );
        shared_bus = virt_to_bus ( shared );
        DBGC ( vmxnet, "VMXNET3 %p shared area at %08llx+%zx\n",
               vmxnet, shared_bus, sizeof ( *shared ) );

        /* Zero counters */
        memset ( &vmxnet->count, 0, sizeof ( vmxnet->count ) );

        /* Set MAC address */
        vmxnet3_set_ll_addr ( vmxnet, &netdev->ll_addr );

        /* Pass shared area to device */
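        /* DSAL and DSAH take the low and high 32 bits of the shared
         * area's bus address respectively.
         */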
        writel ( ( shared_bus >> 0 ), ( vmxnet->vd + VMXNET3_VD_DSAL ) );
        writel ( ( shared_bus >> 32 ), ( vmxnet->vd + VMXNET3_VD_DSAH ) );

        /* Activate device */
        if ( ( status = vmxnet3_command ( vmxnet,
                                          VMXNET3_CMD_ACTIVATE_DEV ) ) != 0 ) {
                DBGC ( vmxnet, "VMXNET3 %p could not activate (status %#x)\n",
                       vmxnet, status );
                rc = -EIO;
                goto err_activate;
        }

        /* Fill receive ring */
        vmxnet3_refill_rx ( netdev );

        return 0;
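        /* Error unwind: the steps below mirror successful initialisation
         * in reverse.  The quiesce/reset commands before the err_activate
         * label are not reachable from the code above; they are retained
         * so that the unwind stays complete if further setup steps are
         * ever added after activation.
         */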
        vmxnet3_command ( vmxnet, VMXNET3_CMD_QUIESCE_DEV );
        vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV );
 err_activate:
        vmxnet3_flush_tx ( netdev );
        vmxnet3_flush_rx ( netdev );
        free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
 err_alloc_dma:
        return rc;
}
/**
 * Close NIC
 *
 * @v netdev            Network device
 */
static void vmxnet3_close ( struct net_device *netdev ) {
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );

        vmxnet3_command ( vmxnet, VMXNET3_CMD_QUIESCE_DEV );
        vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV );
        vmxnet3_flush_tx ( netdev );
        vmxnet3_flush_rx ( netdev );
        free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
}

/** vmxnet3 net device operations */
static struct net_device_operations vmxnet3_operations = {
        .open = vmxnet3_open,
        .close = vmxnet3_close,
        .transmit = vmxnet3_transmit,
        .poll = vmxnet3_poll,
        .irq = vmxnet3_irq,
};
/**
 * Check version
 *
 * @v vmxnet            vmxnet3 NIC
 * @ret rc              Return status code
 */
static int vmxnet3_check_version ( struct vmxnet3_nic *vmxnet ) {
        uint32_t version;
        uint32_t upt_version;

        /* Read version */
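        /* VRRS and UVRS advertise the device and UPT revisions supported
         * by the emulation; writing a selection back (below) commits the
         * driver to one of those revisions.
         */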
        version = readl ( vmxnet->vd + VMXNET3_VD_VRRS );
        upt_version = readl ( vmxnet->vd + VMXNET3_VD_UVRS );
        DBGC ( vmxnet, "VMXNET3 %p is version %d (UPT version %d)\n",
               vmxnet, version, upt_version );

        /* Inform NIC of driver version */
        writel ( VMXNET3_VERSION_SELECT, ( vmxnet->vd + VMXNET3_VD_VRRS ) );
        writel ( VMXNET3_UPT_VERSION_SELECT, ( vmxnet->vd + VMXNET3_VD_UVRS ) );

        return 0;
}
/**
 * Get permanent MAC address
 *
 * @v vmxnet            vmxnet3 NIC
 * @v hw_addr           Hardware address to fill in
 */
static void vmxnet3_get_hw_addr ( struct vmxnet3_nic *vmxnet, void *hw_addr ) {
        struct {
                uint32_t low;
                uint32_t high;
        } __attribute__ (( packed )) mac;

        mac.low = le32_to_cpu ( vmxnet3_command ( vmxnet,
                                            VMXNET3_CMD_GET_PERM_MAC_LO ) );
        mac.high = le32_to_cpu ( vmxnet3_command ( vmxnet,
                                            VMXNET3_CMD_GET_PERM_MAC_HI ) );
        memcpy ( hw_addr, &mac, ETH_ALEN );
}
/**
 * Probe PCI device
 *
 * @v pci               PCI device
 * @ret rc              Return status code
 */
static int vmxnet3_probe ( struct pci_device *pci ) {
        struct net_device *netdev;
        struct vmxnet3_nic *vmxnet;
        int rc;

        /* Allocate network device */
        netdev = alloc_etherdev ( sizeof ( *vmxnet ) );
        if ( ! netdev ) {
                rc = -ENOMEM;
                goto err_alloc_etherdev;
        }
        netdev_init ( netdev, &vmxnet3_operations );
        vmxnet = netdev_priv ( netdev );
        pci_set_drvdata ( pci, netdev );
        netdev->dev = &pci->dev;
        memset ( vmxnet, 0, sizeof ( *vmxnet ) );

        /* Fix up PCI device */
        adjust_pci_device ( pci );

        /* Map PCI BARs */
        vmxnet->pt = ioremap ( pci_bar_start ( pci, VMXNET3_PT_BAR ),
                               VMXNET3_PT_LEN );
        if ( ! vmxnet->pt ) {
                rc = -ENODEV;
                goto err_ioremap_pt;
        }
        vmxnet->vd = ioremap ( pci_bar_start ( pci, VMXNET3_VD_BAR ),
                               VMXNET3_VD_LEN );
        if ( ! vmxnet->vd ) {
                rc = -ENODEV;
                goto err_ioremap_vd;
        }

        /* Version check */
        if ( ( rc = vmxnet3_check_version ( vmxnet ) ) != 0 )
                goto err_check_version;

        /* Reset device */
        if ( ( rc = vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV ) ) != 0 )
                goto err_reset;

        /* Read initial MAC address */
        vmxnet3_get_hw_addr ( vmxnet, &netdev->hw_addr );

        /* Register network device */
        if ( ( rc = register_netdev ( netdev ) ) != 0 ) {
                DBGC ( vmxnet, "VMXNET3 %p could not register net device: "
                       "%s\n", vmxnet, strerror ( rc ) );
                goto err_register_netdev;
        }

        /* Get initial link state */
        vmxnet3_check_link ( netdev );

        return 0;

        unregister_netdev ( netdev );
 err_register_netdev:
 err_reset:
 err_check_version:
        iounmap ( vmxnet->vd );
 err_ioremap_vd:
        iounmap ( vmxnet->pt );
 err_ioremap_pt:
        netdev_nullify ( netdev );
        netdev_put ( netdev );
 err_alloc_etherdev:
        return rc;
}
/**
 * Remove PCI device
 *
 * @v pci               PCI device
 */
static void vmxnet3_remove ( struct pci_device *pci ) {
        struct net_device *netdev = pci_get_drvdata ( pci );
        struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );

        unregister_netdev ( netdev );
        iounmap ( vmxnet->vd );
        iounmap ( vmxnet->pt );
        netdev_nullify ( netdev );
        netdev_put ( netdev );
}

/** vmxnet3 PCI IDs */
static struct pci_device_id vmxnet3_nics[] = {
        PCI_ROM ( 0x15ad, 0x07b0, "vmxnet3", "vmxnet3 virtual NIC", 0 ),
};

/** vmxnet3 PCI driver */
struct pci_driver vmxnet3_driver __pci_driver = {
        .ids = vmxnet3_nics,
        .id_count = ( sizeof ( vmxnet3_nics ) / sizeof ( vmxnet3_nics[0] ) ),
        .probe = vmxnet3_probe,
        .remove = vmxnet3_remove,
};