/*
 * (c) Copyright 2010 Stefan Hajnoczi <stefanha@gmail.com>
 *
 * based on the Etherboot virtio-net driver
 *
 * (c) Copyright 2008 Bull S.A.S.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * some parts from Linux Virtio PCI driver
 *
 * Copyright IBM Corp. 2007
 * Authors: Anthony Liguori <aliguori@us.ibm.com>
 *
 * some parts from Linux Virtio Ring
 *
 * Copyright Rusty Russell IBM Corporation 2007
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <ipxe/list.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>
#include <ipxe/pci.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include <ipxe/virtio-pci.h>
#include <ipxe/virtio-ring.h>
#include "virtio-net.h"
/*
 * Virtio network device driver
 *
 * Specification:
 * http://ozlabs.org/~rusty/virtio-spec/
 *
 * The virtio network device is supported by Linux virtualization software
 * including QEMU/KVM and lguest. This driver supports the virtio over PCI
 * transport; virtual machines have one virtio-net PCI adapter per NIC.
 *
 * Virtio-net is different from hardware NICs because virtio devices
 * communicate with the hypervisor via virtqueues, not traditional descriptor
 * rings. Virtqueues are unordered queues that support add_buf() and
 * get_buf() operations. To transmit a packet, the driver has to add the
 * packet buffer onto the virtqueue. To receive a packet, the driver must
 * first add an empty buffer to the virtqueue and then get the filled packet
 * buffer on completion.
 *
 * Virtqueues are an abstraction that is commonly implemented using the vring
 * descriptor ring layout. The vring is the actual shared memory structure
 * that allows the virtual machine to communicate buffers with the hypervisor.
 * Because the vring layout is optimized for flexibility and performance rather
 * than space, it is heavy-weight and is therefore allocated, like traditional
 * descriptor rings, in the driver's open() function rather than in probe().
 *
 * There is no true interrupt enable/disable. Virtqueues have callback
 * enable/disable flags, but these are only hints. The hypervisor may still
 * raise an interrupt. Nevertheless, this driver disables callbacks in the
 * hopes of avoiding interrupts.
 */
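
/* A rough sketch of the model described above (illustrative only; `list`,
 * `vdev` and `ioaddr` stand for the caller's descriptor list and device
 * handles, and error handling is elided). Transmission exposes a
 * device-readable buffer and kicks the queue, while reception pre-posts a
 * device-writable buffer and reaps it once the host has filled it:
 *
 *	vring_add_buf ( tx_vq, list, 2, 0, iobuf, 0 );	// header + data, out
 *	vring_kick ( vdev, ioaddr, tx_vq, 1 );		// notify hypervisor
 *	...
 *	vring_add_buf ( rx_vq, list, 0, 2, iobuf, 0 );	// header + data, in
 *	while ( vring_more_used ( rx_vq ) )
 *		iobuf = vring_get_buf ( rx_vq, &len );	// reap filled buffer
 */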

/* Driver types are declared here so virtio-net.h can be easily synced with its
 * Linux source.
 */

/* Virtqueue indices */
enum {
	RX_INDEX = 0,
	TX_INDEX,
	QUEUE_NB
};

/** Max number of pending rx packets */
#define NUM_RX_BUF 8

struct virtnet_nic {
	/** Base pio register address */
	unsigned long ioaddr;

	/** 0 for legacy, 1 for virtio 1.0 */
	int virtio_version;

	/** Virtio 1.0 device data */
	struct virtio_pci_modern_device vdev;

	/** RX/TX virtqueues */
	struct vring_virtqueue *virtqueue;

	/** RX packets handed to the NIC waiting to be filled in */
	struct list_head rx_iobufs;

	/** Pending rx packet count */
	unsigned int rx_num_iobufs;

	/** Virtio net dummy packet headers */
	struct virtio_net_hdr_modern empty_header[QUEUE_NB];
};

/** Add an iobuf to a virtqueue
 *
 * @v netdev		Network device
 * @v vq_idx		Virtqueue index (RX_INDEX or TX_INDEX)
 * @v iobuf		I/O buffer
 *
 * The virtqueue is kicked after the iobuf has been added.
 */
static void virtnet_enqueue_iob ( struct net_device *netdev,
				  int vq_idx, struct io_buffer *iobuf ) {
	struct virtnet_nic *virtnet = netdev->priv;
	struct vring_virtqueue *vq = &virtnet->virtqueue[vq_idx];
	struct virtio_net_hdr_modern *header = &virtnet->empty_header[vq_idx];
	/* Each transfer is a two-descriptor chain (virtio-net header plus
	 * packet data): TX buffers are device-readable ("out"), RX buffers
	 * are device-writable ("in").
	 */
	unsigned int out = ( vq_idx == TX_INDEX ) ? 2 : 0;
	unsigned int in = ( vq_idx == TX_INDEX ) ? 0 : 2;
	size_t header_len = ( virtnet->virtio_version ?
			      sizeof ( *header ) : sizeof ( header->legacy ) );
	struct vring_list list[] = {
		{
			/* Share a single zeroed virtio net header between all
			 * packets in a ring. This works because this driver
			 * does not use any advanced features so none of the
			 * header fields get used.
			 *
			 * Some host implementations (notably Google Compute
			 * Platform) are known to unconditionally write back
			 * to header->flags for received packets. Work around
			 * this by using separate RX and TX headers.
			 */
			.addr = ( char* ) header,
			.length = header_len,
		},
		{
			.addr = ( char* ) iobuf->data,
			.length = iob_len ( iobuf ),
		},
	};

	DBGC2 ( virtnet, "VIRTIO-NET %p enqueuing iobuf %p on vq %d\n",
		virtnet, iobuf, vq_idx );

	vring_add_buf ( vq, list, out, in, iobuf, 0 );
	vring_kick ( virtnet->virtio_version ? &virtnet->vdev : NULL,
		     virtnet->ioaddr, vq, 1 );
}

/** Try to keep rx virtqueue filled with iobufs
 *
 * @v netdev		Network device
 */
static void virtnet_refill_rx_virtqueue ( struct net_device *netdev ) {
	struct virtnet_nic *virtnet = netdev->priv;
	size_t len = ( netdev->max_pkt_len + 4 /* VLAN */ );

	while ( virtnet->rx_num_iobufs < NUM_RX_BUF ) {
		struct io_buffer *iobuf;

		/* Try to allocate a buffer, stop for now if out of memory */
		iobuf = alloc_iob ( len );
		if ( ! iobuf )
			break;

		/* Keep track of iobuf so close() can free it */
		list_add ( &iobuf->list, &virtnet->rx_iobufs );

		/* Mark packet length until we know the actual size */
		iob_put ( iobuf, len );

		virtnet_enqueue_iob ( netdev, RX_INDEX, iobuf );
		virtnet->rx_num_iobufs++;
	}
}
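
/* Note: the length set by iob_put() above is only a provisional upper bound;
 * virtnet_process_rx_packets() trims each buffer to the length reported by
 * the device (minus the virtio-net header) before handing it to the stack.
 */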

/** Helper to free all virtqueue memory
 *
 * @v netdev		Network device
 */
static void virtnet_free_virtqueues ( struct net_device *netdev ) {
	struct virtnet_nic *virtnet = netdev->priv;
	int i;

	for ( i = 0; i < QUEUE_NB; i++ ) {
		virtio_pci_unmap_capability ( &virtnet->virtqueue[i].notification );
		vp_free_vq ( &virtnet->virtqueue[i] );
	}

	free ( virtnet->virtqueue );
	virtnet->virtqueue = NULL;
}

/** Open network device, legacy virtio 0.9.5
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int virtnet_open_legacy ( struct net_device *netdev ) {
	struct virtnet_nic *virtnet = netdev->priv;
	unsigned long ioaddr = virtnet->ioaddr;
	u32 features;
	int i;

	/* Reset for sanity */
	vp_reset ( ioaddr );

	/* Allocate virtqueues */
	virtnet->virtqueue = zalloc ( QUEUE_NB *
				      sizeof ( *virtnet->virtqueue ) );
	if ( ! virtnet->virtqueue )
		return -ENOMEM;

	/* Initialize rx/tx virtqueues */
	for ( i = 0; i < QUEUE_NB; i++ ) {
		if ( vp_find_vq ( ioaddr, i, &virtnet->virtqueue[i] ) == -1 ) {
			DBGC ( virtnet, "VIRTIO-NET %p cannot register queue %d\n",
			       virtnet, i );
			virtnet_free_virtqueues ( netdev );
			return -ENOENT;
		}
	}

	/* Initialize rx packets */
	INIT_LIST_HEAD ( &virtnet->rx_iobufs );
	virtnet->rx_num_iobufs = 0;
	virtnet_refill_rx_virtqueue ( netdev );

	/* Disable interrupts before starting */
	netdev_irq ( netdev, 0 );

	/* Driver is ready */
	features = vp_get_features ( ioaddr );
	vp_set_features ( ioaddr, features & ( ( 1 << VIRTIO_NET_F_MAC ) |
					       ( 1 << VIRTIO_NET_F_MTU ) ) );
	vp_set_status ( ioaddr, VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK );
	return 0;
}

/** Open network device, modern virtio 1.0
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int virtnet_open_modern ( struct net_device *netdev ) {
	struct virtnet_nic *virtnet = netdev->priv;
	u64 features;
	u8 status;

	/* Negotiate features */
	features = vpm_get_features ( &virtnet->vdev );
	if ( ! ( features & ( 1ULL << VIRTIO_F_VERSION_1 ) ) ) {
		vpm_add_status ( &virtnet->vdev, VIRTIO_CONFIG_S_FAILED );
		return -EINVAL;
	}
	vpm_set_features ( &virtnet->vdev, features & (
		( 1ULL << VIRTIO_NET_F_MAC ) |
		( 1ULL << VIRTIO_NET_F_MTU ) |
		( 1ULL << VIRTIO_F_VERSION_1 ) |
		( 1ULL << VIRTIO_F_ANY_LAYOUT ) |
		( 1ULL << VIRTIO_F_IOMMU_PLATFORM ) ) );
	vpm_add_status ( &virtnet->vdev, VIRTIO_CONFIG_S_FEATURES_OK );

	status = vpm_get_status ( &virtnet->vdev );
	if ( ! ( status & VIRTIO_CONFIG_S_FEATURES_OK ) ) {
		DBGC ( virtnet, "VIRTIO-NET %p device didn't accept features\n",
		       virtnet );
		vpm_add_status ( &virtnet->vdev, VIRTIO_CONFIG_S_FAILED );
		return -EINVAL;
	}

	/* Allocate virtqueues */
	virtnet->virtqueue = zalloc ( QUEUE_NB *
				      sizeof ( *virtnet->virtqueue ) );
	if ( ! virtnet->virtqueue ) {
		vpm_add_status ( &virtnet->vdev, VIRTIO_CONFIG_S_FAILED );
		return -ENOMEM;
	}

	/* Initialize rx/tx virtqueues */
	if ( vpm_find_vqs ( &virtnet->vdev, QUEUE_NB, virtnet->virtqueue ) ) {
		DBGC ( virtnet, "VIRTIO-NET %p cannot register queues\n",
		       virtnet );
		virtnet_free_virtqueues ( netdev );
		vpm_add_status ( &virtnet->vdev, VIRTIO_CONFIG_S_FAILED );
		return -ENOENT;
	}

	/* Disable interrupts before starting */
	netdev_irq ( netdev, 0 );

	vpm_add_status ( &virtnet->vdev, VIRTIO_CONFIG_S_DRIVER_OK );

	/* Initialize rx packets */
	INIT_LIST_HEAD ( &virtnet->rx_iobufs );
	virtnet->rx_num_iobufs = 0;
	virtnet_refill_rx_virtqueue ( netdev );
	return 0;
}
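
/* Note: taken together, virtnet_probe_modern() and virtnet_open_modern()
 * follow the initialization sequence required by virtio 1.0: reset, then
 * ACKNOWLEDGE and DRIVER (set in probe), then feature negotiation with
 * FEATURES_OK and a status read-back check, virtqueue setup, and finally
 * DRIVER_OK (set here in open). FAILED is set on any error along the way.
 */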

/** Open network device
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int virtnet_open ( struct net_device *netdev ) {
	struct virtnet_nic *virtnet = netdev->priv;

	if ( virtnet->virtio_version ) {
		return virtnet_open_modern ( netdev );
	} else {
		return virtnet_open_legacy ( netdev );
	}
}

/** Close network device
 *
 * @v netdev		Network device
 */
static void virtnet_close ( struct net_device *netdev ) {
	struct virtnet_nic *virtnet = netdev->priv;
	struct io_buffer *iobuf;
	struct io_buffer *next_iobuf;

	if ( virtnet->virtio_version ) {
		vpm_reset ( &virtnet->vdev );
	} else {
		vp_reset ( virtnet->ioaddr );
	}

	/* Virtqueues can be freed now that NIC is reset */
	virtnet_free_virtqueues ( netdev );

	/* Free rx iobufs */
	list_for_each_entry_safe ( iobuf, next_iobuf, &virtnet->rx_iobufs, list ) {
		free_iob ( iobuf );
	}
	INIT_LIST_HEAD ( &virtnet->rx_iobufs );
	virtnet->rx_num_iobufs = 0;
}

/** Transmit packet
 *
 * @v netdev		Network device
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int virtnet_transmit ( struct net_device *netdev,
			      struct io_buffer *iobuf ) {
	virtnet_enqueue_iob ( netdev, TX_INDEX, iobuf );
	return 0;
}

/** Complete packet transmission
 *
 * @v netdev		Network device
 */
static void virtnet_process_tx_packets ( struct net_device *netdev ) {
	struct virtnet_nic *virtnet = netdev->priv;
	struct vring_virtqueue *tx_vq = &virtnet->virtqueue[TX_INDEX];

	while ( vring_more_used ( tx_vq ) ) {
		struct io_buffer *iobuf = vring_get_buf ( tx_vq, NULL );

		DBGC2 ( virtnet, "VIRTIO-NET %p tx complete iobuf %p\n",
			virtnet, iobuf );

		netdev_tx_complete ( netdev, iobuf );
	}
}

/** Complete packet reception
 *
 * @v netdev		Network device
 */
static void virtnet_process_rx_packets ( struct net_device *netdev ) {
	struct virtnet_nic *virtnet = netdev->priv;
	struct vring_virtqueue *rx_vq = &virtnet->virtqueue[RX_INDEX];

	while ( vring_more_used ( rx_vq ) ) {
		unsigned int len;
		struct io_buffer *iobuf = vring_get_buf ( rx_vq, &len );

		/* Release ownership of iobuf */
		list_del ( &iobuf->list );
		virtnet->rx_num_iobufs--;

		/* Update iobuf length */
		iob_unput ( iobuf, iob_len ( iobuf ) );
		iob_put ( iobuf, len - sizeof ( struct virtio_net_hdr ) );

		DBGC2 ( virtnet, "VIRTIO-NET %p rx complete iobuf %p len %zd\n",
			virtnet, iobuf, iob_len ( iobuf ) );

		/* Pass completed packet to the network stack */
		netdev_rx ( netdev, iobuf );
	}

	virtnet_refill_rx_virtqueue ( netdev );
}

/** Poll for completed and received packets
 *
 * @v netdev		Network device
 */
static void virtnet_poll ( struct net_device *netdev ) {
	struct virtnet_nic *virtnet = netdev->priv;

	/* Acknowledge interrupt. This is necessary for UNDI operation and
	 * interrupts that are raised despite VRING_AVAIL_F_NO_INTERRUPT being
	 * set (that flag is just a hint and the hypervisor does not have to
	 * honor it).
	 */
	if ( virtnet->virtio_version ) {
		vpm_get_isr ( &virtnet->vdev );
	} else {
		vp_get_isr ( virtnet->ioaddr );
	}

	virtnet_process_tx_packets ( netdev );
	virtnet_process_rx_packets ( netdev );
}

/** Enable or disable interrupts
 *
 * @v netdev		Network device
 * @v enable		Interrupts should be enabled
 */
static void virtnet_irq ( struct net_device *netdev, int enable ) {
	struct virtnet_nic *virtnet = netdev->priv;
	int i;

	for ( i = 0; i < QUEUE_NB; i++ ) {
		if ( enable )
			vring_enable_cb ( &virtnet->virtqueue[i] );
		else
			vring_disable_cb ( &virtnet->virtqueue[i] );
	}
}

/** virtio-net device operations */
static struct net_device_operations virtnet_operations = {
	.open = virtnet_open,
	.close = virtnet_close,
	.transmit = virtnet_transmit,
	.poll = virtnet_poll,
	.irq = virtnet_irq,
};

/**
 * Probe PCI device, legacy virtio 0.9.5
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int virtnet_probe_legacy ( struct pci_device *pci ) {
	unsigned long ioaddr = pci->ioaddr;
	struct net_device *netdev;
	struct virtnet_nic *virtnet;
	u32 features;
	u16 mtu;
	int rc;

	/* Allocate and hook up net device */
	netdev = alloc_etherdev ( sizeof ( *virtnet ) );
	if ( ! netdev )
		return -ENOMEM;
	netdev_init ( netdev, &virtnet_operations );
	virtnet = netdev->priv;
	virtnet->ioaddr = ioaddr;
	pci_set_drvdata ( pci, netdev );
	netdev->dev = &pci->dev;

	DBGC ( virtnet, "VIRTIO-NET %p busaddr=%s ioaddr=%#lx irq=%d\n",
	       virtnet, pci->dev.name, ioaddr, pci->irq );

	/* Enable PCI bus master and reset NIC */
	adjust_pci_device ( pci );
	vp_reset ( ioaddr );

	/* Load MAC address and MTU */
	features = vp_get_features ( ioaddr );
	if ( features & ( 1 << VIRTIO_NET_F_MAC ) ) {
		vp_get ( ioaddr, offsetof ( struct virtio_net_config, mac ),
			 netdev->hw_addr, ETH_ALEN );
		DBGC ( virtnet, "VIRTIO-NET %p mac=%s\n", virtnet,
		       eth_ntoa ( netdev->hw_addr ) );
	}
	if ( features & ( 1ULL << VIRTIO_NET_F_MTU ) ) {
		vp_get ( ioaddr, offsetof ( struct virtio_net_config, mtu ),
			 &mtu, sizeof ( mtu ) );
		DBGC ( virtnet, "VIRTIO-NET %p mtu=%d\n", virtnet, mtu );
		netdev->max_pkt_len = ( mtu + ETH_HLEN );
		netdev->mtu = mtu;
	}

	/* Register network device */
	if ( ( rc = register_netdev ( netdev ) ) != 0 )
		goto err_register_netdev;

	/* Mark link as up, control virtqueue is not used */
	netdev_link_up ( netdev );

	return 0;

	unregister_netdev ( netdev );
 err_register_netdev:
	vp_reset ( ioaddr );
	netdev_nullify ( netdev );
	netdev_put ( netdev );
	return rc;
}

/**
 * Probe PCI device, modern virtio 1.0
 *
 * @v pci		PCI device
 * @v found_dev		Set to non-zero if modern device was found (probe may still fail)
 * @ret rc		Return status code
 */
static int virtnet_probe_modern ( struct pci_device *pci, int *found_dev ) {
	struct net_device *netdev;
	struct virtnet_nic *virtnet;
	u64 features;
	u16 mtu;
	int rc, common, isr, notify, config, device;

	common = virtio_pci_find_capability ( pci, VIRTIO_PCI_CAP_COMMON_CFG );
	if ( ! common ) {
		DBG ( "Common virtio capability not found!\n" );
		return -ENODEV;
	}
	*found_dev = 1;

	isr = virtio_pci_find_capability ( pci, VIRTIO_PCI_CAP_ISR_CFG );
	notify = virtio_pci_find_capability ( pci, VIRTIO_PCI_CAP_NOTIFY_CFG );
	config = virtio_pci_find_capability ( pci, VIRTIO_PCI_CAP_PCI_CFG );
	if ( ! isr || ! notify || ! config ) {
		DBG ( "Missing virtio capabilities %i/%i/%i/%i\n",
		      common, isr, notify, config );
		return -EINVAL;
	}
	device = virtio_pci_find_capability ( pci, VIRTIO_PCI_CAP_DEVICE_CFG );

	/* Allocate and hook up net device */
	netdev = alloc_etherdev ( sizeof ( *virtnet ) );
	if ( ! netdev )
		return -ENOMEM;
	netdev_init ( netdev, &virtnet_operations );
	virtnet = netdev->priv;
	pci_set_drvdata ( pci, netdev );
	netdev->dev = &pci->dev;

	DBGC ( virtnet, "VIRTIO-NET modern %p busaddr=%s irq=%d\n",
	       virtnet, pci->dev.name, pci->irq );

	virtnet->vdev.pci = pci;
	rc = virtio_pci_map_capability ( pci, common,
		sizeof ( struct virtio_pci_common_cfg ), 4,
		0, sizeof ( struct virtio_pci_common_cfg ),
		&virtnet->vdev.common );
	if ( rc )
		goto err_map_common;

	rc = virtio_pci_map_capability ( pci, isr, sizeof ( u8 ), 1,
		0, 1,
		&virtnet->vdev.isr );
	if ( rc )
		goto err_map_isr;

	virtnet->vdev.notify_cap_pos = notify;
	virtnet->vdev.cfg_cap_pos = config;

	/* Map the device capability */
	if ( device ) {
		rc = virtio_pci_map_capability ( pci, device,
			0, 4, 0, sizeof ( struct virtio_net_config ),
			&virtnet->vdev.device );
		if ( rc )
			goto err_map_device;
	}

	/* Enable the PCI device */
	adjust_pci_device ( pci );

	/* Reset the device and set initial status bits */
	vpm_reset ( &virtnet->vdev );
	vpm_add_status ( &virtnet->vdev, VIRTIO_CONFIG_S_ACKNOWLEDGE );
	vpm_add_status ( &virtnet->vdev, VIRTIO_CONFIG_S_DRIVER );

	/* Load MAC address and MTU */
	if ( device ) {
		features = vpm_get_features ( &virtnet->vdev );
		if ( features & ( 1ULL << VIRTIO_NET_F_MAC ) ) {
			vpm_get ( &virtnet->vdev,
				  offsetof ( struct virtio_net_config, mac ),
				  netdev->hw_addr, ETH_ALEN );
			DBGC ( virtnet, "VIRTIO-NET %p mac=%s\n", virtnet,
			       eth_ntoa ( netdev->hw_addr ) );
		}
		if ( features & ( 1ULL << VIRTIO_NET_F_MTU ) ) {
			vpm_get ( &virtnet->vdev,
				  offsetof ( struct virtio_net_config, mtu ),
				  &mtu, sizeof ( mtu ) );
			DBGC ( virtnet, "VIRTIO-NET %p mtu=%d\n", virtnet,
			       mtu );
			netdev->max_pkt_len = ( mtu + ETH_HLEN );
		}
	}

	/* We need a valid MAC address */
	if ( ! is_valid_ether_addr ( netdev->hw_addr ) ) {
		rc = -EADDRNOTAVAIL;
		goto err_mac_address;
	}

	/* Register network device */
	if ( ( rc = register_netdev ( netdev ) ) != 0 )
		goto err_register_netdev;

	/* Mark link as up, control virtqueue is not used */
	netdev_link_up ( netdev );

	virtnet->virtio_version = 1;
	return 0;

	unregister_netdev ( netdev );
 err_register_netdev:
 err_mac_address:
	vpm_reset ( &virtnet->vdev );
	netdev_nullify ( netdev );
	netdev_put ( netdev );

	virtio_pci_unmap_capability ( &virtnet->vdev.device );
 err_map_device:
	virtio_pci_unmap_capability ( &virtnet->vdev.isr );
 err_map_isr:
	virtio_pci_unmap_capability ( &virtnet->vdev.common );
 err_map_common:
	return rc;
}

/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int virtnet_probe ( struct pci_device *pci ) {
	int found_modern = 0;
	int rc = virtnet_probe_modern ( pci, &found_modern );

	if ( ! found_modern && pci->device < 0x1040 ) {
		/* No modern device found: fall back to the legacy 0.9.5 probe
		 * for transitional devices (PCI device IDs below 0x1040).
		 */
		rc = virtnet_probe_legacy ( pci );
	}
	return rc;
}

/**
 * Remove device
 *
 * @v pci		PCI device
 */
static void virtnet_remove ( struct pci_device *pci ) {
	struct net_device *netdev = pci_get_drvdata ( pci );
	struct virtnet_nic *virtnet = netdev->priv;

	virtio_pci_unmap_capability ( &virtnet->vdev.device );
	virtio_pci_unmap_capability ( &virtnet->vdev.isr );
	virtio_pci_unmap_capability ( &virtnet->vdev.common );

	unregister_netdev ( netdev );
	netdev_nullify ( netdev );
	netdev_put ( netdev );
}

static struct pci_device_id virtnet_nics[] = {
	PCI_ROM(0x1af4, 0x1000, "virtio-net", "Virtio Network Interface", 0),
	PCI_ROM(0x1af4, 0x1041, "virtio-net", "Virtio Network Interface 1.0", 0),
};

struct pci_driver virtnet_driver __pci_driver = {
	.ids = virtnet_nics,
	.id_count = ( sizeof ( virtnet_nics ) / sizeof ( virtnet_nics[0] ) ),
	.probe = virtnet_probe,
	.remove = virtnet_remove,
};