
Remove the one-packet-per-poll-cycle limit; it seems to no longer be needed now that performance is up.

tags/v0.9.3
Michael Brown, 18 years ago
commit 05f4c3d176
1 changed file with 5 additions and 24 deletions
src/net/netdevice.c (+5, -24)

@@ -387,7 +387,6 @@
  *
  * This polls all interfaces for received packets, and processes
  * packets from the RX queue.
- *
  */
 static void net_step ( struct process *process ) {
 	struct net_device *netdev;
@@ -396,29 +395,11 @@
 	/* Poll and process each network device */
 	list_for_each_entry ( netdev, &net_devices, list ) {
 
-		/* Poll for new packets.  Limit RX queue size to a
-		 * single packet, because otherwise most drivers are
-		 * in serious danger of running out of memory and
-		 * having to drop packets.
-		 *
-		 * This limitation isn't relevant to devices that
-		 * preallocate packet buffers (i.e. devices with
-		 * descriptor-based RX datapaths).  We might at some
-		 * point want to relax the quota for such devices.
-		 */
-		netdev_poll ( netdev,
-			      ( list_empty ( &netdev->rx_queue ) ? 1 : 0 ) );
-
-		/* Handle at most one received packet per poll.  We
-		 * avoid processing more than one packet per call to
-		 * netdev_poll(), because processing the received
-		 * packet can trigger transmission of a new packet
-		 * (e.g. an ARP response).  Since TX completions will
-		 * be processed as part of the poll operation, it is
-		 * easy to overflow small TX queues if multiple
-		 * packets are processed per poll.
-		 */
-		if ( ( pkb = netdev_rx_dequeue ( netdev ) ) ) {
+		/* Poll for new packets */
+		netdev_poll ( netdev, -1U );
+
+		/* Process received packets */
+		while ( ( pkb = netdev_rx_dequeue ( netdev ) ) ) {
 			DBGC ( netdev, "NETDEV %p processing %p\n",
 			       netdev, pkb );
 			netdev->ll_protocol->rx ( pkb, netdev );
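
For context, the poll loop after this change reads roughly as below. This is a sketch reconstructed from the hunks above, not a verbatim copy of the file: the struct pk_buff declaration and the closing braces are assumptions, since they fall outside the diff context. The -1U quota is simply the largest unsigned value, i.e. an effectively unlimited number of received packets per poll, which is what removes the old one-packet-per-poll-cycle limit.

/* Sketch of net_step() after this commit, reconstructed from the diff
 * context; declarations and closing braces outside the hunks are assumed.
 */
static void net_step ( struct process *process ) {
	struct net_device *netdev;
	struct pk_buff *pkb;

	/* Poll and process each network device */
	list_for_each_entry ( netdev, &net_devices, list ) {

		/* Poll for new packets.  The -1U quota (largest
		 * unsigned value) lets the driver deliver as many
		 * packets as it has, rather than at most one.
		 */
		netdev_poll ( netdev, -1U );

		/* Process received packets: drain the RX queue and
		 * hand each packet to the link-layer protocol.
		 */
		while ( ( pkb = netdev_rx_dequeue ( netdev ) ) ) {
			DBGC ( netdev, "NETDEV %p processing %p\n",
			       netdev, pkb );
			netdev->ll_protocol->rx ( pkb, netdev );
		}
	}
}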
