
[netdevice] Add netdev_tx_defer() to allow drivers to defer transmissions

Devices with small transmit descriptor rings may temporarily run out
of space.  Provide netdev_tx_defer() to allow drivers to defer packets
for retransmission as soon as a descriptor becomes available.

Signed-off-by: Michael Brown <mcb30@ipxe.org>
commit 2095ed413e
2 changed files with 91 additions and 3 deletions:
  src/include/ipxe/netdevice.h  (+4, -0)
  src/net/netdevice.c           (+87, -3)
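
As a usage illustration (not part of this commit): a driver whose transmit ring is full simply hands the buffer over to netdev_tx_defer() and reports success, relying on netdev_tx() having already queued the buffer on the device's TX queue. A minimal sketch of such a transmit method, with hypothetical mydrv_* names and a deliberately small producer/consumer ring:

#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>

#define MYDRV_NUM_TX_DESC 8		/* deliberately small TX ring */

/** Hypothetical per-NIC state (illustrative only) */
struct mydrv_nic {
	unsigned int tx_prod;		/* transmit producer counter */
	unsigned int tx_cons;		/* transmit consumer counter */
};

/**
 * Transmit packet (hypothetical driver)
 *
 * @v netdev		Network device
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int mydrv_transmit ( struct net_device *netdev,
			    struct io_buffer *iobuf ) {
	struct mydrv_nic *nic = netdev->priv;

	/* Defer the packet if the ring is full.  netdev_tx() has
	 * already placed the buffer on the TX queue, so
	 * netdev_tx_defer() simply moves it to the deferred queue;
	 * it will be resubmitted automatically when a completion
	 * frees a descriptor.  Return success so that the core does
	 * not complete the buffer with an error.
	 */
	if ( ( nic->tx_prod - nic->tx_cons ) >= MYDRV_NUM_TX_DESC ) {
		netdev_tx_defer ( netdev, iobuf );
		return 0;
	}

	/* Fill descriptor ( nic->tx_prod % MYDRV_NUM_TX_DESC ) with
	 * the buffer address and length, then notify the hardware
	 * (details are device-specific and omitted here).
	 */
	nic->tx_prod++;

	return 0;
}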

--- a/src/include/ipxe/netdevice.h
+++ b/src/include/ipxe/netdevice.h
@@ -346,6 +346,8 @@
 	size_t max_pkt_len;
 	/** TX packet queue */
 	struct list_head tx_queue;
+	/** Deferred TX packet queue */
+	struct list_head tx_deferred;
 	/** RX packet queue */
 	struct list_head rx_queue;
 	/** TX statistics */
@@ -605,6 +607,8 @@
 extern void netdev_link_err ( struct net_device *netdev, int rc );
 extern void netdev_link_down ( struct net_device *netdev );
 extern int netdev_tx ( struct net_device *netdev, struct io_buffer *iobuf );
+extern void netdev_tx_defer ( struct net_device *netdev,
+			      struct io_buffer *iobuf );
 extern void netdev_tx_err ( struct net_device *netdev,
 			    struct io_buffer *iobuf, int rc );
 extern void netdev_tx_complete_err ( struct net_device *netdev,

--- a/src/net/netdevice.c
+++ b/src/net/netdevice.c
@@ -31,6 +31,7 @@
 #include <ipxe/tables.h>
 #include <ipxe/process.h>
 #include <ipxe/init.h>
+#include <ipxe/malloc.h>
 #include <ipxe/device.h>
 #include <ipxe/errortab.h>
 #include <ipxe/vlan.h>
@@ -212,6 +213,43 @@
 	return rc;
 }
 
+/**
+ * Defer transmitted packet
+ *
+ * @v netdev		Network device
+ * @v iobuf		I/O buffer
+ *
+ * Drivers may call netdev_tx_defer() if there is insufficient space
+ * in the transmit descriptor ring.  Any packets deferred in this way
+ * will be automatically retransmitted as soon as space becomes
+ * available (i.e. as soon as the driver calls netdev_tx_complete()).
+ *
+ * The packet must currently be in the network device's TX queue.
+ *
+ * Drivers utilising netdev_tx_defer() must ensure that space in the
+ * transmit descriptor ring is freed up @b before calling
+ * netdev_tx_complete().  For example, if the ring is modelled using a
+ * producer counter and a consumer counter, then the consumer counter
+ * must be incremented before the call to netdev_tx_complete().
+ * Failure to do this will cause the retransmitted packet to be
+ * immediately redeferred (which will result in out-of-order
+ * transmissions and other nastiness).
+ */
+void netdev_tx_defer ( struct net_device *netdev, struct io_buffer *iobuf ) {
+
+	/* Catch data corruption as early as possible */
+	list_check_contains_entry ( iobuf, &netdev->tx_queue, list );
+
+	/* Remove from transmit queue */
+	list_del ( &iobuf->list );
+
+	/* Add to deferred transmit queue */
+	list_add_tail ( &iobuf->list, &netdev->tx_deferred );
+
+	/* Record "out of space" statistic */
+	netdev_tx_err ( netdev, NULL, -ENOBUFS );
+}
+
 /**
  * Discard transmitted packet
  *
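
The doc comment above contains the one subtle rule of the API: the ring slot must be released before the completion is reported, because netdev_tx_complete() can synchronously retransmit a deferred packet through the driver's own transmit method. Continuing the hypothetical mydrv_* driver sketched earlier, a completion poll respecting that ordering might look like this (mydrv_tx_done() is an assumed hardware-specific helper):

/**
 * Poll for completed transmissions (hypothetical driver)
 *
 * @v netdev		Network device
 */
static void mydrv_poll_tx ( struct net_device *netdev ) {
	struct mydrv_nic *nic = netdev->priv;

	while ( ( nic->tx_cons != nic->tx_prod ) &&
		mydrv_tx_done ( nic, nic->tx_cons ) ) {

		/* Free the ring slot *before* reporting the
		 * completion: netdev_tx_complete_next() may
		 * immediately resubmit a deferred packet via
		 * mydrv_transmit(), which must see the space that
		 * has just been released.
		 */
		nic->tx_cons++;

		/* Hand the oldest queued buffer back to the stack */
		netdev_tx_complete_next ( netdev );
	}
}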
257
 	/* Dequeue and free I/O buffer */
295
 	/* Dequeue and free I/O buffer */
258
 	list_del ( &iobuf->list );
296
 	list_del ( &iobuf->list );
259
 	netdev_tx_err ( netdev, iobuf, rc );
297
 	netdev_tx_err ( netdev, iobuf, rc );
298
+
299
+	/* Transmit first pending packet, if any */
300
+	if ( ( iobuf = list_first_entry ( &netdev->tx_deferred,
301
+					  struct io_buffer, list ) ) != NULL ) {
302
+		list_del ( &iobuf->list );
303
+		netdev_tx ( netdev, iobuf );
304
+	}
260
 }
305
 }
261
 
306
 
262
 /**
307
 /**
270
 void netdev_tx_complete_next_err ( struct net_device *netdev, int rc ) {
315
 void netdev_tx_complete_next_err ( struct net_device *netdev, int rc ) {
271
 	struct io_buffer *iobuf;
316
 	struct io_buffer *iobuf;
272
 
317
 
273
-	list_for_each_entry ( iobuf, &netdev->tx_queue, list ) {
318
+	if ( ( iobuf = list_first_entry ( &netdev->tx_queue, struct io_buffer,
319
+					  list ) ) != NULL ) {
274
 		netdev_tx_complete_err ( netdev, iobuf, rc );
320
 		netdev_tx_complete_err ( netdev, iobuf, rc );
275
-		return;
276
 	}
321
 	}
277
 }
322
 }
278
 
323
 
283
  */
328
  */
284
 static void netdev_tx_flush ( struct net_device *netdev ) {
329
 static void netdev_tx_flush ( struct net_device *netdev ) {
285
 
330
 
286
-	/* Discard any packets in the TX queue */
331
+	/* Discard any packets in the TX queue.  This will also cause
332
+	 * any packets in the deferred TX queue to be discarded
333
+	 * automatically.
334
+	 */
287
 	while ( ! list_empty ( &netdev->tx_queue ) ) {
335
 	while ( ! list_empty ( &netdev->tx_queue ) ) {
288
 		netdev_tx_complete_next_err ( netdev, -ECANCELED );
336
 		netdev_tx_complete_next_err ( netdev, -ECANCELED );
289
 	}
337
 	}
338
+	assert ( list_empty ( &netdev->tx_queue ) );
339
+	assert ( list_empty ( &netdev->tx_deferred ) );
290
 }
340
 }
291
 
341
 
292
 /**
342
 /**
424
 		ref_init ( &netdev->refcnt, free_netdev );
474
 		ref_init ( &netdev->refcnt, free_netdev );
425
 		netdev->link_rc = -EUNKNOWN_LINK_STATUS;
475
 		netdev->link_rc = -EUNKNOWN_LINK_STATUS;
426
 		INIT_LIST_HEAD ( &netdev->tx_queue );
476
 		INIT_LIST_HEAD ( &netdev->tx_queue );
477
+		INIT_LIST_HEAD ( &netdev->tx_deferred );
427
 		INIT_LIST_HEAD ( &netdev->rx_queue );
478
 		INIT_LIST_HEAD ( &netdev->rx_queue );
428
 		netdev_settings_init ( netdev );
479
 		netdev_settings_init ( netdev );
429
 		netdev->priv = ( ( ( void * ) netdev ) + sizeof ( *netdev ) );
480
 		netdev->priv = ( ( ( void * ) netdev ) + sizeof ( *netdev ) );
817
 
868
 
818
 /** Networking stack process */
869
 /** Networking stack process */
819
 PERMANENT_PROCESS ( net_process, net_step );
870
 PERMANENT_PROCESS ( net_process, net_step );
871
+
872
+/**
873
+ * Discard some cached network device data
874
+ *
875
+ * @ret discarded	Number of cached items discarded
876
+ */
877
+static unsigned int net_discard ( void ) {
878
+	struct net_device *netdev;
879
+	struct io_buffer *iobuf;
880
+	unsigned int discarded = 0;
881
+
882
+	/* Try to drop one deferred TX packet from each network device */
883
+	for_each_netdev ( netdev ) {
884
+		if ( ( iobuf = list_first_entry ( &netdev->tx_deferred,
885
+						  struct io_buffer,
886
+						  list ) ) != NULL ) {
887
+
888
+			/* Discard first deferred packet */
889
+			list_del ( &iobuf->list );
890
+			free ( iobuf );
891
+
892
+			/* Report discard */
893
+			discarded++;
894
+		}
895
+	}
896
+
897
+	return discarded;
898
+}
899
+
900
+/** Network device cache discarder */
901
+struct cache_discarder net_discarder __cache_discarder ( CACHE_NORMAL ) = {
902
+	.discard = net_discard,
903
+};
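
A brief note on the last hunk: deferred packets are the one place where this patch lets I/O buffers accumulate on the heap indefinitely, so it also registers a cache discarder. When an allocation would otherwise fail, iPXE asks registered discarders to release cached data, and dropping a deferred frame is an acceptable trade because higher-level protocols already have to cope with lost packets. The same registration pattern can be reused by any code that holds droppable cached data; a sketch under that assumption (the my_cache_* names and the cached-buffer list are hypothetical, and free_iob() is shown as the matching deallocator for buffers obtained from alloc_iob()):

#include <ipxe/iobuf.h>
#include <ipxe/list.h>
#include <ipxe/malloc.h>

/** Hypothetical list of cached I/O buffers held by some subsystem */
static struct list_head my_cached_iobufs =
	LIST_HEAD_INIT ( my_cached_iobufs );

/**
 * Discard one cached I/O buffer, if any (hypothetical)
 *
 * @ret discarded	Number of cached items discarded
 */
static unsigned int my_cache_discard ( void ) {
	struct io_buffer *iobuf;

	/* Give up the oldest cached buffer, if we have one */
	iobuf = list_first_entry ( &my_cached_iobufs, struct io_buffer, list );
	if ( ! iobuf )
		return 0;
	list_del ( &iobuf->list );
	free_iob ( iobuf );
	return 1;
}

/** Hypothetical cache discarder, registered via the same linker table */
struct cache_discarder my_cache_discarder __cache_discarder ( CACHE_NORMAL ) = {
	.discard = my_cache_discard,
};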
