
[netdevice] Add netdev_tx_defer() to allow drivers to defer transmissions

Devices with small transmit descriptor rings may temporarily run out
of space.  Provide netdev_tx_defer() to allow drivers to defer packets
for retransmission as soon as a descriptor becomes available.

Signed-off-by: Michael Brown <mcb30@ipxe.org>
Michael Brown · 11 years ago · tags/v1.20.1
commit 2095ed413e

2 changed files with 91 additions and 3 deletions:
  src/include/ipxe/netdevice.h  (+4, -0)
  src/net/netdevice.c           (+87, -3)

--- a/src/include/ipxe/netdevice.h
+++ b/src/include/ipxe/netdevice.h

@@ -346,6 +346,8 @@ struct net_device {
 	size_t max_pkt_len;
 	/** TX packet queue */
 	struct list_head tx_queue;
+	/** Deferred TX packet queue */
+	struct list_head tx_deferred;
 	/** RX packet queue */
 	struct list_head rx_queue;
 	/** TX statistics */
@@ -605,6 +607,8 @@ netdev_rx_frozen ( struct net_device *netdev ) {
 extern void netdev_link_err ( struct net_device *netdev, int rc );
 extern void netdev_link_down ( struct net_device *netdev );
 extern int netdev_tx ( struct net_device *netdev, struct io_buffer *iobuf );
+extern void netdev_tx_defer ( struct net_device *netdev,
+			      struct io_buffer *iobuf );
 extern void netdev_tx_err ( struct net_device *netdev,
 			    struct io_buffer *iobuf, int rc );
 extern void netdev_tx_complete_err ( struct net_device *netdev,
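
The header change above declares the new entry point.  For context, a
minimal sketch of the intended driver-side usage, assuming a ring
modelled by producer/consumer counters; all mydev_* names and
MYDEV_NUM_TX_DESC are hypothetical illustrations, not part of this
commit:

/* Sketch only: a driver transmit method that defers instead of
 * failing when the descriptor ring is full.
 */
static int mydev_transmit ( struct net_device *netdev,
			    struct io_buffer *iobuf ) {
	struct mydev *mydev = netdev->priv;

	/* Defer packet if no transmit descriptor is available; it
	 * will be retransmitted automatically once the driver calls
	 * netdev_tx_complete().
	 */
	if ( ( mydev->tx_prod - mydev->tx_cons ) >= MYDEV_NUM_TX_DESC ) {
		netdev_tx_defer ( netdev, iobuf );
		return 0;
	}

	/* Populate next descriptor and hand it to the hardware */
	mydev_tx_fill ( mydev, ( mydev->tx_prod++ % MYDEV_NUM_TX_DESC ),
			iobuf );
	return 0;
}

Returning zero is deliberate: as far as netdev_tx() is concerned the
packet has been accepted, and it stays owned by the network device (now
on tx_deferred) rather than being completed with an error.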

--- a/src/net/netdevice.c
+++ b/src/net/netdevice.c

@@ -31,6 +31,7 @@ FILE_LICENCE ( GPL2_OR_LATER );
 #include <ipxe/tables.h>
 #include <ipxe/process.h>
 #include <ipxe/init.h>
+#include <ipxe/malloc.h>
 #include <ipxe/device.h>
 #include <ipxe/errortab.h>
 #include <ipxe/vlan.h>
@@ -212,6 +213,43 @@ int netdev_tx ( struct net_device *netdev, struct io_buffer *iobuf ) {
 	return rc;
 }
 
+/**
+ * Defer transmitted packet
+ *
+ * @v netdev		Network device
+ * @v iobuf		I/O buffer
+ *
+ * Drivers may call netdev_tx_defer() if there is insufficient space
+ * in the transmit descriptor ring.  Any packets deferred in this way
+ * will be automatically retransmitted as soon as space becomes
+ * available (i.e. as soon as the driver calls netdev_tx_complete()).
+ *
+ * The packet must currently be in the network device's TX queue.
+ *
+ * Drivers utilising netdev_tx_defer() must ensure that space in the
+ * transmit descriptor ring is freed up @b before calling
+ * netdev_tx_complete().  For example, if the ring is modelled using a
+ * producer counter and a consumer counter, then the consumer counter
+ * must be incremented before the call to netdev_tx_complete().
+ * Failure to do this will cause the retransmitted packet to be
+ * immediately redeferred (which will result in out-of-order
+ * transmissions and other nastiness).
+ */
+void netdev_tx_defer ( struct net_device *netdev, struct io_buffer *iobuf ) {
+
+	/* Catch data corruption as early as possible */
+	list_check_contains_entry ( iobuf, &netdev->tx_queue, list );
+
+	/* Remove from transmit queue */
+	list_del ( &iobuf->list );
+
+	/* Add to deferred transmit queue */
+	list_add_tail ( &iobuf->list, &netdev->tx_deferred );
+
+	/* Record "out of space" statistic */
+	netdev_tx_err ( netdev, NULL, -ENOBUFS );
+}
+
 /**
  * Discard transmitted packet
  *
@@ -257,6 +295,13 @@ void netdev_tx_complete_err ( struct net_device *netdev,
 	/* Dequeue and free I/O buffer */
 	list_del ( &iobuf->list );
 	netdev_tx_err ( netdev, iobuf, rc );
+
+	/* Transmit first pending packet, if any */
+	if ( ( iobuf = list_first_entry ( &netdev->tx_deferred,
+					  struct io_buffer, list ) ) != NULL ) {
+		list_del ( &iobuf->list );
+		netdev_tx ( netdev, iobuf );
+	}
 }
 
 /**
@@ -270,9 +315,9 @@ void netdev_tx_complete_err ( struct net_device *netdev,
 void netdev_tx_complete_next_err ( struct net_device *netdev, int rc ) {
 	struct io_buffer *iobuf;
 
-	list_for_each_entry ( iobuf, &netdev->tx_queue, list ) {
+	if ( ( iobuf = list_first_entry ( &netdev->tx_queue, struct io_buffer,
+					  list ) ) != NULL ) {
 		netdev_tx_complete_err ( netdev, iobuf, rc );
-		return;
 	}
 }
 
@@ -283,10 +328,15 @@ void netdev_tx_complete_next_err ( struct net_device *netdev, int rc ) {
  */
 static void netdev_tx_flush ( struct net_device *netdev ) {
 
-	/* Discard any packets in the TX queue */
+	/* Discard any packets in the TX queue.  This will also cause
+	 * any packets in the deferred TX queue to be discarded
+	 * automatically.
+	 */
 	while ( ! list_empty ( &netdev->tx_queue ) ) {
 		netdev_tx_complete_next_err ( netdev, -ECANCELED );
 	}
+	assert ( list_empty ( &netdev->tx_queue ) );
+	assert ( list_empty ( &netdev->tx_deferred ) );
 }
 
 /**
@@ -424,6 +474,7 @@ struct net_device * alloc_netdev ( size_t priv_size ) {
 		ref_init ( &netdev->refcnt, free_netdev );
 		netdev->link_rc = -EUNKNOWN_LINK_STATUS;
 		INIT_LIST_HEAD ( &netdev->tx_queue );
+		INIT_LIST_HEAD ( &netdev->tx_deferred );
 		INIT_LIST_HEAD ( &netdev->rx_queue );
 		netdev_settings_init ( netdev );
 		netdev->priv = ( ( ( void * ) netdev ) + sizeof ( *netdev ) );
@@ -817,3 +868,36 @@ __weak struct net_device * vlan_find ( struct net_device *trunk __unused,
 
 /** Networking stack process */
 PERMANENT_PROCESS ( net_process, net_step );
+
+/**
+ * Discard some cached network device data
+ *
+ * @ret discarded	Number of cached items discarded
+ */
+static unsigned int net_discard ( void ) {
+	struct net_device *netdev;
+	struct io_buffer *iobuf;
+	unsigned int discarded = 0;
+
+	/* Try to drop one deferred TX packet from each network device */
+	for_each_netdev ( netdev ) {
+		if ( ( iobuf = list_first_entry ( &netdev->tx_deferred,
+						  struct io_buffer,
+						  list ) ) != NULL ) {
+
+			/* Discard first deferred packet; I/O buffers
+			 * must be freed via free_iob() rather than
+			 * raw free().
+			 */
+			list_del ( &iobuf->list );
+			free_iob ( iobuf );
+
+			/* Report discard */
+			discarded++;
+		}
+	}
+
+	return discarded;
+}
+
+/** Network device cache discarder */
+struct cache_discarder net_discarder __cache_discarder ( CACHE_NORMAL ) = {
+	.discard = net_discard,
+};
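
The ordering requirement documented on netdev_tx_defer() is the subtle
part of this interface.  Continuing the hypothetical mydev sketch from
above, a completion path that satisfies it frees ring space before
completing:

/* Sketch only: increment the consumer counter *before* calling
 * netdev_tx_complete_next(), so that a retransmission triggered
 * from inside the completion finds a usable descriptor.
 */
static void mydev_poll_tx ( struct net_device *netdev ) {
	struct mydev *mydev = netdev->priv;

	while ( mydev->tx_cons != mydev->tx_prod ) {

		/* Stop at first descriptor still owned by hardware */
		if ( ! mydev_tx_done ( mydev, ( mydev->tx_cons %
						MYDEV_NUM_TX_DESC ) ) )
			break;

		/* Free ring space first... */
		mydev->tx_cons++;

		/* ...then complete; this may immediately retransmit a
		 * deferred packet into the slot just freed.
		 */
		netdev_tx_complete_next ( netdev );
	}
}

The cache discarder at the end ties deferral into memory-pressure
handling: as I understand iPXE's allocator, a failing allocation
invokes the registered cache discarders, so net_discard() gives up one
deferred packet per network device and leaves any retransmission to
higher-layer protocols.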
