
[slam] Implement SLAM flow control

On any fast network, or with any driver that may drop packets
(e.g. Infiniband, which has very small RX rings), the traditional
usage of the SLAM protocol will result in enormous numbers of packet
drops and a consequent large number of retransmissions.

By adapting the client behaviour, we can force the server to act more
like a multicast TFTP server, with flow control provided by a single
master client.

This behaviour should interoperate with any traditional SLAM client
(e.g. Etherboot 5.4) on the network.  The SLAM protocol isn't actually
documented anywhere, so it's hard to define either behaviour as
compliant or otherwise.
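
As a rough illustration of the client behaviour described above (this sketch is
not part of the commit; the names nack_first_missing and blocks_received are
hypothetical, and the real implementation is in src/net/udp/slam.c below): a
flow-controlled client NACKs only the first missing block, waits for it to
arrive, then NACKs the next gap, so the server paces itself like a multicast
TFTP server instead of blasting out the whole image.

/* Standalone sketch, not gPXE code: request one missing block per NACK. */
#include <stdio.h>

#define NUM_BLOCKS 8

/* Return the index of the first block not yet received, or -1 if complete */
static int nack_first_missing ( const int *blocks_received, int num_blocks ) {
	int i;

	for ( i = 0 ; i < num_blocks ; i++ ) {
		if ( ! blocks_received[i] )
			return i;
	}
	return -1;
}

int main ( void ) {
	/* 1 = block received, 0 = block still missing */
	int blocks_received[NUM_BLOCKS] = { 1, 1, 0, 1, 0, 0, 1, 1 };
	int block;

	/* A traditional SLAM client would NACK the whole missing-block
	 * list at once, triggering a burst of retransmissions.  Here we
	 * request only the first gap; when it arrives, the next NACK
	 * requests the following gap, and so on.
	 */
	block = nack_first_missing ( blocks_received, NUM_BLOCKS );
	if ( block >= 0 )
		printf ( "NACK: block %d, count 1\n", block );
	else
		printf ( "transfer complete\n" );
	return 0;
}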
tags/v0.9.4
Michael Brown 16 years ago
parent
commit 5d36ec6721
1 changed file with 85 additions and 107 deletions

src/net/udp/slam.c (+85, -107)

@@ -30,6 +30,7 @@
 #include <gpxe/open.h>
 #include <gpxe/uri.h>
 #include <gpxe/tcpip.h>
+#include <gpxe/timer.h>
 #include <gpxe/retry.h>
 
 /** @file
@@ -90,18 +91,18 @@ FEATURE ( FEATURE_PROTOCOL, "SLAM", DHCP_EB_FEATURE_SLAM, 1 );
 #define SLAM_DEFAULT_MULTICAST_PORT 10000
 
 /** Maximum SLAM header length */
-#define SLAM_MAX_HEADER_LEN ( 8 /* transaction id */ + 8 /* total_bytes */ + \
-			      8 /* block_size */ )
+#define SLAM_MAX_HEADER_LEN ( 7 /* transaction id */ + 7 /* total_bytes */ + \
+			      7 /* block_size */ )
 
 /** Maximum SLAM NACK length
  *
- * This is a policy decision.  Shorter packets take less time to
- * construct and spew out less debug output, and there's a limit to
- * how useful it is to send a complete missing-block list anyway; if
- * the loss rate is high then we're going to have to retransmit an
- * updated missing-block list anyway.
+ * We only ever send a NACK for a single packet.
  */
-#define SLAM_MAX_NACK_LEN 16
+#define SLAM_MAX_NACK_LEN ( 7 /* block */ + 1 /* #blocks = 1 */ + \
+			    1 /* NUL */ )
+
+/** SLAM slave timeout */
+#define SLAM_SLAVE_TIMEOUT ( 1 * TICKS_PER_SEC )
 
 /** A SLAM request */
 struct slam_request {
@@ -115,8 +116,10 @@ struct slam_request {
 	/** Multicast socket */
 	struct xfer_interface mc_socket;
 
-	/** NACK timer */
-	struct retry_timer timer;
+	/** Master client retry timer */
+	struct retry_timer master_timer;
+	/** Slave client retry timer */
+	struct retry_timer slave_timer;
 
 	/** Cached header */
 	uint8_t header[SLAM_MAX_HEADER_LEN];
@@ -167,8 +170,9 @@ static void slam_finished ( struct slam_request *slam, int rc ) {
 				   sizeof ( slam_disconnect ) );
 	}
 
-	/* Stop the retry timer */
-	stop_timer ( &slam->timer );
+	/* Stop the retry timers */
+	stop_timer ( &slam->master_timer );
+	stop_timer ( &slam->slave_timer );
 
 	/* Close all data transfer interfaces */
 	xfer_nullify ( &slam->socket );
@@ -191,14 +195,14 @@ static void slam_finished ( struct slam_request *slam, int rc ) {
  * @v slam		SLAM request
  * @v iobuf		I/O buffer
  * @v value		Value to add
- * @v reserved		Length of reserved space at end of buffer
- * @ret len		Length of value, or negative error.
+ * @ret rc		Return status code
  *
- * Adds a variable-length value to the end of an I/O buffer.
+ * Adds a variable-length value to the end of an I/O buffer.  Will
+ * always leave at least one byte of tailroom in the I/O buffer (to
+ * allow space for the terminating NUL).
  */
 static int slam_put_value ( struct slam_request *slam,
-			    struct io_buffer *iobuf, unsigned long value,
-			    size_t reserved ) {
+			    struct io_buffer *iobuf, unsigned long value ) {
 	uint8_t *data;
 	size_t len;
 	unsigned int i;
@@ -207,7 +211,7 @@ static int slam_put_value ( struct slam_request *slam,
 	 * leave at least one byte in the I/O buffer.
 	 */
 	len = ( ( flsl ( value ) + 10 ) / 8 );
-	if ( ( len + reserved ) > iob_tailroom ( iobuf ) ) {
+	if ( len >= iob_tailroom ( iobuf ) ) {
 		DBGC2 ( slam, "SLAM %p cannot add %d-byte value\n",
 			slam, len );
 		return -ENOBUFS;
@@ -227,72 +231,7 @@ static int slam_put_value ( struct slam_request *slam,
 	*data |= ( len << 5 );
 	assert ( value == 0 );
 
-	return len;
-}
-
-/**
- * Build SLAM compressed missing-block list
- *
- * @v slam		SLAM request
- * @v iobuf		I/O buffer
- * @ret rc		Return status code
- */
-static int slam_build_block_list ( struct slam_request *slam,
-				   struct io_buffer *iobuf ) {
-	unsigned long block;
-	unsigned long block_count;
-	int block_present;
-	int last_block_present;
-	int len;
-	size_t last_len = 0;
-	unsigned long last_block_count = 0;
-	int rc;
-
-	DBGC ( slam, "SLAM %p asking for", slam );
-
-	/* Walk bitmap to construct list */
-	block_count = 0;
-	last_block_present = ( ! 0 );
-	for ( block = 0 ; block < slam->num_blocks ; block++ ) {
-		block_present = ( !! bitmap_test ( &slam->bitmap, block ) );
-		if ( block_present != last_block_present ) {
-			if ( ( len = slam_put_value ( slam, iobuf, block_count,
-					     ( sizeof ( block ) + 1 ) ) ) < 0 )
-				goto truncated;
-			DBGC ( slam, "%c%ld",
-			       ( last_block_present ? ' ' : '-' ),
-			       ( last_block_present ? block : block - 1 ) );
-			last_len = len;
-			last_block_count = block_count;
-			last_block_present = block_present;
-			block_count = 0;
-		}
-		block_count++;
-	}
-	if ( ( len = slam_put_value ( slam, iobuf, block_count,
-				      ( sizeof ( block ) + 1 ) ) ) < 0 )
-		goto truncated;
-	DBGC ( slam, "%c%ld\n", ( last_block_present ? ' ' : '-' ),
-	       ( last_block_present ? block : block - 1 ) );
-
 	return 0;
-
- truncated:
-	rc = len;
-	block -= block_count;
-	assert ( last_len != 0 ); /* Cannot truncate on first entry */
-	if ( last_block_present ) {
-		/* Replace last missing-blocks number */
-		DBGC ( slam, "#" );
-		iob_unput ( iobuf, last_len );
-		block -= last_block_count;
-	}
-	/* Report all remaining blocks as missing */
-	block_count = ( slam->num_blocks - block );
-	DBGC ( slam, "-%ld\n", ( slam->num_blocks - 1 ) );
-	len = slam_put_value ( slam, iobuf, block_count, 1 );
-	assert ( len > 0 );
-	return rc;
 }
 
 /**
@@ -303,17 +242,14 @@ static int slam_build_block_list ( struct slam_request *slam,
  */
 static int slam_tx_nack ( struct slam_request *slam ) {
 	struct io_buffer *iobuf;
+	unsigned long block;
 	uint8_t *nul;
-
-	DBGC ( slam, "SLAM %p transmitting NACK\n", slam );
+	int rc;
 
 	/* Mark NACK as sent, so that we know we have to disconnect later */
 	slam->nack_sent = 1;
 
-	/* Use the current block size as a good estimate of how much
-	 * data we can fit in a packet.  If we overrun, it seems to be
-	 * acceptable to drop information anyway.
-	 */
+	/* Allocate I/O buffer */
 	iobuf = xfer_alloc_iob ( &slam->socket,	SLAM_MAX_NACK_LEN );
 	if ( ! iobuf ) {
 		DBGC ( slam, "SLAM %p could not allocate I/O buffer\n",
@@ -321,12 +257,23 @@ static int slam_tx_nack ( struct slam_request *slam ) {
 		return -ENOMEM;
 	}
 
-	/* Build block list.  (Errors are non-fatal; it just means we
-	 * couldn't fit the compressed list within the packet.)
+	/* Construct NACK.  We always request only a single packet;
+	 * this allows us to force multicast-TFTP-style flow control
+	 * on the SLAM server, which will otherwise just blast the
+	 * data out as fast as it can.  On a gigabit network, without
+	 * RX checksumming, this would inevitably cause packet drops.
 	 */
-	slam_build_block_list ( slam, iobuf );
-
-	/* Add NUL terminator */
+	block = bitmap_first_gap ( &slam->bitmap );
+	if ( block ) {
+		DBGCP ( slam, "SLAM %p transmitting NACK for block %ld\n",
+			slam, block );
+	} else {
+		DBGC ( slam, "SLAM %p transmitting initial NACK\n", slam );
+	}
+	if ( ( rc = slam_put_value ( slam, iobuf, block ) ) != 0 )
+		return rc;
+	if ( ( rc = slam_put_value ( slam, iobuf, 1 ) ) != 0 )
+		return rc;
 	nul = iob_put ( iobuf, 1 );
 	*nul = 0;
 
@@ -335,18 +282,47 @@ static int slam_tx_nack ( struct slam_request *slam ) {
 }
 
 /**
- * Handle SLAM retransmission timer expiry
+ * Handle SLAM master client retry timer expiry
  *
- * @v timer		Retry timer
+ * @v timer		Master retry timer
  * @v fail		Failure indicator
  */
-static void slam_timer_expired ( struct retry_timer *timer, int fail ) {
+static void slam_master_timer_expired ( struct retry_timer *timer,
+					int fail ) {
 	struct slam_request *slam =
-		container_of ( timer, struct slam_request, timer );
+		container_of ( timer, struct slam_request, master_timer );
 
 	if ( fail ) {
+		/* Allow timer to stop running.  We will terminate the
+		 * connection only if the slave timer times out.
+		 */
+		DBGC ( slam, "SLAM %p giving up acting as master client\n",
+		       slam );
+	} else {
+		/* Retransmit NACK */
+		start_timer ( timer );
+		slam_tx_nack ( slam );
+	}
+}
+
+/**
+ * Handle SLAM slave client retry timer expiry
+ *
+ * @v timer		Master retry timer
+ * @v fail		Failure indicator
+ */
+static void slam_slave_timer_expired ( struct retry_timer *timer,
+					int fail ) {
+	struct slam_request *slam =
+		container_of ( timer, struct slam_request, slave_timer );
+
+	if ( fail ) {
+		/* Terminate connection */
 		slam_finished ( slam, -ETIMEDOUT );
 	} else {
+		/* Try sending a NACK */
+		DBGC ( slam, "SLAM %p trying to become master client\n",
+		       slam );
 		start_timer ( timer );
 		slam_tx_nack ( slam );
 	}
@@ -490,9 +466,10 @@ static int slam_mc_socket_deliver ( struct xfer_interface *mc_socket,
 	size_t len;
 	int rc;
 
-	/* Hit the timer */
-	stop_timer ( &slam->timer );
-	start_timer ( &slam->timer );
+	/* Stop the master client timer.  Restart the slave client timer. */
+	stop_timer ( &slam->master_timer );
+	stop_timer ( &slam->slave_timer );
+	start_timer_fixed ( &slam->slave_timer, SLAM_SLAVE_TIMEOUT );
 
 	/* Read and strip packet header */
 	if ( ( rc = slam_pull_header ( slam, iobuf ) ) != 0 )
@@ -569,9 +546,9 @@ static int slam_socket_deliver ( struct xfer_interface *socket,
 		container_of ( socket, struct slam_request, socket );
 	int rc;
 
-	/* Hit the timer */
-	stop_timer ( &slam->timer );
-	start_timer ( &slam->timer );
+	/* Restart the master client timer */
+	stop_timer ( &slam->master_timer );
+	start_timer ( &slam->master_timer );
 
 	/* Read and strip packet header */
 	if ( ( rc = slam_pull_header ( slam, iobuf ) ) != 0 )
@@ -755,7 +732,8 @@ static int slam_open ( struct xfer_interface *xfer, struct uri *uri ) {
 	xfer_init ( &slam->socket, &slam_socket_operations, &slam->refcnt );
 	xfer_init ( &slam->mc_socket, &slam_mc_socket_operations,
 		    &slam->refcnt );
-	slam->timer.expired = slam_timer_expired;
+	slam->master_timer.expired = slam_master_timer_expired;
+	slam->slave_timer.expired = slam_slave_timer_expired;
 	/* Fake an invalid cached header of { 0x00, ... } */
 	slam->header_len = 1;
 	/* Fake parameters for initial NACK */
@@ -792,8 +770,8 @@ static int slam_open ( struct xfer_interface *xfer, struct uri *uri ) {
 		goto err;
 	}
 
-	/* Start retry timer */
-	start_timer ( &slam->timer );
+	/* Start slave retry timer */
+	start_timer_fixed ( &slam->slave_timer, SLAM_SLAVE_TIMEOUT );
 
 	/* Attach to parent interface, mortalise self, and return */
 	xfer_plug_plug ( &slam->xfer, xfer );
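
As a reading aid for the diff above, here is a minimal, hypothetical sketch
(plain C, not gPXE code; client_state, on_master_timeout and on_slave_timeout
are invented names) of the two-timer scheme this commit introduces: the master
timer paces NACK retransmission while this client is driving the transfer, and
the slave timer only watches for the data stream going quiet, at which point
the client either tries to take over as master or, on repeated failure, gives
up the connection.

#include <stdbool.h>
#include <stdio.h>

struct client_state {
	bool is_master;		/* are we currently driving the transfer? */
};

/* Master timer expiry: our NACK appears to have gone unanswered */
static void on_master_timeout ( struct client_state *c, bool fail ) {
	if ( fail ) {
		/* Step back; some other client may act as master instead */
		c->is_master = false;
		printf ( "giving up acting as master client\n" );
	} else {
		printf ( "retransmitting NACK\n" );
	}
}

/* Slave timer expiry: no data has arrived for a full slave timeout */
static void on_slave_timeout ( struct client_state *c, bool fail ) {
	if ( fail ) {
		printf ( "terminating connection\n" );
	} else {
		/* Try to become the master client by sending a NACK */
		c->is_master = true;
		printf ( "trying to become master client\n" );
	}
}

int main ( void ) {
	struct client_state c = { .is_master = false };

	on_slave_timeout ( &c, false );		/* data stream went quiet */
	on_master_timeout ( &c, false );	/* NACK lost: retransmit */
	on_master_timeout ( &c, true );		/* give up the master role */
	on_slave_timeout ( &c, true );		/* still nothing: close */
	return 0;
}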
