@@ -93,6 +93,16 @@ FEATURE ( FEATURE_PROTOCOL, "SLAM", DHCP_EB_FEATURE_SLAM, 1 );
 #define SLAM_MAX_HEADER_LEN ( 8 /* transaction id */ + 8 /* total_bytes */ + \
			      8 /* block_size */ )
 
+/** Maximum SLAM NACK length
+ *
+ * This is a policy decision.  Shorter packets take less time to
+ * construct and spew out less debug output, and there's a limit to
+ * how useful it is to send a complete missing-block list anyway; if
+ * the loss rate is high then we're going to have to retransmit an
+ * updated missing-block list anyway.
+ */
+#define SLAM_MAX_NACK_LEN 16
+
 /** A SLAM request */
 struct slam_request {
 	/** Reference counter */
@@ -198,8 +208,8 @@ static int slam_put_value ( struct slam_request *slam,
 	 */
 	len = ( ( flsl ( value ) + 10 ) / 8 );
 	if ( len >= iob_tailroom ( iobuf ) ) {
-		DBGC ( slam, "SLAM %p cannot add %d-byte value\n",
-		       slam, len );
+		DBGC2 ( slam, "SLAM %p cannot add %d-byte value\n",
+			slam, len );
 		return -ENOBUFS;
 	}
 	/* There is no valid way within the protocol that we can end
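
The switch from DBGC to DBGC2 demotes the buffer-full message to second-level debug output, which fits the change further down that treats an over-long block list as non-fatal. For reference, the len = ( ( flsl ( value ) + 10 ) / 8 ) line above sizes each value field of the NACK. Below is a minimal standalone sketch of that calculation, assuming (as the +10 suggests) that three bits of the first byte carry a length prefix; flsl_equiv() is a portable stand-in for flsl(), and the sample values are made up.

/* Sketch only: field sizing as in slam_put_value(), under the
 * assumption of a 3-bit length prefix packed into the first byte.
 */
#include <stdio.h>

/* Portable stand-in for flsl(): 1-based index of the highest set bit,
 * or 0 if the value is zero.
 */
static int flsl_equiv ( unsigned long value ) {
	int bits = 0;
	while ( value ) {
		value >>= 1;
		bits++;
	}
	return bits;
}

int main ( void ) {
	unsigned long samples[] = { 0x0UL, 0x1fUL, 0x20UL, 0x1234UL };
	unsigned int i;

	for ( i = 0 ; i < ( sizeof ( samples ) / sizeof ( samples[0] ) ) ; i++ ) {
		int len = ( ( flsl_equiv ( samples[i] ) + 10 ) / 8 );
		/* Prints 1, 1, 2 and 2 bytes respectively: the value's
		 * bits plus three prefix bits, rounded up to whole
		 * bytes, with a minimum of one byte for value zero.
		 */
		printf ( "value 0x%lx -> %d-byte field\n", samples[i], len );
	}
	return 0;
}
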
@@ -221,17 +231,59 @@ static int slam_put_value ( struct slam_request *slam,
 }
 
 /**
- * Send SLAM NACK packet
+ * Build SLAM compressed missing-block list
  *
 * @v slam		SLAM request
+ * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
-static int slam_tx_nack ( struct slam_request *slam ) {
-	struct io_buffer *iobuf;
+static int slam_build_block_list ( struct slam_request *slam,
+				   struct io_buffer *iobuf ) {
 	unsigned int block;
 	unsigned int block_count;
 	int block_present;
 	int last_block_present;
+	int rc;
+
+	DBGC ( slam, "SLAM %p asking for ", slam );
+
+	/* Walk bitmap to construct list */
+	block_count = 0;
+	last_block_present = ( ! 0 );
+	for ( block = 0 ; block < slam->num_blocks ; block++ ) {
+		block_present = ( !! bitmap_test ( &slam->bitmap, block ) );
+		if ( block_present != last_block_present ) {
+			if ( ( rc = slam_put_value ( slam, iobuf,
+						     block_count ) ) != 0 ) {
+				DBGC ( slam, "...\n" );
+				return rc;
+			}
+			DBGC ( slam, "%c%d",
+			       ( last_block_present ? ' ' : '-' ),
+			       ( last_block_present ? block : block - 1 ) );
+			block_count = 0;
+			last_block_present = block_present;
+		}
+		block_count++;
+	}
+	if ( ( rc = slam_put_value ( slam, iobuf, block_count ) ) != 0 ) {
+		DBGC ( slam, "...\n" );
+		return rc;
+	}
+	DBGC ( slam, "%c%d\n", ( last_block_present ? ' ' : '-' ),
+	       ( last_block_present ? block : block - 1 ) );
+
+	return 0;
+}
+
+/**
+ * Send SLAM NACK packet
+ *
+ * @v slam		SLAM request
+ * @ret rc		Return status code
+ */
+static int slam_tx_nack ( struct slam_request *slam ) {
+	struct io_buffer *iobuf;
 	uint8_t *nul;
 
 	DBGC ( slam, "SLAM %p transmitting NACK\n", slam );
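
A worked example may help here: slam_build_block_list() emits the missing-block list as alternating run lengths, always starting with a count of received blocks. The standalone sketch below walks a plain array instead of the real bitmap and merely prints the values rather than packing them with slam_put_value(); the sample bitmap is made up.

/* Sketch only: the run-length walk performed by slam_build_block_list(),
 * with an ordinary array standing in for the bitmap.
 */
#include <stdio.h>

int main ( void ) {
	/* 1 = block received, 0 = block missing (made-up example) */
	int bitmap[] = { 1, 1, 1, 0, 0, 1, 1, 1, 1, 0 };
	unsigned int num_blocks = ( sizeof ( bitmap ) / sizeof ( bitmap[0] ) );
	unsigned int block;
	unsigned int block_count = 0;
	int last_block_present = 1;

	printf ( "list:" );
	for ( block = 0 ; block < num_blocks ; block++ ) {
		int block_present = bitmap[block];
		if ( block_present != last_block_present ) {
			/* A run just ended; emit its length */
			printf ( " %d", block_count );
			block_count = 0;
			last_block_present = block_present;
		}
		block_count++;
	}
	printf ( " %d\n", block_count );
	/* Prints "list: 3 2 4 1": 3 received, 2 missing (blocks 3-4),
	 * 4 received, 1 missing (block 9).  For the same bitmap the
	 * real function's debug output would read "asking for 3-4 9-9",
	 * i.e. the missing ranges.
	 */
	return 0;
}
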
@@ -243,26 +295,17 @@ static int slam_tx_nack ( struct slam_request *slam ) {
	 * data we can fit in a packet.  If we overrun, it seems to be
	 * acceptable to drop information anyway.
	 */
-	iobuf = xfer_alloc_iob ( &slam->socket, slam->block_size );
+	iobuf = xfer_alloc_iob ( &slam->socket, SLAM_MAX_NACK_LEN );
 	if ( ! iobuf ) {
 		DBGC ( slam, "SLAM %p could not allocate I/O buffer\n",
		       slam );
 		return -ENOMEM;
 	}
 
-	/* Walk bitmap to construct list */
-	block_count = 0;
-	last_block_present = ( ! 0 );
-	for ( block = 0 ; block < slam->num_blocks ; block++ ) {
-		block_present = ( !! bitmap_test ( &slam->bitmap, block ) );
-		if ( block_present != last_block_present ) {
-			slam_put_value ( slam, iobuf, block_count );
-			block_count = 0;
-			last_block_present = block_present;
-		}
-		block_count++;
-	}
-	slam_put_value ( slam, iobuf, block_count );
+	/* Build block list. (Errors are non-fatal; it just means we
+	 * couldn't fit the compressed list within the packet.)
+	 */
+	slam_build_block_list ( slam, iobuf );
 
 	/* Add NUL terminator */
 	nul = iob_put ( iobuf, 1 );
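
The comment added here makes the truncation policy explicit: if the compressed list outgrows the fixed-size buffer, the NACK is still sent with whatever fitted, and a later NACK covers the rest. A rough standalone sketch of that allocate / build / NUL-terminate flow follows, with simplified stand-ins (MAX_NACK_LEN, put_value(), a one-byte-per-value encoding) replacing the real I/O-buffer calls.

/* Sketch only: degrade gracefully when the run-length list does not
 * fit, then append the terminator and send what we have.
 */
#include <stdio.h>

#define MAX_NACK_LEN 16

/* Append one value; fail (non-fatally) when it no longer fits.  The
 * >= comparison mirrors slam_put_value()'s tailroom check and happens
 * to leave a byte spare for the terminator.
 */
static int put_value ( unsigned char *buf, size_t *used, unsigned char value ) {
	if ( ( *used + 1 ) >= MAX_NACK_LEN )
		return -1;
	buf[(*used)++] = value;
	return 0;
}

int main ( void ) {
	unsigned char buf[MAX_NACK_LEN];
	size_t used = 0;
	/* Made-up run lengths; deliberately more than will fit */
	unsigned char runs[] = { 3, 2, 4, 1, 7, 5, 9, 2, 6, 1, 8, 3, 5, 4, 2, 1, 3, 2 };
	unsigned int i;

	for ( i = 0 ; i < ( sizeof ( runs ) / sizeof ( runs[0] ) ) ; i++ ) {
		if ( put_value ( buf, &used, runs[i] ) != 0 ) {
			/* Non-fatal: send a truncated list */
			break;
		}
	}
	buf[used++] = 0;	/* NUL terminator */
	printf ( "sending %zu of %d bytes, %u of %zu runs\n",
		 used, MAX_NACK_LEN, i, ( sizeof ( runs ) / sizeof ( runs[0] ) ) );
	return 0;
}
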
@@ -697,7 +740,6 @@ static int slam_open ( struct xfer_interface *xfer, struct uri *uri ) {
 	/* Fake an invalid cached header of { 0x00, ... } */
 	slam->header_len = 1;
 	/* Fake parameters for initial NACK */
-	slam->block_size = 512;
 	slam->num_blocks = 1;
 	if ( ( rc = bitmap_resize ( &slam->bitmap, 1 ) ) != 0 ) {
 		DBGC ( slam, "SLAM %p could not allocate initial bitmap: "