
[ncm] Use large multi-packet buffers by default

Some devices have a very small number of internal buffers, and rely on
being able to pack multiple packets into each buffer.  Using 2048-byte
buffers on such devices produces throughput of around 100Mbps.  Using
a small number of much larger buffers (e.g. 32kB) increases the
throughput to around 780Mbps.  (The full 1Gbps is not reached because
the high RTT induced by the use of multi-packet buffers causes us to
saturate our 256kB TCP window.)
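
(As a rough check on those figures, not a measurement: sustained TCP
throughput is bounded by the window size divided by the RTT, so a 256kB
window gives

    throughput <= ( 256 * 1024 * 8 ) bits / RTT ~= 2.1 Mbit / RTT

which works out to roughly 780Mbps at an RTT of around 2.7ms, and would
need the RTT to drop below about 2.1ms to reach the full 1Gbps.)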

Since allocation of large buffers is very likely to fail, allocate the
buffer set only once when the device is opened and recycle buffers
immediately after use.  Received data is now always copied to
per-packet buffers.

If allocation of large buffers fails, fall back to allocating a larger
number of smaller buffers.  This will give reduced performance, but
the device will at least still be functional.
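
(To make the fallback concrete, a worked example using the policy constants
added to ncm.h below, and assuming a hypothetical device that reports a 64kB
maximum NTB input size: ncm_in_alloc() starts at the reported maximum and
halves the buffer size on each attempt.  64kB buffers are skipped outright,
since even the minimum of 3 buffers (NCM_IN_MIN_COUNT) would exceed the
128kB NCM_IN_MAX_SIZE cap, so the first attempt is 3x 32kB buffers; if that
allocation fails, the driver falls back to 3x 16kB, 3x 8kB, 4x 4kB and
finally 8x 2kB buffers before giving up with -ENOMEM.)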

Share code between the interrupt and bulk IN endpoint handlers, since
the buffer handling is now very similar.

Signed-off-by: Michael Brown <mcb30@ipxe.org>
Michael Brown, 9 years ago
Commit 2d3f2b2446 (tags/v1.20.1)

2 changed files with 301 additions and 165 deletions:
  1. src/drivers/net/ncm.c (+251, -140)
  2. src/drivers/net/ncm.h (+50, -25)

src/drivers/net/ncm.c (+251, -140)

@@ -35,14 +35,14 @@ FILE_LICENCE ( GPL2_OR_LATER );
  *
  */
 
+/** Ring refill profiler */
+static struct profiler ncm_refill_profiler __profiler =
+	{ .name = "ncm.refill" };
+
 /** Interrupt completion profiler */
 static struct profiler ncm_intr_profiler __profiler =
 	{ .name = "ncm.intr" };
 
-/** Interrupt refill profiler */
-static struct profiler ncm_intr_refill_profiler __profiler =
-	{ .name = "ncm.intr_refill" };
-
 /** Bulk IN completion profiler */
 static struct profiler ncm_in_profiler __profiler =
 	{ .name = "ncm.in" };
@@ -51,55 +51,155 @@ static struct profiler ncm_in_profiler __profiler =
 static struct profiler ncm_in_datagram_profiler __profiler =
 	{ .name = "ncm.in_dgram" };
 
-/** Bulk IN refill profiler */
-static struct profiler ncm_in_refill_profiler __profiler =
-	{ .name = "ncm.in_refill" };
-
 /** Bulk OUT profiler */
 static struct profiler ncm_out_profiler __profiler =
 	{ .name = "ncm.out" };
 
 /******************************************************************************
  *
- * CDC-NCM communications interface
+ * Ring management
  *
  ******************************************************************************
  */
 
 /**
- * Refill interrupt ring
+ * Transcribe receive ring name (for debugging)
+ *
+ * @v ncm		CDC-NCM device
+ * @v ring		Receive ring
+ * @ret name		Receive ring name
+ */
+static inline const char * ncm_rx_name ( struct ncm_device *ncm,
+					 struct ncm_rx_ring *ring ) {
+	if ( ring == &ncm->intr ) {
+		return "interrupt";
+	} else if ( ring == &ncm->in ) {
+		return "bulk IN";
+	} else {
+		return "UNKNOWN";
+	}
+}
+
+/**
+ * Allocate receive ring buffers
+ *
+ * @v ncm		CDC-NCM device
+ * @v ring		Receive ring
+ * @v mtu		I/O buffer size
+ * @v count		Number of I/O buffers
+ * @ret rc		Return status code
+ */
+static int ncm_rx_alloc ( struct ncm_device *ncm, struct ncm_rx_ring *ring,
+			  size_t mtu, unsigned int count ) {
+	struct io_buffer *iobuf;
+	struct io_buffer *tmp;
+	unsigned int i;
+	int rc;
+
+	/* Initialise ring */
+	ring->mtu = mtu;
+	INIT_LIST_HEAD ( &ring->list );
+
+	/* Allocate I/O buffers */
+	for ( i = 0 ; i < count ; i++ ) {
+		iobuf = alloc_iob ( mtu );
+		if ( ! iobuf ) {
+			DBGC ( ncm, "NCM %p could not allocate %dx %zd-byte "
+			       "buffers for %s\n", ncm, count, mtu,
+			       ncm_rx_name ( ncm, ring ) );
+			rc = -ENOMEM;
+			goto err_alloc;
+		}
+		list_add ( &iobuf->list, &ring->list );
+	}
+
+	return 0;
+
+ err_alloc:
+	list_for_each_entry_safe ( iobuf, tmp, &ring->list, list ) {
+		list_del ( &iobuf->list );
+		free_iob ( iobuf );
+	}
+	return rc;
+}
+
+/**
+ * Refill receive ring
  *
  * @v ncm		CDC-NCM device
+ * @v ring		Receive ring
+ * @ret rc		Return status code
  */
-static void ncm_intr_refill ( struct ncm_device *ncm ) {
+static int ncm_rx_refill ( struct ncm_device *ncm, struct ncm_rx_ring *ring ) {
 	struct io_buffer *iobuf;
-	size_t mtu = ncm->intr.mtu;
 	int rc;
 
-	/* Enqueue any available I/O buffers */
-	while ( ( iobuf = list_first_entry ( &ncm->intrs, struct io_buffer,
+	/* Enqueue any recycled I/O buffers */
+	while ( ( iobuf = list_first_entry ( &ring->list, struct io_buffer,
 					     list ) ) ) {
 
 		/* Profile refill */
-		profile_start ( &ncm_intr_refill_profiler );
+		profile_start ( &ncm_refill_profiler );
 
 		/* Reset size */
-		iob_put ( iobuf, ( mtu - iob_len ( iobuf ) ) );
+		iob_put ( iobuf, ( ring->mtu - iob_len ( iobuf ) ) );
 
 		/* Enqueue I/O buffer */
-		if ( ( rc = usb_stream ( &ncm->intr, iobuf ) ) != 0 ) {
-			DBGC ( ncm, "NCM %p could not enqueue interrupt: %s\n",
-			       ncm, strerror ( rc ) );
-			/* Leave in available list and wait for next refill */
-			return;
+		if ( ( rc = usb_stream ( &ring->ep, iobuf ) ) != 0 ) {
+			DBGC ( ncm, "NCM %p could not enqueue %s: %s\n", ncm,
+			       ncm_rx_name ( ncm, ring ), strerror ( rc ) );
+			/* Leave in recycled list and wait for next refill */
+			return rc;
 		}
 
-		/* Remove from available list */
+		/* Remove from recycled list */
 		list_del ( &iobuf->list );
-		profile_stop ( &ncm_intr_refill_profiler );
+		profile_stop ( &ncm_refill_profiler );
 	}
+
+	return 0;
+}
+
+/**
+ * Recycle receive buffer
+ *
+ * @v ncm		CDC-NCM device
+ * @v ring		Receive ring
+ * @v iobuf		I/O buffer
+ */
+static inline void ncm_rx_recycle ( struct ncm_device *ncm __unused,
+				    struct ncm_rx_ring *ring,
+				    struct io_buffer *iobuf ) {
+
+	/* Add to recycled list */
+	list_add_tail ( &iobuf->list, &ring->list );
 }
 
+/**
+ * Free receive ring
+ *
+ * @v ncm		CDC-NCM device
+ * @v ring		Receive ring
+ */
+static void ncm_rx_free ( struct ncm_device *ncm __unused,
+			  struct ncm_rx_ring *ring ) {
+	struct io_buffer *iobuf;
+	struct io_buffer *tmp;
+
+	/* Free I/O buffers */
+	list_for_each_entry_safe ( iobuf, tmp, &ring->list, list ) {
+		list_del ( &iobuf->list );
+		free_iob ( iobuf );
+	}
+}
+
+/******************************************************************************
+ *
+ * CDC-NCM communications interface
+ *
+ ******************************************************************************
+ */
+
 /**
  * Complete interrupt transfer
  *
@@ -109,7 +209,7 @@ static void ncm_intr_refill ( struct ncm_device *ncm ) {
  */
 static void ncm_intr_complete ( struct usb_endpoint *ep,
 				struct io_buffer *iobuf, int rc ) {
-	struct ncm_device *ncm = container_of ( ep, struct ncm_device, intr );
+	struct ncm_device *ncm = container_of ( ep, struct ncm_device, intr.ep);
 	struct net_device *netdev = ncm->netdev;
 	struct usb_setup_packet *message;
 	size_t len = iob_len ( iobuf );
@@ -161,8 +261,8 @@ static void ncm_intr_complete ( struct usb_endpoint *ep,
 	}
 
  done:
-	/* Return I/O buffer to available list */
-	list_add_tail ( &iobuf->list, &ncm->intrs );
+	/* Recycle buffer */
+	ncm_rx_recycle ( ncm, &ncm->intr, iobuf );
 	profile_stop ( &ncm_intr_profiler );
 }
 
@@ -178,23 +278,18 @@ static struct usb_endpoint_driver_operations ncm_intr_operations = {
  * @ret rc		Return status code
  */
 static int ncm_comms_open ( struct ncm_device *ncm ) {
-	struct io_buffer *iobuf;
-	struct io_buffer *tmp;
-	unsigned int i;
 	int rc;
 
 	/* Allocate I/O buffers */
-	for ( i = 0 ; i < NCM_INTR_FILL ; i++ ) {
-		iobuf = alloc_iob ( ncm->intr.mtu );
-		if ( ! iobuf ) {
-			rc = -ENOMEM;
-			goto err_alloc_iob;
-		}
-		list_add ( &iobuf->list, &ncm->intrs );
+	if ( ( rc = ncm_rx_alloc ( ncm, &ncm->intr, ncm->intr.ep.mtu,
+				   NCM_INTR_COUNT ) ) != 0 ) {
+		DBGC ( ncm, "NCM %p could not allocate RX buffers: %s\n",
+		       ncm, strerror ( rc ) );
+		goto err_alloc;
 	}
 
 	/* Open interrupt endpoint */
-	if ( ( rc = usb_endpoint_open ( &ncm->intr ) ) != 0 ) {
+	if ( ( rc = usb_endpoint_open ( &ncm->intr.ep ) ) != 0 ) {
 		DBGC ( ncm, "NCM %p could not open interrupt: %s\n",
 		       ncm, strerror ( rc ) );
 		goto err_open;
@@ -202,13 +297,10 @@ static int ncm_comms_open ( struct ncm_device *ncm ) {
 
 	return 0;
 
-	usb_endpoint_close ( &ncm->intr );
+	usb_endpoint_close ( &ncm->intr.ep );
  err_open:
- err_alloc_iob:
-	list_for_each_entry_safe ( iobuf, tmp, &ncm->intrs, list ) {
-		list_del ( &iobuf->list );
-		free_iob ( iobuf );
-	}
+	ncm_rx_free ( ncm, &ncm->intr );
+ err_alloc:
 	return rc;
 }
 
@@ -218,17 +310,12 @@ static int ncm_comms_open ( struct ncm_device *ncm ) {
  * @v ncm		CDC-NCM device
  */
 static void ncm_comms_close ( struct ncm_device *ncm ) {
-	struct io_buffer *iobuf;
-	struct io_buffer *tmp;
 
 	/* Close interrupt endpoint */
-	usb_endpoint_close ( &ncm->intr );
+	usb_endpoint_close ( &ncm->intr.ep );
 
 	/* Free I/O buffers */
-	list_for_each_entry_safe ( iobuf, tmp, &ncm->intrs, list ) {
-		list_del ( &iobuf->list );
-		free_iob ( iobuf );
-	}
+	ncm_rx_free ( ncm, &ncm->intr );
 }
 
 /******************************************************************************
@@ -239,39 +326,48 @@ static void ncm_comms_close ( struct ncm_device *ncm ) {
  */
 
 /**
- * Refill bulk IN ring
+ * Allocate bulk IN receive ring buffers
  *
  * @v ncm		CDC-NCM device
+ * @ret rc		Return status code
  */
-static void ncm_in_refill ( struct ncm_device *ncm ) {
-	struct net_device *netdev = ncm->netdev;
-	struct io_buffer *iobuf;
+static int ncm_in_alloc ( struct ncm_device *ncm ) {
+	size_t mtu;
+	unsigned int count;
 	int rc;
 
-	/* Refill ring */
-	while ( ncm->fill < NCM_IN_FILL ) {
-
-		/* Profile refill */
-		profile_start ( &ncm_in_refill_profiler );
-
-		/* Allocate I/O buffer */
-		iobuf = alloc_iob ( NCM_NTB_INPUT_SIZE );
-		if ( ! iobuf ) {
-			/* Wait for next refill */
-			break;
-		}
-		iob_put ( iobuf, NCM_NTB_INPUT_SIZE );
-
-		/* Enqueue I/O buffer */
-		if ( ( rc = usb_stream ( &ncm->in, iobuf ) ) != 0 ) {
-			netdev_rx_err ( netdev, iobuf, rc );
-			break;
-		}
-
-		/* Increment fill level */
-		ncm->fill++;
-		profile_stop ( &ncm_in_refill_profiler );
+	/* Some devices have a very small number of internal buffers,
+	 * and rely on being able to pack multiple packets into each
+	 * buffer.  We therefore want to use large buffers if
+	 * possible.  However, large allocations have a reasonable
+	 * chance of failure, especially if this is not the first or
+	 * only device to be opened.
+	 *
+	 * We therefore attempt to find a usable buffer size, starting
+	 * large and working downwards until allocation succeeds.
+	 * Smaller buffers will still work, albeit with a higher
+	 * chance of packet loss and so lower overall throughput.
+	 */
+	for ( mtu = ncm->mtu ; mtu >= NCM_MIN_NTB_INPUT_SIZE ; mtu >>= 1 ) {
+
+		/* Attempt allocation at this MTU */
+		if ( mtu > NCM_MAX_NTB_INPUT_SIZE )
+			continue;
+		count = ( NCM_IN_MIN_SIZE / mtu );
+		if ( count < NCM_IN_MIN_COUNT )
+			count = NCM_IN_MIN_COUNT;
+		if ( ( count * mtu ) > NCM_IN_MAX_SIZE )
+			continue;
+		if ( ( rc = ncm_rx_alloc ( ncm, &ncm->in, mtu, count ) ) != 0 )
+			continue;
+
+		DBGC ( ncm, "NCM %p using %dx %zd-byte buffers for bulk IN\n",
+		       ncm, count, mtu );
+		return 0;
 	}
+
+	DBGC ( ncm, "NCM %p could not allocate bulk IN buffers\n", ncm );
+	return -ENOMEM;
 }
 
 /**
@@ -283,7 +379,7 @@ static void ncm_in_refill ( struct ncm_device *ncm ) {
  */
 static void ncm_in_complete ( struct usb_endpoint *ep, struct io_buffer *iobuf,
 			      int rc ) {
-	struct ncm_device *ncm = container_of ( ep, struct ncm_device, in );
+	struct ncm_device *ncm = container_of ( ep, struct ncm_device, in.ep );
 	struct net_device *netdev = ncm->netdev;
 	struct ncm_transfer_header *nth;
 	struct ncm_datagram_pointer *ndp;
@@ -299,16 +395,16 @@ static void ncm_in_complete ( struct usb_endpoint *ep, struct io_buffer *iobuf,
 	/* Profile overall bulk IN completion */
 	profile_start ( &ncm_in_profiler );
 
-	/* Decrement fill level */
-	ncm->fill--;
-
 	/* Ignore packets cancelled when the endpoint closes */
 	if ( ! ep->open )
 		goto ignore;
 
 	/* Record USB errors against the network device */
-	if ( rc != 0 )
+	if ( rc != 0 ) {
+		DBGC ( ncm, "NCM %p bulk IN failed: %s\n",
+		       ncm, strerror ( rc ) );
 		goto drop;
+	}
 
 	/* Locate transfer header */
 	len = iob_len ( iobuf );
@@ -358,23 +454,21 @@ static void ncm_in_complete ( struct usb_endpoint *ep, struct io_buffer *iobuf,
 		/* Move to next descriptor */
 		desc++;
 
-		/* Create new I/O buffer if necessary */
-		if ( remaining && desc->offset ) {
-			/* More packets remain: create new buffer */
-			pkt = alloc_iob ( pkt_len );
-			if ( ! pkt ) {
-				/* Record error and continue */
-				netdev_rx_err ( netdev, NULL, -ENOMEM );
-				continue;
-			}
-			memcpy ( iob_put ( pkt, pkt_len ),
-				 ( iobuf->data + pkt_offset ), pkt_len );
-		} else {
-			/* This is the last packet: use in situ */
-			pkt = iob_disown ( iobuf );
-			iob_pull ( pkt, pkt_offset );
-			iob_unput ( pkt, ( iob_len ( pkt ) - pkt_len ) );
+		/* Copy data to a new I/O buffer.  Our USB buffers may
+		 * be very large and so we choose to recycle the
+		 * buffers directly rather than attempt reallocation
+		 * while the device is running.  We therefore copy the
+		 * data to a new I/O buffer even if this is the only
+		 * (or last) packet within the buffer.
+		 */
+		pkt = alloc_iob ( pkt_len );
+		if ( ! pkt ) {
+			/* Record error and continue */
+			netdev_rx_err ( netdev, NULL, -ENOMEM );
+			continue;
 		}
+		memcpy ( iob_put ( pkt, pkt_len ),
+			 ( iobuf->data + pkt_offset ), pkt_len );
 
 		/* Strip CRC, if present */
 		if ( ndp->magic & cpu_to_le32 ( NCM_DATAGRAM_POINTER_MAGIC_CRC))
@@ -385,22 +479,20 @@ static void ncm_in_complete ( struct usb_endpoint *ep, struct io_buffer *iobuf,
 		profile_stop ( &ncm_in_datagram_profiler );
 	}
 
-	/* Free I/O buffer (if still present) */
-	free_iob ( iobuf );
-
+	/* Recycle I/O buffer */
+	ncm_rx_recycle ( ncm, &ncm->in, iobuf );
 	profile_stop ( &ncm_in_profiler );
+
 	return;
 
  error:
 	rc = -EIO;
 drop:
+	/* Record error against network device */
 	DBGC_HDA ( ncm, 0, iobuf->data, iob_len ( iobuf ) );
-	netdev_rx_err ( netdev, iobuf, rc );
-	return;
-
+	netdev_rx_err ( netdev, NULL, rc );
 ignore:
-	free_iob ( iobuf );
-	return;
+	ncm_rx_recycle ( ncm, &ncm->in, iobuf );
 }
 
 /** Bulk IN endpoint operations */
@@ -419,7 +511,7 @@ static int ncm_out_transmit ( struct ncm_device *ncm,
 			      struct io_buffer *iobuf ) {
 	struct ncm_ntb_header *header;
 	size_t len = iob_len ( iobuf );
-	size_t header_len = ( sizeof ( *header ) + ncm->padding );
+	size_t header_len = ( sizeof ( *header ) + ncm->out.padding );
 	int rc;
 
 	/* Profile transmissions */
@@ -433,7 +525,7 @@ static int ncm_out_transmit ( struct ncm_device *ncm,
 	/* Populate header */
 	header->nth.magic = cpu_to_le32 ( NCM_TRANSFER_HEADER_MAGIC );
 	header->nth.header_len = cpu_to_le16 ( sizeof ( header->nth ) );
-	header->nth.sequence = cpu_to_le16 ( ncm->sequence );
+	header->nth.sequence = cpu_to_le16 ( ncm->out.sequence );
 	header->nth.len = cpu_to_le16 ( iob_len ( iobuf ) );
 	header->nth.offset =
 		cpu_to_le16 ( offsetof ( typeof ( *header ), ndp ) );
@@ -446,11 +538,11 @@ static int ncm_out_transmit ( struct ncm_device *ncm,
 	memset ( &header->desc[1], 0, sizeof ( header->desc[1] ) );
 
 	/* Enqueue I/O buffer */
-	if ( ( rc = usb_stream ( &ncm->out, iobuf ) ) != 0 )
+	if ( ( rc = usb_stream ( &ncm->out.ep, iobuf ) ) != 0 )
 		return rc;
 
 	/* Increment sequence number */
-	ncm->sequence++;
+	ncm->out.sequence++;
 
 	profile_stop ( &ncm_out_profiler );
 	return 0;
@@ -465,7 +557,7 @@ static int ncm_out_transmit ( struct ncm_device *ncm,
  */
 static void ncm_out_complete ( struct usb_endpoint *ep, struct io_buffer *iobuf,
 			       int rc ) {
-	struct ncm_device *ncm = container_of ( ep, struct ncm_device, out );
+	struct ncm_device *ncm = container_of ( ep, struct ncm_device, out.ep );
 	struct net_device *netdev = ncm->netdev;
 
 	/* Report TX completion */
@@ -488,13 +580,17 @@ static int ncm_data_open ( struct ncm_device *ncm ) {
 	struct ncm_set_ntb_input_size size;
 	int rc;
 
+	/* Allocate I/O buffers */
+	if ( ( rc = ncm_in_alloc ( ncm ) ) != 0 )
+		goto err_alloc;
+
 	/* Set maximum input size */
 	memset ( &size, 0, sizeof ( size ) );
-	size.mtu = cpu_to_le32 ( NCM_NTB_INPUT_SIZE );
+	size.mtu = cpu_to_le32 ( ncm->in.mtu );
 	if ( ( rc = usb_control ( usb, NCM_SET_NTB_INPUT_SIZE, 0, ncm->comms,
 				  &size, sizeof ( size ) ) ) != 0 ) {
-		DBGC ( ncm, "NCM %p could not set input size: %s\n",
-		       ncm, strerror ( rc ) );
+		DBGC ( ncm, "NCM %p could not set input size to %zd: %s\n",
+		       ncm, ncm->in.mtu, strerror ( rc ) );
 		goto err_set_ntb_input_size;
 	}
 
@@ -507,28 +603,33 @@ static int ncm_data_open ( struct ncm_device *ncm ) {
 	}
 
 	/* Open bulk IN endpoint */
-	if ( ( rc = usb_endpoint_open ( &ncm->in ) ) != 0 ) {
+	if ( ( rc = usb_endpoint_open ( &ncm->in.ep ) ) != 0 ) {
 		DBGC ( ncm, "NCM %p could not open bulk IN: %s\n",
 		       ncm, strerror ( rc ) );
 		goto err_open_in;
 	}
 
 	/* Open bulk OUT endpoint */
-	if ( ( rc = usb_endpoint_open ( &ncm->out ) ) != 0 ) {
+	if ( ( rc = usb_endpoint_open ( &ncm->out.ep ) ) != 0 ) {
 		DBGC ( ncm, "NCM %p could not open bulk OUT: %s\n",
 		       ncm, strerror ( rc ) );
 		goto err_open_out;
 	}
 
+	/* Reset transmit sequence number */
+	ncm->out.sequence = 0;
+
 	return 0;
 
-	usb_endpoint_close ( &ncm->out );
+	usb_endpoint_close ( &ncm->out.ep );
  err_open_out:
-	usb_endpoint_close ( &ncm->in );
+	usb_endpoint_close ( &ncm->in.ep );
  err_open_in:
 	usb_set_interface ( usb, ncm->data, 0 );
 err_set_interface:
 err_set_ntb_input_size:
+	ncm_rx_free ( ncm, &ncm->in );
+ err_alloc:
 	return rc;
 }
 
@@ -541,11 +642,14 @@ static void ncm_data_close ( struct ncm_device *ncm ) {
 	struct usb_device *usb = ncm->usb;
 
 	/* Close endpoints */
-	usb_endpoint_close ( &ncm->out );
-	usb_endpoint_close ( &ncm->in );
+	usb_endpoint_close ( &ncm->out.ep );
+	usb_endpoint_close ( &ncm->in.ep );
 
 	/* Reset data interface */
 	usb_set_interface ( usb, ncm->data, 0 );
+
+	/* Free I/O buffers */
+	ncm_rx_free ( ncm, &ncm->in );
 }
 
 /******************************************************************************
@@ -566,26 +670,30 @@ static int ncm_open ( struct net_device *netdev ) {
 	int rc;
 
 	/* Reset sequence number */
-	ncm->sequence = 0;
+	ncm->out.sequence = 0;
 
 	/* Open communications interface */
 	if ( ( rc = ncm_comms_open ( ncm ) ) != 0 )
 		goto err_comms_open;
 
 	/* Refill interrupt ring */
-	ncm_intr_refill ( ncm );
+	if ( ( rc = ncm_rx_refill ( ncm, &ncm->intr ) ) != 0 )
+		goto err_intr_refill;
 
 	/* Open data interface */
 	if ( ( rc = ncm_data_open ( ncm ) ) != 0 )
 		goto err_data_open;
 
 	/* Refill bulk IN ring */
-	ncm_in_refill ( ncm );
+	if ( ( rc = ncm_rx_refill ( ncm, &ncm->in ) ) != 0 )
+		goto err_in_refill;
 
 	return 0;
 
+ err_in_refill:
 	ncm_data_close ( ncm );
 err_data_open:
+ err_intr_refill:
 	ncm_comms_close ( ncm );
 err_comms_open:
 	return rc;
@@ -604,9 +712,6 @@ static void ncm_close ( struct net_device *netdev ) {
 
 	/* Close communications interface */
 	ncm_comms_close ( ncm );
-
-	/* Sanity check */
-	assert ( ncm->fill == 0 );
 }
 
 /**
@@ -635,15 +740,18 @@ static int ncm_transmit ( struct net_device *netdev,
  */
 static void ncm_poll ( struct net_device *netdev ) {
 	struct ncm_device *ncm = netdev->priv;
+	int rc;
 
 	/* Poll USB bus */
 	usb_poll ( ncm->bus );
 
 	/* Refill interrupt ring */
-	ncm_intr_refill ( ncm );
+	if ( ( rc = ncm_rx_refill ( ncm, &ncm->intr ) ) != 0 )
+		netdev_rx_err ( netdev, NULL, rc );
 
 	/* Refill bulk IN ring */
-	ncm_in_refill ( ncm );
+	if ( ( rc = ncm_rx_refill ( ncm, &ncm->in ) ) != 0 )
+		netdev_rx_err ( netdev, NULL, rc );
 }
 
 /** CDC-NCM network device operations */
@@ -692,10 +800,9 @@ static int ncm_probe ( struct usb_function *func,
 	ncm->usb = usb;
 	ncm->bus = usb->port->hub->bus;
 	ncm->netdev = netdev;
-	usb_endpoint_init ( &ncm->intr, usb, &ncm_intr_operations );
-	usb_endpoint_init ( &ncm->in, usb, &ncm_in_operations );
-	usb_endpoint_init ( &ncm->out, usb, &ncm_out_operations );
-	INIT_LIST_HEAD ( &ncm->intrs );
+	usb_endpoint_init ( &ncm->intr.ep, usb, &ncm_intr_operations );
+	usb_endpoint_init ( &ncm->in.ep, usb, &ncm_in_operations );
+	usb_endpoint_init ( &ncm->out.ep, usb, &ncm_out_operations );
 	DBGC ( ncm, "NCM %p on %s\n", ncm, func->name );
 
 	/* Identify interfaces */
@@ -726,7 +833,7 @@ static int ncm_probe ( struct usb_function *func,
 	}
 
 	/* Describe interrupt endpoint */
-	if ( ( rc = usb_endpoint_described ( &ncm->intr, config, comms,
+	if ( ( rc = usb_endpoint_described ( &ncm->intr.ep, config, comms,
 					     USB_INTERRUPT, 0 ) ) != 0 ) {
 		DBGC ( ncm, "NCM %p could not describe interrupt endpoint: "
 		       "%s\n", ncm, strerror ( rc ) );
@@ -734,7 +841,7 @@ static int ncm_probe ( struct usb_function *func,
 	}
 
 	/* Describe bulk IN endpoint */
-	if ( ( rc = usb_endpoint_described ( &ncm->in, config, data,
+	if ( ( rc = usb_endpoint_described ( &ncm->in.ep, config, data,
 					     USB_BULK_IN, 0 ) ) != 0 ) {
 		DBGC ( ncm, "NCM %p could not describe bulk IN endpoint: "
 		       "%s\n", ncm, strerror ( rc ) );
@@ -742,7 +849,7 @@ static int ncm_probe ( struct usb_function *func,
 	}
 
 	/* Describe bulk OUT endpoint */
-	if ( ( rc = usb_endpoint_described ( &ncm->out, config, data,
+	if ( ( rc = usb_endpoint_described ( &ncm->out.ep, config, data,
 					     USB_BULK_OUT, 0 ) ) != 0 ) {
 		DBGC ( ncm, "NCM %p could not describe bulk OUT endpoint: "
 		       "%s\n", ncm, strerror ( rc ) );
@@ -772,13 +879,17 @@ static int ncm_probe ( struct usb_function *func,
 		goto err_ntb_parameters;
 	}
 
+	/* Get maximum supported input size */
+	ncm->mtu = le32_to_cpu ( params.in.mtu );
+	DBGC2 ( ncm, "NCM %p maximum IN size is %zd bytes\n", ncm, ncm->mtu );
+
 	/* Calculate transmit padding */
-	ncm->padding = ( ( le16_to_cpu ( params.out.remainder ) -
-			   sizeof ( struct ncm_ntb_header ) - ETH_HLEN ) &
-			 ( le16_to_cpu ( params.out.divisor ) - 1 ) );
+	ncm->out.padding = ( ( le16_to_cpu ( params.out.remainder ) -
+			       sizeof ( struct ncm_ntb_header ) - ETH_HLEN ) &
+			     ( le16_to_cpu ( params.out.divisor ) - 1 ) );
 	DBGC2 ( ncm, "NCM %p using %zd-byte transmit padding\n",
-		ncm, ncm->padding );
-	assert ( ( ( sizeof ( struct ncm_ntb_header ) + ncm->padding +
+		ncm, ncm->out.padding );
+	assert ( ( ( sizeof ( struct ncm_ntb_header ) + ncm->out.padding +
 		     ETH_HLEN ) % le16_to_cpu ( params.out.divisor ) ) ==
 		 le16_to_cpu ( params.out.remainder ) );
 

src/drivers/net/ncm.h (+50, -25)

@@ -74,12 +74,11 @@ struct ncm_set_ntb_input_size {
 	uint32_t mtu;
 } __attribute__ (( packed ));
 
-/** NTB input size
- *
- * This is a policy decision.  2048 is the minimum size which must be
- * supported according to the specification.
- */
-#define NCM_NTB_INPUT_SIZE 2048
+/** Minimum allowed NTB input size */
+#define NCM_MIN_NTB_INPUT_SIZE 2048
+
+/** Maximum allowed NTB input size (16-bit) */
+#define NCM_MAX_NTB_INPUT_SIZE 65536
 
 /** CDC-NCM transfer header (16-bit) */
 struct ncm_transfer_header {
@@ -140,6 +139,26 @@ struct ncm_ntb_header {
 	struct ncm_datagram_descriptor desc[2];
 } __attribute__ (( packed ));
 
+/** A CDC-NCM receive ring */
+struct ncm_rx_ring {
+	/** USB endpoint */
+	struct usb_endpoint ep;
+	/** I/O buffer size */
+	size_t mtu;
+	/** Recycled buffer list */
+	struct list_head list;
+};
+
+/** A CDC-NCM transmit ring */
+struct ncm_tx_ring {
+	/** USB endpoint */
+	struct usb_endpoint ep;
+	/** Transmitted packet sequence number */
+	uint16_t sequence;
+	/** Alignment padding required on transmitted packets */
+	size_t padding;
+};
+
 /** A CDC-NCM network device */
 struct ncm_device {
 	/** USB device */
@@ -154,33 +173,39 @@ struct ncm_device {
 	/** Data interface */
 	unsigned int data;
 
-	/** Interrupt endpoint */
-	struct usb_endpoint intr;
-	/** Bulk IN endpoint */
-	struct usb_endpoint in;
-	/** Bulk OUT endpoint */
-	struct usb_endpoint out;
-
-	/** Recycled interrupt I/O buffers */
-	struct list_head intrs;
-	/** Current bulk IN ring fill level */
-	unsigned int fill;
-	/** Transmitted packet sequence number */
-	uint16_t sequence;
-	/** Alignment padding required on transmitted packets */
-	size_t padding;
+	/** Maximum supported NTB input size */
+	size_t mtu;
+
+	/** Interrupt ring */
+	struct ncm_rx_ring intr;
+	/** Bulk IN ring */
+	struct ncm_rx_ring in;
+	/** Bulk OUT ring */
+	struct ncm_tx_ring out;
 };
 
-/** Bulk IN ring fill level
+/** Bulk IN ring minimum buffer count
+ *
+ * This is a policy decision.
+ */
+#define NCM_IN_MIN_COUNT 3
+
+/** Bulk IN ring minimum total buffer size
+ *
+ * This is a policy decision.
+ */
+#define NCM_IN_MIN_SIZE 16384
+
+/** Bulk IN ring maximum total buffer size
  *
  * This is a policy decision.
  */
-#define NCM_IN_FILL 16
+#define NCM_IN_MAX_SIZE 131072
 
-/** Interrupt ring fill level
+/** Interrupt ring buffer count
  *
  * This is a policy decision.
  */
-#define NCM_INTR_FILL 2
+#define NCM_INTR_COUNT 2
 
 #endif /* _NCM_H */
