
[infiniband] Poll completion queues automatically

Currently, all Infiniband users must create a process solely to poll
their completion queues (or must rely on a regularly invoked hook such
as netdev_poll() in ipoib.c).

Move instead to a model whereby the Infiniband core maintains a single
process calling ib_poll_eq(), and polling the event queue triggers
polls of the applicable completion queues.  (At present, the
Infiniband core simply polls all of the device's completion queues.)
Polling a completion queue will now implicitly refill all attached
receive work queues; this is analogous to the way that netdev_poll()
implicitly refills the RX ring.

Infiniband users no longer need to create a process just to poll their
completion queues and refill their receive rings.
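
For orientation, here is a minimal before/after sketch of a consumer's
poll hook under the old and new models.  The struct mydrv container and
the mydrv_poll_* functions are hypothetical illustrations; only
ib_poll_cq(), ib_qset_refill_recv() and ib_poll_eq() are gPXE API
touched by this commit:

	#include <gpxe/infiniband.h>
	#include <gpxe/ib_qset.h>

	/* Hypothetical consumer state: one queue set on one device */
	struct mydrv {
		struct ib_device *ibdev;
		struct ib_queue_set qset;
	};

	/* Before this commit: each consumer polled its own completion
	 * queue(s) and refilled its own receive ring(s) by hand.
	 */
	static void mydrv_poll_old ( struct mydrv *mydrv ) {
		ib_poll_cq ( mydrv->ibdev, mydrv->qset.cq );
		ib_qset_refill_recv ( mydrv->ibdev, &mydrv->qset );
	}

	/* After this commit: one call polls the device's event queue,
	 * which polls every completion queue on the device; each CQ
	 * poll implicitly refills any attached receive work queues.
	 * Consumers without their own poll hook need no code at all,
	 * since the core's event-queue process now does this for every
	 * registered device.
	 */
	static void mydrv_poll_new ( struct mydrv *mydrv ) {
		ib_poll_eq ( mydrv->ibdev );
	}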
Michael Brown, 15 years ago · commit 887d296b88 · tags/v0.9.8

src/drivers/net/ipoib.c (+3, -6)

@@ -743,10 +743,7 @@ static void ipoib_poll ( struct net_device *netdev ) {
 	struct ipoib_device *ipoib = netdev->priv;
 	struct ib_device *ibdev = ipoib->ibdev;
 
-	ib_poll_cq ( ibdev, ipoib->meta.cq );
-	ib_poll_cq ( ibdev, ipoib->data.cq );
-	ib_qset_refill_recv ( ibdev, &ipoib->meta );
-	ib_qset_refill_recv ( ibdev, &ipoib->data );
+	ib_poll_eq ( ibdev );
 }
 
 /**
@@ -861,8 +858,8 @@ static int ipoib_open ( struct net_device *netdev ) {
 	mac->qpn = htonl ( ipoib->data.qp->qpn );
 
 	/* Fill receive rings */
-	ib_qset_refill_recv ( ibdev, &ipoib->meta );
-	ib_qset_refill_recv ( ibdev, &ipoib->data );
+	ib_refill_recv ( ibdev, ipoib->meta.qp );
+	ib_refill_recv ( ibdev, ipoib->data.qp );
 
 	/* Join broadcast group */
 	if ( ( rc = ipoib_join_broadcast_group ( ipoib ) ) != 0 ) {

src/include/gpxe/ib_qset.h (+0, -4)

@@ -18,8 +18,6 @@ struct ib_queue_set {
 	struct ib_completion_queue *cq;
 	/** Queue pair */
 	struct ib_queue_pair *qp;
-	/** Receive work queue maximum fill level */
-	unsigned int recv_max_fill;
 };
 
 extern int ib_create_qset ( struct ib_device *ibdev,
@@ -27,8 +25,6 @@ extern int ib_create_qset ( struct ib_device *ibdev,
 			    struct ib_completion_queue_operations *cq_op,
 			    unsigned int num_send_wqes,
 			    unsigned int num_recv_wqes, unsigned long qkey );
-extern void ib_qset_refill_recv ( struct ib_device *ibdev,
-				  struct ib_queue_set *qset );
 extern void ib_destroy_qset ( struct ib_device *ibdev,
 			      struct ib_queue_set *qset );
 

src/include/gpxe/ib_sma.h (+0, -3)

@@ -10,7 +10,6 @@
 FILE_LICENCE ( GPL2_OR_LATER );
 
 #include <gpxe/infiniband.h>
-#include <gpxe/process.h>
 
 /** Infiniband Subnet Management Agent operations */
 struct ib_sma_operations {
@@ -33,8 +32,6 @@ struct ib_sma {
 	struct ib_completion_queue *cq;
 	/** SMA queue pair */
 	struct ib_queue_pair *qp;
-	/** Poll process */
-	struct process poll;
 };
 
 /** SMA number of send WQEs

src/include/gpxe/infiniband.h (+11, -11)

@@ -154,6 +154,10 @@ struct ib_completion_queue_operations {
 
 /** An Infiniband Completion Queue */
 struct ib_completion_queue {
+	/** Containing Infiniband device */
+	struct ib_device *ibdev;
+	/** List of completion queues on this Infiniband device */
+	struct list_head list;
 	/** Completion queue number */
 	unsigned long cqn;
 	/** Number of completion queue entries */
@@ -310,6 +314,8 @@ struct ib_device {
 	struct list_head list;
 	/** Underlying device */
 	struct device *dev;
+	/** List of completion queues */
+	struct list_head cqs;
 	/** List of queue pairs */
 	struct list_head qps;
 	/** Infiniband operations */
@@ -350,6 +356,8 @@ ib_create_cq ( struct ib_device *ibdev, unsigned int num_cqes,
 	       struct ib_completion_queue_operations *op );
 extern void ib_destroy_cq ( struct ib_device *ibdev,
 			    struct ib_completion_queue *cq );
+extern void ib_poll_cq ( struct ib_device *ibdev,
+			 struct ib_completion_queue *cq );
 extern struct ib_queue_pair *
 ib_create_qp ( struct ib_device *ibdev, unsigned int num_send_wqes,
 	       struct ib_completion_queue *send_cq, unsigned int num_recv_wqes,
@@ -376,6 +384,8 @@ extern void ib_complete_recv ( struct ib_device *ibdev,
 			       struct ib_queue_pair *qp,
 			       struct ib_address_vector *av,
 			       struct io_buffer *iobuf, int rc );
+extern void ib_refill_recv ( struct ib_device *ibdev,
+			     struct ib_queue_pair *qp );
 extern int ib_open ( struct ib_device *ibdev );
 extern void ib_close ( struct ib_device *ibdev );
 extern int ib_mcast_attach ( struct ib_device *ibdev, struct ib_queue_pair *qp,
@@ -388,23 +398,13 @@ extern struct ib_device * alloc_ibdev ( size_t priv_size );
 extern int register_ibdev ( struct ib_device *ibdev );
 extern void unregister_ibdev ( struct ib_device *ibdev );
 extern void ib_link_state_changed ( struct ib_device *ibdev );
+extern void ib_poll_eq ( struct ib_device *ibdev );
 extern struct list_head ib_devices;
 
 /** Iterate over all network devices */
 #define for_each_ibdev( ibdev ) \
 	list_for_each_entry ( (ibdev), &ib_devices, list )
 
-/**
- * Poll completion queue
- *
- * @v ibdev		Infiniband device
- * @v cq		Completion queue
- */
-static inline __always_inline void
-ib_poll_cq ( struct ib_device *ibdev, struct ib_completion_queue *cq ) {
-	ibdev->op->poll_cq ( ibdev, cq );
-}
-
 /**
  * Check link state
  *

src/net/infiniband.c (+109, -3)

@@ -43,6 +43,13 @@ FILE_LICENCE ( GPL2_OR_LATER );
 /** List of Infiniband devices */
 struct list_head ib_devices = LIST_HEAD_INIT ( ib_devices );
 
+/***************************************************************************
+ *
+ * Completion queues
+ *
+ ***************************************************************************
+ */
+
 /**
  * Create completion queue
  *
@@ -63,6 +70,8 @@ ib_create_cq ( struct ib_device *ibdev, unsigned int num_cqes,
 	cq = zalloc ( sizeof ( *cq ) );
 	if ( ! cq )
 		goto err_alloc_cq;
+	cq->ibdev = ibdev;
+	list_add ( &cq->list, &ibdev->cqs );
 	cq->num_cqes = num_cqes;
 	INIT_LIST_HEAD ( &cq->work_queues );
 	cq->op = op;
@@ -81,6 +90,7 @@ ib_create_cq ( struct ib_device *ibdev, unsigned int num_cqes,
 
 	ibdev->op->destroy_cq ( ibdev, cq );
  err_dev_create_cq:
+	list_del ( &cq->list );
 	free ( cq );
  err_alloc_cq:
 	return NULL;
@@ -98,9 +108,37 @@ void ib_destroy_cq ( struct ib_device *ibdev,
 	       ibdev, cq->cqn );
 	assert ( list_empty ( &cq->work_queues ) );
 	ibdev->op->destroy_cq ( ibdev, cq );
+	list_del ( &cq->list );
 	free ( cq );
 }
 
+/**
+ * Poll completion queue
+ *
+ * @v ibdev		Infiniband device
+ * @v cq		Completion queue
+ */
+void ib_poll_cq ( struct ib_device *ibdev,
+		  struct ib_completion_queue *cq ) {
+	struct ib_work_queue *wq;
+
+	/* Poll completion queue */
+	ibdev->op->poll_cq ( ibdev, cq );
+
+	/* Refill receive work queues */
+	list_for_each_entry ( wq, &cq->work_queues, list ) {
+		if ( ! wq->is_send )
+			ib_refill_recv ( ibdev, wq->qp );
+	}
+}
+
+/***************************************************************************
+ *
+ * Work queues
+ *
+ ***************************************************************************
+ */
+
 /**
  * Create queue pair
  *
@@ -400,6 +438,44 @@ void ib_complete_recv ( struct ib_device *ibdev, struct ib_queue_pair *qp,
 	qp->recv.fill--;
 }
 
+/**
+ * Refill receive work queue
+ *
+ * @v ibdev		Infiniband device
+ * @v qp		Queue pair
+ */
+void ib_refill_recv ( struct ib_device *ibdev, struct ib_queue_pair *qp ) {
+	struct io_buffer *iobuf;
+	int rc;
+
+	/* Keep filling while unfilled entries remain */
+	while ( qp->recv.fill < qp->recv.num_wqes ) {
+
+		/* Allocate I/O buffer */
+		iobuf = alloc_iob ( IB_MAX_PAYLOAD_SIZE );
+		if ( ! iobuf ) {
+			/* Non-fatal; we will refill on next attempt */
+			return;
+		}
+
+		/* Post I/O buffer */
+		if ( ( rc = ib_post_recv ( ibdev, qp, iobuf ) ) != 0 ) {
+			DBGC ( ibdev, "IBDEV %p could not refill: %s\n",
+			       ibdev, strerror ( rc ) );
+			free_iob ( iobuf );
+			/* Give up */
+			return;
+		}
+	}
+}
+
+/***************************************************************************
+ *
+ * Link control
+ *
+ ***************************************************************************
+ */
+
 /**
  * Open port
  *
@@ -436,6 +512,13 @@ void ib_close ( struct ib_device *ibdev ) {
 		ibdev->op->close ( ibdev );
 }
 
+/***************************************************************************
+ *
+ * Multicast
+ *
+ ***************************************************************************
+ */
+
 /**
  * Attach to multicast group
  *
@@ -495,6 +578,13 @@ void ib_mcast_detach ( struct ib_device *ibdev, struct ib_queue_pair *qp,
 	}
 }
 
+/***************************************************************************
+ *
+ * Miscellaneous
+ *
+ ***************************************************************************
+ */
+
 /**
  * Get Infiniband HCA information
  *
@@ -540,6 +630,22 @@ void ib_link_state_changed ( struct ib_device *ibdev ) {
 	ipoib_link_state_changed ( ibdev );
 }
 
+/**
+ * Poll event queue
+ *
+ * @v ibdev		Infiniband device
+ */
+void ib_poll_eq ( struct ib_device *ibdev ) {
+	struct ib_completion_queue *cq;
+
+	/* Poll device's event queue */
+	ibdev->op->poll_eq ( ibdev );
+
+	/* Poll all completion queues */
+	list_for_each_entry ( cq, &ibdev->cqs, list )
+		ib_poll_cq ( ibdev, cq );
+}
+
 /**
  * Single-step the Infiniband event queue
  *
@@ -548,9 +654,8 @@ void ib_link_state_changed ( struct ib_device *ibdev ) {
 static void ib_step ( struct process *process __unused ) {
	struct ib_device *ibdev;
 
-	list_for_each_entry ( ibdev, &ib_devices, list ) {
-		ibdev->op->poll_eq ( ibdev );
-	}
+	for_each_ibdev ( ibdev )
+		ib_poll_eq ( ibdev );
 }
 
 /** Infiniband event queue process */
@@ -581,6 +686,7 @@ struct ib_device * alloc_ibdev ( size_t priv_size ) {
 	if ( ibdev ) {
 		drv_priv = ( ( ( void * ) ibdev ) + sizeof ( *ibdev ) );
 		ib_set_drvdata ( ibdev, drv_priv );
+		INIT_LIST_HEAD ( &ibdev->cqs );
 		INIT_LIST_HEAD ( &ibdev->qps );
 		ibdev->lid = IB_LID_NONE;
 		ibdev->pkey = IB_PKEY_NONE;
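
Distilled from the new core code above, the per-device polling chain
now runs as follows (a reading aid, not code added by the commit):

	ib_step()                                /* core event-queue process */
	  -> ib_poll_eq ( ibdev )                /* for each registered device */
	       -> ibdev->op->poll_eq ( ibdev )
	       -> ib_poll_cq ( ibdev, cq )       /* for each CQ on ibdev->cqs */
	            -> ibdev->op->poll_cq ( ibdev, cq )
	            -> ib_refill_recv ( ibdev, wq->qp )  /* for each receive WQ */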

src/net/infiniband/ib_qset.c (+0, -34)

@@ -54,9 +54,6 @@ int ib_create_qset ( struct ib_device *ibdev, struct ib_queue_set *qset,
 	assert ( qset->cq == NULL );
 	assert ( qset->qp == NULL );
 
-	/* Store queue parameters */
-	qset->recv_max_fill = num_recv_wqes;
-
 	/* Allocate completion queue */
 	qset->cq = ib_create_cq ( ibdev, num_cqes, cq_op );
 	if ( ! qset->cq ) {
@@ -83,37 +80,6 @@ int ib_create_qset ( struct ib_device *ibdev, struct ib_queue_set *qset,
 	return rc;
 }
 
-/**
- * Refill IPoIB receive ring
- *
- * @v ibdev		Infiniband device
- * @v qset		Queue set
- */
-void ib_qset_refill_recv ( struct ib_device *ibdev,
-			   struct ib_queue_set *qset ) {
-	struct io_buffer *iobuf;
-	int rc;
-
-	while ( qset->qp->recv.fill < qset->recv_max_fill ) {
-
-		/* Allocate I/O buffer */
-		iobuf = alloc_iob ( IB_MAX_PAYLOAD_SIZE );
-		if ( ! iobuf ) {
-			/* Non-fatal; we will refill on next attempt */
-			return;
-		}
-
-		/* Post I/O buffer */
-		if ( ( rc = ib_post_recv ( ibdev, qset->qp, iobuf ) ) != 0 ) {
-			DBGC ( ibdev, "IBDEV %p could not refill: %s\n",
-			       ibdev, strerror ( rc ) );
-			free_iob ( iobuf );
-			/* Give up */
-			return;
-		}
-	}
-}
-
 /**
  * Destroy queue set
  *

src/net/infiniband/ib_sma.c (+1, -52)

@@ -27,7 +27,6 @@ FILE_LICENCE ( GPL2_OR_LATER );
 #include <byteswap.h>
 #include <gpxe/infiniband.h>
 #include <gpxe/iobuf.h>
-#include <gpxe/process.h>
 #include <gpxe/ib_sma.h>
 
 /**
@@ -348,36 +347,6 @@ static int ib_sma_mad ( struct ib_sma *sma, union ib_mad *mad ) {
 	return 0;
 }
 
-/**
- * Refill SMA receive ring
- *
- * @v sma		Subnet management agent
- */
-static void ib_sma_refill_recv ( struct ib_sma *sma ) {
-	struct ib_device *ibdev = sma->ibdev;
-	struct io_buffer *iobuf;
-	int rc;
-
-	while ( sma->qp->recv.fill < IB_SMA_NUM_RECV_WQES ) {
-
-		/* Allocate I/O buffer */
-		iobuf = alloc_iob ( IB_MAX_PAYLOAD_SIZE );
-		if ( ! iobuf ) {
-			/* Non-fatal; we will refill on next attempt */
-			return;
-		}
-
-		/* Post I/O buffer */
-		if ( ( rc = ib_post_recv ( ibdev, sma->qp, iobuf ) ) != 0 ) {
-			DBGC ( sma, "SMA %p could not refill: %s\n",
-			       sma, strerror ( rc ) );
-			free_iob ( iobuf );
-			/* Give up */
-			return;
-		}
-	}
-}
-
 /**
  * Complete SMA send
  *
@@ -456,23 +425,6 @@ static struct ib_completion_queue_operations ib_sma_completion_ops = {
 	.complete_recv = ib_sma_complete_recv,
 };
 
-/**
- * Poll SMA
- *
- * @v process		Process
- */
-static void ib_sma_step ( struct process *process ) {
-	struct ib_sma *sma =
-		container_of ( process, struct ib_sma, poll );
-	struct ib_device *ibdev = sma->ibdev;
-
-	/* Poll the kernel completion queue */
-	ib_poll_cq ( ibdev, sma->cq );
-
-	/* Refill the receive ring */
-	ib_sma_refill_recv ( sma );
-}
-
 /**
  * Create SMA
  *
@@ -489,7 +441,6 @@ int ib_create_sma ( struct ib_sma *sma, struct ib_device *ibdev,
 	memset ( sma, 0, sizeof ( *sma ) );
 	sma->ibdev = ibdev;
 	sma->op = op;
-	process_init ( &sma->poll, ib_sma_step, &ibdev->refcnt );
 
 	/* Create completion queue */
 	sma->cq = ib_create_cq ( ibdev, IB_SMA_NUM_CQES,
@@ -517,7 +468,7 @@ int ib_create_sma ( struct ib_sma *sma, struct ib_device *ibdev,
 	}
 
 	/* Fill receive ring */
-	ib_sma_refill_recv ( sma );
+	ib_refill_recv ( ibdev, sma->qp );
 	return 0;
 
  err_not_qp0:
@@ -525,7 +476,6 @@ int ib_create_sma ( struct ib_sma *sma, struct ib_device *ibdev,
  err_create_qp:
 	ib_destroy_cq ( ibdev, sma->cq );
  err_create_cq:
-	process_del ( &sma->poll );
 	return rc;
 }
 
@@ -539,5 +489,4 @@ void ib_destroy_sma ( struct ib_sma *sma ) {
 
 	ib_destroy_qp ( ibdev, sma->qp );
 	ib_destroy_cq ( ibdev, sma->cq );
-	process_del ( &sma->poll );
 }
