
Rearrange data structures to maximise embedding (and hence minimise
the number of separate allocations that need to be done).

tags/v0.9.3
Michael Brown, 17 years ago
parent commit 156b409ccc

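The commit adopts the usual C struct-embedding idiom: instead of a generic object carrying a dev_priv pointer to separately allocated driver state, the driver structure embeds the generic one, so a single allocation covers both and container_of() recovers the outer structure from a pointer to the embedded member. A minimal, self-contained sketch of the idiom; the outer_thing/inner_thing names are illustrative and not part of this commit:

#include <stddef.h>	/* offsetof() */
#include <stdlib.h>	/* calloc() */

/* container_of() is typically defined along these lines */
#define container_of( ptr, type, field ) \
	( ( type * ) ( ( ( char * ) (ptr) ) - offsetof ( type, field ) ) )

struct inner_thing {
	int generic_state;
};

struct outer_thing {
	struct inner_thing inner;	/* embedded; no separate allocation */
	int driver_state;
};

/* One allocation provides both the generic and the driver-private parts */
static struct outer_thing * outer_create ( void ) {
	return calloc ( 1, sizeof ( struct outer_thing ) );
}

/* Recover the containing structure from the embedded member */
static struct outer_thing * to_outer ( struct inner_thing *inner ) {
	return container_of ( inner, struct outer_thing, inner );
}

This is the shape that struct arbel_queue_pair and struct arbel_completion_queue take below, with container_of() replacing the dev_priv back-pointers.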
src/drivers/net/mlx_ipoib/arbel.h (+13, -0)

@@ -119,8 +119,20 @@ struct arbel_recv_work_queue {
 	union arbel_recv_wqe *wqe;
 };
 
+/** An Arbel queue pair */
+struct arbel_queue_pair {
+	/** Infiniband queue pair */
+	struct ib_queue_pair qp;
+	/** Send work queue */
+	struct arbel_send_work_queue send;
+	/** Receive work queue */
+	struct arbel_recv_work_queue recv;
+};
+
 /** An Arbel completion queue */
 struct arbel_completion_queue {
+	/** Infiniband completion queue */
+	struct ib_completion_queue cq;
 	/** Doorbell record number */
 	unsigned int doorbell_idx;
 	/** Completion queue entries */
@@ -154,6 +166,7 @@ struct arbel {
  */
 
 #define ARBEL_HCR_QUERY_DEV_LIM		0x0003
+#define ARBEL_HCR_SW2HW_CQ		0x0016
 
 #define ARBEL_HCR_BASE			0x80680
 #define ARBEL_HCR_REG(x)		( ARBEL_HCR_BASE + 4 * (x) )
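With these definitions a queue pair and both of its work queues form a single object, as do a completion queue and its doorbell state. In this commit the instances are still allocated statically in mt25218.c, but a dynamic allocation path would need only one zalloc() per object rather than three; a hedged sketch (the function is hypothetical, not part of this diff):

static int example_create_qp ( struct arbel_queue_pair **new_qp ) {
	struct arbel_queue_pair *arbel_qp;

	/* One allocation now covers the ib_queue_pair and both
	 * Arbel work queues */
	arbel_qp = zalloc ( sizeof ( *arbel_qp ) );
	if ( ! arbel_qp )
		return -ENOMEM;

	/* &arbel_qp->qp is what would be handed to the Infiniband core;
	 * arbel_qp->send and arbel_qp->recv need no further allocations */
	*new_qp = arbel_qp;
	return 0;
}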

src/drivers/net/mlx_ipoib/mt25218.c (+118, -75)

@@ -52,57 +52,54 @@ static struct io_buffer *static_ipoib_tx_ring[NUM_IPOIB_SND_WQES];
 static struct io_buffer *static_ipoib_rx_ring[NUM_IPOIB_RCV_WQES];
 
 static struct arbel static_arbel;
-static struct arbel_send_work_queue static_arbel_ipoib_send_wq = {
-	.doorbell_idx = IPOIB_SND_QP_DB_IDX,
-};
-static struct arbel_send_work_queue static_arbel_ipoib_recv_wq = {
-	.doorbell_idx = IPOIB_RCV_QP_DB_IDX,
-};
-static struct arbel_completion_queue static_arbel_ipoib_send_cq = {
-	.doorbell_idx = IPOIB_SND_CQ_CI_DB_IDX,
-};
-static struct arbel_completion_queue static_arbel_ipoib_recv_cq = {
-	.doorbell_idx = IPOIB_RCV_CQ_CI_DB_IDX,
-};
-
-static struct ib_completion_queue static_ipoib_send_cq;
-static struct ib_completion_queue static_ipoib_recv_cq;
-static struct ib_device static_ibdev = {
-	.dev_priv = &static_arbel,
-};
-static struct ib_queue_pair static_ipoib_qp = {
+static struct arbel_completion_queue static_ipoib_send_cq;
+static struct arbel_completion_queue static_ipoib_recv_cq;
+
+static struct arbel_queue_pair static_ipoib_qp = {
+	.qp = {
+		.send = {
+			.qp = &static_ipoib_qp.qp,
+			.is_send = 1,
+			.cq = &static_ipoib_send_cq.cq,
+			.num_wqes = NUM_IPOIB_SND_WQES,
+			.iobufs = static_ipoib_tx_ring,
+			.list = LIST_HEAD_INIT (static_ipoib_qp.qp.send.list),
+		},
+		.recv = {
+			.qp = &static_ipoib_qp.qp,
+			.is_send = 0,
+			.cq = &static_ipoib_recv_cq.cq,
+			.num_wqes = NUM_IPOIB_RCV_WQES,
+			.iobufs = static_ipoib_rx_ring,
+			.list = LIST_HEAD_INIT (static_ipoib_qp.qp.recv.list),
+		},
+	},
 	.send = {
-		.qp = &static_ipoib_qp,
-		.is_send = 1,
-		.cq = &static_ipoib_send_cq,
-		.num_wqes = NUM_IPOIB_SND_WQES,
-		.iobufs = static_ipoib_tx_ring,
-		.dev_priv = &static_arbel_ipoib_send_wq,
-		.list = LIST_HEAD_INIT ( static_ipoib_qp.send.list ),
+		.doorbell_idx = IPOIB_SND_QP_DB_IDX,
 	},
 	.recv = {
-		.qp = &static_ipoib_qp,
-		.is_send = 0,
-		.cq = &static_ipoib_recv_cq,
-		.num_wqes = NUM_IPOIB_RCV_WQES,
-		.iobufs = static_ipoib_rx_ring,
-		.dev_priv = &static_arbel_ipoib_recv_wq,
-		.list = LIST_HEAD_INIT ( static_ipoib_qp.recv.list ),
+		.doorbell_idx = IPOIB_RCV_QP_DB_IDX,
 	},
 };
-static struct ib_completion_queue static_ipoib_send_cq = {
-	.cqn = 1234, /* Only used for debug messages */
-	.num_cqes = NUM_IPOIB_SND_CQES,
-	.dev_priv = &static_arbel_ipoib_send_cq,
-	.work_queues = LIST_HEAD_INIT ( static_ipoib_send_cq.work_queues ),
+static struct arbel_completion_queue static_ipoib_send_cq = {
+	.cq = {
+		.cqn = 1234, /* Only used for debug messages */
+		.num_cqes = NUM_IPOIB_SND_CQES,
+		.work_queues = LIST_HEAD_INIT (static_ipoib_send_cq.cq.work_queues),
+	},
+	.doorbell_idx = IPOIB_SND_CQ_CI_DB_IDX,
 };
-static struct ib_completion_queue static_ipoib_recv_cq = {
-	.cqn = 2345, /* Only used for debug messages */
-	.num_cqes = NUM_IPOIB_RCV_CQES,
-	.dev_priv = &static_arbel_ipoib_recv_cq,
-	.work_queues = LIST_HEAD_INIT ( static_ipoib_recv_cq.work_queues ),
+static struct arbel_completion_queue static_ipoib_recv_cq = {
+	.cq = {
+		.cqn = 2345, /* Only used for debug messages */
+		.num_cqes = NUM_IPOIB_RCV_CQES,
+		.work_queues = LIST_HEAD_INIT (static_ipoib_recv_cq.cq.work_queues),
+	},
+	.doorbell_idx = IPOIB_RCV_CQ_CI_DB_IDX,
+};
+static struct ib_device static_ibdev = {
+	.priv = &static_arbel,
 };
-
 
 
 /**
@@ -152,7 +149,7 @@ static int mlx_transmit_direct ( struct net_device *netdev,
 	};
 	memcpy ( &av.gid, ( ( void * ) bav ) + 16, 16 );
 
-	rc = arbel_post_send ( &static_ibdev, &static_ipoib_qp, &av, iobuf );
+	rc = arbel_post_send ( &static_ibdev, &static_ipoib_qp.qp, &av, iobuf );
 
 	return rc;
 }
@@ -206,7 +203,8 @@ static void mlx_refill_rx ( struct net_device *netdev ) {
 		if ( ! iobuf )
 			break;
 		DBG ( "Posting RX buffer %p:\n", iobuf );
-		if ( ( rc = arbel_post_recv ( &static_ibdev, &static_ipoib_qp,
+		if ( ( rc = arbel_post_recv ( &static_ibdev,
+					      &static_ipoib_qp.qp,
 					      iobuf ) ) != 0 ) {
 			free_iob ( iobuf );
 			break;
@@ -238,9 +236,9 @@ static void mlx_poll ( struct net_device *netdev ) {
 	}
 
 	/* Poll completion queues */
-	arbel_poll_cq ( &static_ibdev, &static_ipoib_send_cq,
+	arbel_poll_cq ( &static_ibdev, &static_ipoib_send_cq.cq,
 			temp_complete_send, temp_complete_recv );
-	arbel_poll_cq ( &static_ibdev, &static_ipoib_recv_cq,
+	arbel_poll_cq ( &static_ibdev, &static_ipoib_recv_cq.cq,
 			temp_complete_send, temp_complete_recv );
 	//	mlx_poll_cq ( netdev, mlx->rcv_cqh, mlx_rx_complete );
 
@@ -383,27 +381,63 @@ static int arbel_cmd ( struct arbel *arbel, unsigned long command,
 	return 0;
 }
 
-static int arbel_cmd_query_dev_lim ( struct arbel *arbel,
-				     struct arbelprm_query_dev_lim *out ) {
+static inline int
+arbel_cmd_query_dev_lim ( struct arbel *arbel,
+			  struct arbelprm_query_dev_lim *dev_lim ) {
 	return arbel_cmd ( arbel,
 			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_DEV_LIM,
-					       1, sizeof ( *out ) ),
-			   0, NULL, 0, out );
+					       1, sizeof ( *dev_lim ) ),
+			   0, NULL, 0, dev_lim );
+}
+
+static inline int
+arbel_cmd_sw2hw_cq ( struct arbel *arbel, unsigned long cqn,
+		     const struct arbelprm_completion_queue_context *cqctx ) {
+	return arbel_cmd ( arbel,
+			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_CQ,
+					      1, sizeof ( *cqctx ) ),
+			   0, cqctx, cqn, NULL );
 }
 
+/***************************************************************************
+ *
+ * Completion queue operations
+ *
+ ***************************************************************************
+ */
+
 /**
  * Create completion queue
  *
  * @v ibdev		Infiniband device
  * @v 
  */
-static int arbel_create_cq ( struct ib_device *ibdev ) {
-	struct arbelprm_completion_queue_context *cqctx;
+static int arbel_create_cq ( struct ib_device *ibdev,
+			     struct ib_completion_queue **new_cq ) {
+	struct arbel *arbel = ibdev->priv;
+	struct arbelprm_completion_queue_context cqctx;
+	struct ib_completion_queue *cq;
+
+	cq = zalloc ( sizeof ( *cq ) );
+	if ( ! cq )
+		return -ENOMEM;
 
+
 
+	memset ( &cqctx, 0, sizeof ( cqctx ) );
+
+
+	return arbel_cmd_sw2hw_cq ( arbel, 0, &cqctx );
 }
 
 
+/***************************************************************************
+ *
+ * Work request operations
+ *
+ ***************************************************************************
+ */
+
 /**
  * Ring doorbell register in UAR
  *
@@ -438,9 +472,11 @@ static int arbel_post_send ( struct ib_device *ibdev,
 			     struct ib_queue_pair *qp,
 			     struct ib_address_vector *av,
 			     struct io_buffer *iobuf ) {
-	struct arbel *arbel = ibdev->dev_priv;
+	struct arbel *arbel = ibdev->priv;
+	struct arbel_queue_pair *arbel_qp
+		= container_of ( qp, struct arbel_queue_pair, qp );
 	struct ib_work_queue *wq = &qp->send;
-	struct arbel_send_work_queue *arbel_send_wq = wq->dev_priv;
+	struct arbel_send_work_queue *arbel_send_wq = &arbel_qp->send;
 	struct arbelprm_ud_send_wqe *prev_wqe;
 	struct arbelprm_ud_send_wqe *wqe;
 	union arbelprm_doorbell_record *db_rec;
@@ -526,9 +562,11 @@ static int arbel_post_send ( struct ib_device *ibdev,
 static int arbel_post_recv ( struct ib_device *ibdev,
 			     struct ib_queue_pair *qp,
 			     struct io_buffer *iobuf ) {
-	struct arbel *arbel = ibdev->dev_priv;
+	struct arbel *arbel = ibdev->priv;
+	struct arbel_queue_pair *arbel_qp
+		= container_of ( qp, struct arbel_queue_pair, qp );
 	struct ib_work_queue *wq = &qp->recv;
-	struct arbel_recv_work_queue *arbel_recv_wq = wq->dev_priv;
+	struct arbel_recv_work_queue *arbel_recv_wq = &arbel_qp->recv;
 	struct arbelprm_recv_wqe *wqe;
 	union arbelprm_doorbell_record *db_rec;
 	unsigned int wqe_idx_mask;
@@ -575,12 +613,14 @@ static int arbel_complete ( struct ib_device *ibdev,
 			    union arbelprm_completion_entry *cqe,
 			    ib_completer_t complete_send,
 			    ib_completer_t complete_recv ) {
-	struct arbel *arbel = ibdev->dev_priv;
+	struct arbel *arbel = ibdev->priv;
 	struct ib_completion completion;
 	struct ib_work_queue *wq;
-	struct io_buffer *iobuf;
+	struct ib_queue_pair *qp;
+	struct arbel_queue_pair *arbel_qp;
 	struct arbel_send_work_queue *arbel_send_wq;
 	struct arbel_recv_work_queue *arbel_recv_wq;
+	struct io_buffer *iobuf;
 	ib_completer_t complete;
 	unsigned int opcode;
 	unsigned long qpn;
@@ -614,14 +654,16 @@ static int arbel_complete ( struct ib_device *ibdev,
 		       arbel, cq->cqn, ( is_send ? "send" : "recv" ), qpn );
 		return -EIO;
 	}
+	qp = wq->qp;
+	arbel_qp = container_of ( qp, struct arbel_queue_pair, qp );
 
 	/* Identify work queue entry index */
 	if ( is_send ) {
-		arbel_send_wq = wq->dev_priv;
+		arbel_send_wq = &arbel_qp->send;
 		wqe_idx = ( ( wqe_adr - virt_to_bus ( arbel_send_wq->wqe ) ) /
 			    sizeof ( arbel_send_wq->wqe[0] ) );
 	} else {
-		arbel_recv_wq = wq->dev_priv;
+		arbel_recv_wq = &arbel_qp->recv;
 		wqe_idx = ( ( wqe_adr - virt_to_bus ( arbel_recv_wq->wqe ) ) /
 			    sizeof ( arbel_recv_wq->wqe[0] ) );
 	}
@@ -637,7 +679,7 @@ static int arbel_complete ( struct ib_device *ibdev,
 
 	/* Pass off to caller's completion handler */
 	complete = ( is_send ? complete_send : complete_recv );
-	complete ( ibdev, wq->qp, &completion, iobuf );
+	complete ( ibdev, qp, &completion, iobuf );
 
 	return rc;
 }
@@ -654,8 +696,9 @@ static void arbel_poll_cq ( struct ib_device *ibdev,
 			    struct ib_completion_queue *cq,
 			    ib_completer_t complete_send,
 			    ib_completer_t complete_recv ) {
-	struct arbel *arbel = ibdev->dev_priv;
-	struct arbel_completion_queue *arbel_cq = cq->dev_priv;
+	struct arbel *arbel = ibdev->priv;
+	struct arbel_completion_queue *arbel_cq
+		= container_of ( cq, struct arbel_completion_queue, cq );
 	union arbelprm_doorbell_record *db_rec;
 	union arbelprm_completion_entry *cqe;
 	unsigned int cqe_idx_mask;
@@ -757,20 +800,20 @@ static int arbel_probe ( struct pci_device *pci,
 	static_arbel.uar = memfree_pci_dev.uar;
 	static_arbel.db_rec = dev_ib_data.uar_context_base;
 	static_arbel.reserved_lkey = dev_ib_data.mkey;
-	static_arbel_ipoib_send_wq.wqe =
+	static_ipoib_qp.send.wqe =
 		( ( struct udqp_st * ) qph )->snd_wq;
-	static_arbel_ipoib_recv_wq.wqe =
+	static_ipoib_qp.recv.wqe =
 		( ( struct udqp_st * ) qph )->rcv_wq;
-	static_arbel_ipoib_send_cq.cqe =
+	static_ipoib_send_cq.cqe =
 		( ( struct cq_st * ) ib_data.ipoib_snd_cq )->cq_buf;
-	static_arbel_ipoib_recv_cq.cqe =
+	static_ipoib_recv_cq.cqe =
 		( ( struct cq_st * ) ib_data.ipoib_rcv_cq )->cq_buf;
-	static_ipoib_qp.qpn = ib_get_qpn ( qph );
-	static_ipoib_qp.priv = netdev;
-	list_add ( &static_ipoib_qp.send.list,
-		   &static_ipoib_send_cq.work_queues );
-	list_add ( &static_ipoib_qp.recv.list,
-		   &static_ipoib_recv_cq.work_queues );
+	static_ipoib_qp.qp.qpn = ib_get_qpn ( qph );
+	static_ipoib_qp.qp.priv = netdev;
+	list_add ( &static_ipoib_qp.qp.send.list,
+		   &static_ipoib_send_cq.cq.work_queues );
+	list_add ( &static_ipoib_qp.qp.recv.list,
+		   &static_ipoib_recv_cq.cq.work_queues );
 
 	struct arbelprm_query_dev_lim dev_lim;
 	memset ( &dev_lim, 0xaa, sizeof ( dev_lim ) );

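Taken together, the mt25218.c changes replace every wq->dev_priv and cq->dev_priv lookup with a container_of() on the embedding structure; the recurring shape in arbel_post_send(), arbel_post_recv(), arbel_complete() and arbel_poll_cq() is, condensed from the hunks above:

	struct arbel_queue_pair *arbel_qp =
		container_of ( qp, struct arbel_queue_pair, qp );
	struct arbel_send_work_queue *arbel_send_wq = &arbel_qp->send;

	struct arbel_completion_queue *arbel_cq =
		container_of ( cq, struct arbel_completion_queue, cq );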
src/include/gpxe/infiniband.h (+2, -8)

@@ -89,8 +89,6 @@ struct ib_work_queue {
 	unsigned long next_idx;
 	/** I/O buffers assigned to work queue */
 	struct io_buffer **iobufs;
-	/** Device private data */
-	void *dev_priv;
 };
 
 /** An Infiniband Queue Pair */
@@ -103,8 +101,6 @@ struct ib_queue_pair {
 	struct ib_work_queue recv;
 	/** Queue owner private data */
 	void *priv;
-	/** Device private data */
-	void *dev_priv;
 };
 
 /** An Infiniband Completion Queue */
@@ -123,8 +119,6 @@ struct ib_completion_queue {
 	unsigned long next_idx;
 	/** List of work queues completing to this queue */
 	struct list_head work_queues;
-	/** Device private data */
-	void *dev_priv;
 };
 
 /** An Infiniband completion */
@@ -224,8 +218,8 @@ struct ib_device_operations {
 
 /** An Infiniband device */
 struct ib_device {
-	/** Device private data */
-	void *dev_priv;
+	/** Driver private data */
+	void *priv;
 };
 
 
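The net effect on the public header is that drivers can no longer hang per-object state off a dev_priv pointer; they embed the generic structures instead. A before/after sketch with a hypothetical driver type (mydrv_cq is illustrative, not from the tree):

#include <gpxe/infiniband.h>

/* Before this commit: driver state lived in a separate object, reached
 * via cq->dev_priv, which the driver had to allocate and wire up itself */
struct mydrv_cq_old {
	unsigned int doorbell_idx;
};

/* After this commit: the generic completion queue is embedded, so there
 * is exactly one object, one allocation, and container_of() instead of
 * a dev_priv back-pointer */
struct mydrv_cq {
	struct ib_completion_queue cq;	/* embedded generic CQ */
	unsigned int doorbell_idx;	/* driver-specific state */
};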
