commit 37fc40bc8c (tags/v0.9.3)
Michael Brown, 17 years ago

post_recv() now works, and we can pass data on the IPoIB queue pair
using entirely our own code.
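In outline, the receive data path that this commit completes has three
stages: keep the receive ring topped up with I/O buffers, poll the
completion queue, and hand completed buffers to the network stack.  A
condensed sketch of the refill stage, distilled from mt25218.c in the
diff below (not a verbatim excerpt):

	/* Keep one I/O buffer posted per free receive WQE */
	while ( mlx->rx_fill < MLX_RX_MAX_FILL ) {
		struct io_buffer *iobuf = alloc_iob ( 2048 );
		if ( ! iobuf )
			break;
		/* On success, ownership passes to the queue pair */
		if ( arbel_post_recv ( &static_ibdev, &static_ipoib_qp,
				       iobuf ) != 0 ) {
			free_iob ( iobuf );
			break;
		}
		mlx->rx_fill++;
	}

Completions then surface via arbel_poll_cq(); received buffers reach
temp_complete_recv(), which strips the global route header and passes
the packet to netdev_rx().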

src/drivers/net/mlx_ipoib/arbel.h (+25, -2)

@@ -25,6 +25,7 @@
 struct MLX_DECLARE_STRUCT ( arbelprm_completion_with_error );
 struct MLX_DECLARE_STRUCT ( arbelprm_cq_ci_db_record );
 struct MLX_DECLARE_STRUCT ( arbelprm_qp_db_record );
+struct MLX_DECLARE_STRUCT ( arbelprm_recv_wqe_segment_next );
 struct MLX_DECLARE_STRUCT ( arbelprm_send_doorbell );
 struct MLX_DECLARE_STRUCT ( arbelprm_ud_address_vector );
 struct MLX_DECLARE_STRUCT ( arbelprm_wqe_segment_ctrl_send );
@@ -37,13 +38,28 @@
  *
  */
 
-#define ARBELPRM_MAX_GATHER 1
+#define ARBEL_MAX_GATHER 1
 
 struct arbelprm_ud_send_wqe {
 	struct arbelprm_wqe_segment_next next;
 	struct arbelprm_wqe_segment_ctrl_send ctrl;
 	struct arbelprm_wqe_segment_ud ud;
-	struct arbelprm_wqe_segment_data_ptr data[ARBELPRM_MAX_GATHER];
+	struct arbelprm_wqe_segment_data_ptr data[ARBEL_MAX_GATHER];
+} __attribute__ (( packed ));
+
+#define ARBEL_MAX_SCATTER 1
+
+struct arbelprm_recv_wqe {
+	/* The autogenerated header is inconsistent between send and
+	 * receive WQEs.  The "ctrl" structure for receive WQEs is
+	 * defined to include the "next" structure.  Since the "ctrl"
+	 * part of the "ctrl" structure contains only "reserved, must
+	 * be zero" bits, we ignore its definition and provide
+	 * something more usable.
+	 */
+	struct arbelprm_recv_wqe_segment_next next;
+	uint32_t ctrl[2]; /* All "reserved, must be zero" */
+	struct arbelprm_wqe_segment_data_ptr data[ARBEL_MAX_SCATTER];
 } __attribute__ (( packed ));
 
 union arbelprm_completion_entry {
@@ -88,6 +104,7 @@
 
 /** An Arbel receive work queue entry */
 union arbel_recv_wqe {
+	struct arbelprm_recv_wqe recv;
 	uint8_t force_align[ARBEL_RECV_WQE_ALIGN];
 } __attribute__ (( packed ));
 
@@ -113,6 +130,12 @@
 	void *uar;
 	/** Doorbell records */
 	union arbelprm_doorbell_record *db_rec;
+	/** Reserved LKey
+	 *
+	 * Used to get unrestricted memory access.
+	 */
+	unsigned long reserved_lkey;
+
 };
 
 #endif /* _ARBEL_H */
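A note on the union arbel_recv_wqe above: the force_align member pads
every receive ring entry to (at least) ARBEL_RECV_WQE_ALIGN bytes,
giving the ring a fixed entry stride.  That stride is what lets the
completion path recover a WQE index from the bus address reported in a
completion entry by plain division, as arbel_poll_cq() in mt25218.c
does.  A minimal sketch (variable values are illustrative):

	union arbel_recv_wqe *ring = arbel_recv_wq->wqe;
	unsigned long wqe_adr;		/* Bus address from the CQE */
	unsigned int wqe_idx;

	/* Entry stride is sizeof ( ring[0] ), so index recovery is
	 * a single division.
	 */
	wqe_idx = ( ( wqe_adr - virt_to_bus ( ring ) ) /
		    sizeof ( ring[0] ) );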

src/drivers/net/mlx_ipoib/ib_mt25218.c (+1, -1)

@@ -1311,7 +1311,7 @@
 	qp->rcv_buf_sz = IPOIB_RCV_BUF_SZ;
 
 	qp->max_recv_wqes = NUM_IPOIB_RCV_WQES;
-	qp->recv_wqe_cur_free = NUM_IPOIB_RCV_WQES;
+	qp->recv_wqe_cur_free = 0; //NUM_IPOIB_RCV_WQES;
 
 	qp->rcv_uar_context =
 	    dev_ib_data.uar_context_base + 8 * IPOIB_RCV_QP_DB_IDX;

src/drivers/net/mlx_ipoib/mt25218.c (+168, -56)

@@ -26,6 +26,7 @@
 #include "arbel.h"
 
 
+#define MLX_RX_MAX_FILL NUM_IPOIB_RCV_WQES
 
 struct mlx_nic {
 	/** Queue pair handle */
@@ -36,35 +37,65 @@
 	cq_t snd_cqh;
 	/** Receive completion queue */
 	cq_t rcv_cqh;
+
+	/** RX fill level */
+	unsigned int rx_fill;
 };
 
 
 static struct io_buffer *static_ipoib_tx_ring[NUM_IPOIB_SND_WQES];
+static struct io_buffer *static_ipoib_rx_ring[NUM_IPOIB_RCV_WQES];
 
 static struct arbel static_arbel;
 static struct arbel_send_work_queue static_arbel_ipoib_send_wq = {
 	.doorbell_idx = IPOIB_SND_QP_DB_IDX,
 };
+static struct arbel_send_work_queue static_arbel_ipoib_recv_wq = {
+	.doorbell_idx = IPOIB_RCV_QP_DB_IDX,
+};
 static struct arbel_completion_queue static_arbel_ipoib_send_cq = {
 	.doorbell_idx = IPOIB_SND_CQ_CI_DB_IDX,
 };
+static struct arbel_completion_queue static_arbel_ipoib_recv_cq = {
+	.doorbell_idx = IPOIB_RCV_CQ_CI_DB_IDX,
+};
 
+static struct ib_completion_queue static_ipoib_send_cq;
+static struct ib_completion_queue static_ipoib_recv_cq;
 static struct ib_device static_ibdev = {
 	.dev_priv = &static_arbel,
 };
 static struct ib_queue_pair static_ipoib_qp = {
 	.send = {
+		.qp = &static_ipoib_qp,
+		.is_send = 1,
+		.cq = &static_ipoib_send_cq,
 		.num_wqes = NUM_IPOIB_SND_WQES,
 		.iobufs = static_ipoib_tx_ring,
 		.dev_priv = &static_arbel_ipoib_send_wq,
+		.list = LIST_HEAD_INIT ( static_ipoib_qp.send.list ),
+	},
+	.recv = {
+		.qp = &static_ipoib_qp,
+		.is_send = 0,
+		.cq = &static_ipoib_recv_cq,
+		.num_wqes = NUM_IPOIB_RCV_WQES,
+		.iobufs = static_ipoib_rx_ring,
+		.dev_priv = &static_arbel_ipoib_recv_wq,
+		.list = LIST_HEAD_INIT ( static_ipoib_qp.recv.list ),
 	},
-	.list = LIST_HEAD_INIT ( static_ipoib_qp.list ),
 };
 static struct ib_completion_queue static_ipoib_send_cq = {
 	.cqn = 1234, /* Only used for debug messages */
 	.num_cqes = NUM_IPOIB_SND_CQES,
 	.dev_priv = &static_arbel_ipoib_send_cq,
-	.queue_pairs = LIST_HEAD_INIT ( static_ipoib_send_cq.queue_pairs ),
+	.work_queues = LIST_HEAD_INIT ( static_ipoib_send_cq.work_queues ),
+};
+static struct ib_completion_queue static_ipoib_recv_cq = {
+	.cqn = 2345, /* Only used for debug messages */
+	.num_cqes = NUM_IPOIB_RCV_CQES,
+	.dev_priv = &static_arbel_ipoib_recv_cq,
+	.work_queues = LIST_HEAD_INIT ( static_ipoib_recv_cq.work_queues ),
 };
 
 
@@ -157,36 +188,6 @@
 }
 
 
-static void arbel_poll_cq ( struct ib_device *ibdev,
-			    struct ib_completion_queue *cq,
-			    ib_completer_t complete_send,
-			    ib_completer_t complete_recv );
-
-static void temp_complete_send ( struct ib_device *ibdev __unused,
-				 struct ib_queue_pair *qp,
-				 struct ib_completion *completion,
-				 struct io_buffer *iobuf ) {
-	struct net_device *netdev = qp->priv;
-
-	DBG ( "Wahey! TX completion\n" );
-	netdev_tx_complete_err ( netdev, iobuf,
-				 ( completion->syndrome ? -EIO : 0 ) );
-}
-
-static void temp_complete_recv ( struct ib_device *ibdev __unused,
-				 struct ib_queue_pair *qp __unused,
-				 struct ib_completion *completion __unused,
-				 struct io_buffer *iobuf __unused ) {
-	DBG ( "AARGH! recv completion\n" );
-}
-
-static void mlx_poll_cq_direct ( struct net_device *netdev ) {
-	struct mlx_nic *mlx = netdev->priv;
-
-	arbel_poll_cq ( &static_ibdev, &static_ipoib_send_cq,
-			temp_complete_send, temp_complete_recv );
-}
-
 /**
  * Handle TX completion
  *
@@ -233,6 +234,44 @@
 	netdev_rx ( netdev, iobuf );
 }
 
+static void arbel_poll_cq ( struct ib_device *ibdev,
+			    struct ib_completion_queue *cq,
+			    ib_completer_t complete_send,
+			    ib_completer_t complete_recv );
+
+static void temp_complete_send ( struct ib_device *ibdev __unused,
+				 struct ib_queue_pair *qp,
+				 struct ib_completion *completion,
+				 struct io_buffer *iobuf ) {
+	struct net_device *netdev = qp->priv;
+
+	DBG ( "Wahey! TX completion\n" );
+	netdev_tx_complete_err ( netdev, iobuf,
+				 ( completion->syndrome ? -EIO : 0 ) );
+}
+
+static void temp_complete_recv ( struct ib_device *ibdev __unused,
+				 struct ib_queue_pair *qp,
+				 struct ib_completion *completion,
+				 struct io_buffer *iobuf ) {
+	struct net_device *netdev = qp->priv;
+	struct mlx_nic *mlx = netdev->priv;
+
+	DBG ( "Yay! RX completion on %p len %zx:\n", iobuf, completion->len );
+	//	DBG_HD ( iobuf, sizeof ( *iobuf ) );
+	//	DBG_HD ( iobuf->data, 256 );
+	if ( completion->syndrome ) {
+		netdev_rx_err ( netdev, iobuf, -EIO );
+	} else {
+		iob_put ( iobuf, completion->len );
+		iob_pull ( iobuf, sizeof ( struct ib_global_route_header ) );
+		netdev_rx ( netdev, iobuf );
+	}
+
+	mlx->rx_fill--;
+}
+
+#if 0
 /**
  * Poll completion queue
  *
@@ -267,6 +306,32 @@
 		free_wqe ( ib_cqe.wqe );
 	}
 }
+#endif
+
+static int arbel_post_recv ( struct ib_device *ibdev,
+			     struct ib_queue_pair *qp,
+			     struct io_buffer *iobuf );
+
+static void mlx_refill_rx ( struct net_device *netdev ) {
+	struct mlx_nic *mlx = netdev->priv;
+	struct io_buffer *iobuf;
+	int rc;
+
+	while ( mlx->rx_fill < MLX_RX_MAX_FILL ) {
+		iobuf = alloc_iob ( 2048 );
+		if ( ! iobuf )
+			break;
+		DBG ( "Posting RX buffer %p:\n", iobuf );
+		//		memset ( iobuf->data, 0xaa, 256 );
+		//		DBG_HD ( iobuf, sizeof ( *iobuf ) );
+		if ( ( rc = arbel_post_recv ( &static_ibdev, &static_ipoib_qp,
					      iobuf ) ) != 0 ) {
+			free_iob ( iobuf );
+			break;
+		}
+		mlx->rx_fill++;
+	}
+}
 
 /**
  * Poll for completed and received packets
@@ -291,8 +356,13 @@
 	}
 
 	/* Poll completion queues */
-	mlx_poll_cq_direct ( netdev );
-	mlx_poll_cq ( netdev, mlx->rcv_cqh, mlx_rx_complete );
+	arbel_poll_cq ( &static_ibdev, &static_ipoib_send_cq,
+			temp_complete_send, temp_complete_recv );
+	arbel_poll_cq ( &static_ibdev, &static_ipoib_recv_cq,
+			temp_complete_send, temp_complete_recv );
+	//	mlx_poll_cq ( netdev, mlx->rcv_cqh, mlx_rx_complete );
+
+	mlx_refill_rx ( netdev );
 }
 
 /**
@@ -397,12 +467,9 @@
 	memcpy ( &wqe->ud.u.dwords[4], gid, sizeof ( *gid ) );
 	MLX_FILL_1 ( &wqe->ud, 8, destination_qp, av->dest_qp );
 	MLX_FILL_1 ( &wqe->ud, 9, q_key, av->qkey );
+	MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_len ( iobuf ) );
 	MLX_FILL_1 ( &wqe->data[0], 3,
 		     local_address_l, virt_to_bus ( iobuf->data ) );
-	MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_len ( iobuf ) );
-
-	DBG ( "Work queue entry:\n" );
-	DBG_HD ( wqe, sizeof ( *wqe ) );
 
 	/* Update previous work queue entry's "next" field */
 	nds = ( ( offsetof ( typeof ( *wqe ), data ) +
@@ -413,16 +480,11 @@
 		     f, 1,
 		     always1, 1 );
 
-	DBG ( "Previous work queue entry's next field:\n" );
-	DBG_HD ( &prev_wqe->next, sizeof ( prev_wqe->next ) );
-
 	/* Update doorbell record */
+	barrier();
 	db_rec = &arbel->db_rec[arbel_send_wq->doorbell_idx];
 	MLX_FILL_1 ( &db_rec->qp, 0,
 		     counter, ( ( wq->next_idx + 1 ) & 0xffff ) );
-	barrier();
-	DBG ( "Doorbell record:\n" );
-	DBG_HD ( db_rec, 8 );
 
 	/* Ring doorbell register */
 	MLX_FILL_4 ( &db_reg.send, 0,
@@ -441,6 +503,51 @@
 	return 0;
 }
 
+/**
+ * Post receive work queue entry
+ *
+ * @v ibdev		Infiniband device
+ * @v qp		Queue pair
+ * @v iobuf		I/O buffer
+ * @ret rc		Return status code
+ */
+static int arbel_post_recv ( struct ib_device *ibdev,
+			     struct ib_queue_pair *qp,
+			     struct io_buffer *iobuf ) {
+	struct arbel *arbel = ibdev->dev_priv;
+	struct ib_work_queue *wq = &qp->recv;
+	struct arbel_recv_work_queue *arbel_recv_wq = wq->dev_priv;
+	struct arbelprm_recv_wqe *wqe;
+	union arbelprm_doorbell_record *db_rec;
+	unsigned int wqe_idx_mask;
+
+	/* Allocate work queue entry */
+	wqe_idx_mask = ( wq->num_wqes - 1 );
+	if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
+		DBGC ( arbel, "Arbel %p receive queue full", arbel );
+		return -ENOBUFS;
+	}
+	wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
+	wqe = &arbel_recv_wq->wqe[wq->next_idx & wqe_idx_mask].recv;
+
+	/* Construct work queue entry */
+	MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_tailroom ( iobuf ) );
+	MLX_FILL_1 ( &wqe->data[0], 1, l_key, arbel->reserved_lkey );
+	MLX_FILL_1 ( &wqe->data[0], 3,
+		     local_address_l, virt_to_bus ( iobuf->data ) );
+
+	/* Update doorbell record */
+	barrier();
+	db_rec = &arbel->db_rec[arbel_recv_wq->doorbell_idx];
+	MLX_FILL_1 ( &db_rec->qp, 0,
+		     counter, ( ( wq->next_idx + 1 ) & 0xffff ) );
+
+	/* Update work queue's index */
+	wq->next_idx++;
+
+	return 0;
+}
+
 /**
  * Handle completion
  *
@@ -458,15 +565,14 @@
 			    ib_completer_t complete_recv ) {
 	struct arbel *arbel = ibdev->dev_priv;
 	struct ib_completion completion;
-	struct ib_queue_pair *qp;
 	struct ib_work_queue *wq;
 	struct io_buffer *iobuf;
 	struct arbel_send_work_queue *arbel_send_wq;
 	struct arbel_recv_work_queue *arbel_recv_wq;
 	ib_completer_t complete;
 	unsigned int opcode;
 	unsigned long qpn;
-	unsigned int is_send;
+	int is_send;
 	unsigned long wqe_adr;
 	unsigned int wqe_idx;
 	int rc = 0;
@@ -489,22 +595,20 @@
 		/* Don't return immediately; propagate error to completer */
 	}
 
-	/* Identify queue pair */
-	qp = ib_find_qp ( &cq->queue_pairs, qpn );
-	if ( ! qp ) {
-		DBGC ( arbel, "Arbel %p CQN %lx unknown QPN %lx\n",
-		       arbel, cq->cqn, qpn );
+	/* Identify work queue */
+	wq = ib_find_wq ( cq, qpn, is_send );
+	if ( ! wq ) {
+		DBGC ( arbel, "Arbel %p CQN %lx unknown %s QPN %lx\n",
+		       arbel, cq->cqn, ( is_send ? "send" : "recv" ), qpn );
 		return -EIO;
 	}
 
 	/* Identify work queue entry index */
 	if ( is_send ) {
-		wq = &qp->send;
 		arbel_send_wq = wq->dev_priv;
 		wqe_idx = ( ( wqe_adr - virt_to_bus ( arbel_send_wq->wqe ) ) /
 			    sizeof ( arbel_send_wq->wqe[0] ) );
 	} else {
-		wq = &qp->recv;
 		arbel_recv_wq = wq->dev_priv;
 		wqe_idx = ( ( wqe_adr - virt_to_bus ( arbel_recv_wq->wqe ) ) /
 			    sizeof ( arbel_recv_wq->wqe[0] ) );
@@ -521,7 +625,7 @@
 
 	/* Pass off to caller's completion handler */
 	complete = ( is_send ? complete_send : complete_recv );
-	complete ( ibdev, qp, &completion, iobuf );
+	complete ( ibdev, wq->qp, &completion, iobuf );
 
 	return rc;
 }
@@ -577,6 +681,7 @@
 /** Arbel Infiniband operations */
 static struct ib_device_operations arbel_ib_operations = {
 	.post_send	= arbel_post_send,
+	.post_recv	= arbel_post_recv,
 	.poll_cq	= arbel_poll_cq,
 };
 
@@ -636,14 +741,21 @@
 	/* Hack up IB structures */
 	static_arbel.uar = memfree_pci_dev.uar;
 	static_arbel.db_rec = dev_ib_data.uar_context_base;
+	static_arbel.reserved_lkey = dev_ib_data.mkey;
 	static_arbel_ipoib_send_wq.wqe =
 		( ( struct udqp_st * ) qph )->snd_wq;
+	static_arbel_ipoib_recv_wq.wqe =
+		( ( struct udqp_st * ) qph )->rcv_wq;
 	static_arbel_ipoib_send_cq.cqe =
 		( ( struct cq_st * ) ib_data.ipoib_snd_cq )->cq_buf;
+	static_arbel_ipoib_recv_cq.cqe =
+		( ( struct cq_st * ) ib_data.ipoib_rcv_cq )->cq_buf;
 	static_ipoib_qp.qpn = ib_get_qpn ( qph );
 	static_ipoib_qp.priv = netdev;
-	list_add ( &static_ipoib_qp.list,
-		   &static_ipoib_send_cq.queue_pairs );
+	list_add ( &static_ipoib_qp.send.list,
+		   &static_ipoib_send_cq.work_queues );
+	list_add ( &static_ipoib_qp.recv.list,
+		   &static_ipoib_recv_cq.work_queues );
 
 	/* Register network device */
 	if ( ( rc = register_netdev ( netdev ) ) != 0 )
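Two invariants in arbel_post_recv() above are worth spelling out: the
ring size must be a power of two, so that ( wq->num_wqes - 1 ) works as
an index mask, and the iobufs[] array doubles as the occupancy record,
so a non-NULL slot means that entry has not yet completed.  Stripped to
its skeleton (a sketch, not a second implementation):

	unsigned int idx = ( wq->next_idx & ( wq->num_wqes - 1 ) );

	if ( wq->iobufs[idx] )		/* Entry still outstanding */
		return -ENOBUFS;
	wq->iobufs[idx] = iobuf;	/* Remembered for completion time */
	/* ... build the WQE, then barrier(), then publish the new
	 * counter value in the doorbell record ...
	 */
	wq->next_idx++;

The barrier() placement matters: the WQE contents must be globally
visible before the doorbell record advances the counter, which is why
this commit also moves barrier() to before the doorbell update in the
send path.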

src/include/gpxe/infiniband.h (+30, -6)

@@ -64,9 +64,19 @@
 
 
 struct ib_device;
+struct ib_queue_pair;
+struct ib_completion_queue;
 
 /** An Infiniband Work Queue */
 struct ib_work_queue {
+	/** Containing queue pair */
+	struct ib_queue_pair *qp;
+	/** "Is a send queue" flag */
+	int is_send;
+	/** Associated completion queue */
+	struct ib_completion_queue *cq;
+	/** List of work queues on this completion queue */
+	struct list_head list;
 	/** Number of work queue entries */
 	unsigned int num_wqes;
 	/** Next work queue entry index
@@ -85,8 +95,6 @@
 
 /** An Infiniband Queue Pair */
 struct ib_queue_pair {
-	/** List of queue pairs sharing a completion queue */
-	struct list_head list;
 	/** Queue Pair Number */
 	unsigned long qpn;
 	/** Send queue */
@@ -113,8 +121,8 @@
 	 * array index.
 	 */
 	unsigned long next_idx;
-	/** List of associated queue pairs */
-	struct list_head queue_pairs;
+	/** List of work queues completing to this queue */
+	struct list_head work_queues;
 	/** Device private data */
 	void *dev_priv;
 };
@@ -183,6 +191,22 @@
 			      struct ib_queue_pair *qp,
 			      struct ib_address_vector *av,
 			      struct io_buffer *iobuf );
+	/**
+	 * Post receive work queue entry
+	 *
+	 * @v ibdev		Infiniband device
+	 * @v qp		Queue pair
+	 * @v iobuf		I/O buffer
+	 * @ret rc		Return status code
+	 *
+	 * If this method returns success, the I/O buffer remains
+	 * owned by the queue pair.  If this method returns failure,
+	 * the I/O buffer is immediately released; the failure is
+	 * interpreted as "failure to enqueue buffer".
+	 */
+	int ( * post_recv ) ( struct ib_device *ibdev,
+			      struct ib_queue_pair *qp,
+			      struct io_buffer *iobuf );
 	/** Poll completion queue
 	 *
 	 * @v ibdev		Infiniband device
@@ -205,8 +229,8 @@
 };
 
 
-extern struct ib_queue_pair * ib_find_qp ( struct list_head *list,
-					   unsigned long qpn );
+extern struct ib_work_queue * ib_find_wq ( struct ib_completion_queue *cq,
+					   unsigned long qpn, int is_send );
 
 
 
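The ownership rule documented for post_recv() above keeps buffer
accounting trivial for callers: on success the buffer belongs to the
queue pair until a completion returns it, and on failure the caller
frees it immediately.  A caller-side sketch (dispatch through struct
ib_device_operations is shown schematically; mlx_refill_rx() in
mt25218.c currently calls arbel_post_recv() directly):

	struct io_buffer *iobuf;
	int rc;

	iobuf = alloc_iob ( 2048 );
	if ( ! iobuf )
		return -ENOMEM;
	if ( ( rc = op->post_recv ( ibdev, qp, iobuf ) ) != 0 ) {
		/* Never enqueued: we still own the buffer */
		free_iob ( iobuf );
		return rc;
	}
	/* Enqueued: the queue pair owns iobuf until completion */
	return 0;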

src/net/infiniband.c (+10, -9)

@@ -34,19 +34,20 @@
  */
 
 /**
- * Find queue pair from a list
+ * Find work queue belonging to completion queue
  *
- * @v list		List of queue pairs
+ * @v cq		Completion queue
  * @v qpn		Queue pair number
- * @ret qp		Queue pair, or NULL if not found
+ * @v is_send		Find send work queue (rather than receive)
+ * @ret wq		Work queue, or NULL if not found
  */
-struct ib_queue_pair * ib_find_qp ( struct list_head *list,
-				    unsigned long qpn ) {
-	struct ib_queue_pair *qp;
+struct ib_work_queue * ib_find_wq ( struct ib_completion_queue *cq,
+				    unsigned long qpn, int is_send ) {
+	struct ib_work_queue *wq;
 
-	list_for_each_entry ( qp, list, list ) {
-		if ( qp->qpn == qpn )
-			return qp;
+	list_for_each_entry ( wq, &cq->work_queues, list ) {
+		if ( ( wq->qp->qpn == qpn ) && ( wq->is_send == is_send ) )
+			return wq;
 	}
 	return NULL;
 }
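Keying the lookup on ( qpn, is_send ) rather than on QPN alone is the
point of this change: a queue pair's send and receive work queues may
complete to the same completion queue, in which case the QPN by itself
is ambiguous.  A typical caller, mirroring arbel_poll_cq() above:

	struct ib_work_queue *wq;

	/* is_send is decoded from the completion entry itself */
	wq = ib_find_wq ( cq, qpn, is_send );
	if ( ! wq )
		return -EIO;
	/* The owning queue pair replaces the old ib_find_qp() result */
	qp = wq->qp;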
