@@ -25,28 +25,6 @@ Skeleton NIC driver for Etherboot
 
 #include "arbel.h"
 
-struct arbel_send_work_queue {
-	/** Doorbell record number */
-	unsigned int doorbell_idx;
-	/** Work queue entries */
-	// struct ud_send_wqe_st *wqe;
-	union ud_send_wqe_u *wqe_u;
-};
-
-struct arbel_completion_queue {
-	/** Doorbell record number */
-	unsigned int doorbell_idx;
-	/** Completion queue entries */
-	union arbelprm_completion_entry *cqe;
-};
-
-struct arbel {
-	/** User Access Region */
-	void *uar;
-	/** Doorbell records */
-	union arbelprm_doorbell_record *db_rec;
-};
-
 
 
 struct mlx_nic {
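
The three structures deleted above do not disappear; they evidently move into the newly included "arbel.h", since the rest of this patch keeps instantiating them but now reaches them through dev_priv pointers, indexes a wqe[] array with a .ud member, and also uses a struct arbel_recv_work_queue. A rough sketch of what arbel.h presumably declares after this change, reconstructed only from those uses (the union layout and the receive-WQE element type are guesses, not the real header):

/* Hypothetical arbel.h fragment -- inferred from usage elsewhere in
 * this patch, not copied from the actual header. */

/* One send work queue entry; ".ud" is the UD send WQE layout */
union arbel_send_wqe {
	struct arbelprm_ud_send_wqe ud;
	uint8_t force_align[128];	/* alignment/size is a guess */
};

struct arbel_send_work_queue {
	/** Doorbell record number */
	unsigned int doorbell_idx;
	/** Work queue entries */
	union arbel_send_wqe *wqe;
};

struct arbel_recv_work_queue {
	/** Doorbell record number */
	unsigned int doorbell_idx;
	/** Work queue entries */
	struct arbelprm_recv_wqe *wqe;	/* element type is a guess */
};

struct arbel_completion_queue {
	/** Doorbell record number */
	unsigned int doorbell_idx;
	/** Completion queue entries */
	union arbelprm_completion_entry *cqe;
};

struct arbel {
	/** User Access Region */
	void *uar;
	/** Doorbell records */
	union arbelprm_doorbell_record *db_rec;
};
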
@@ -119,9 +97,10 @@ static int mlx_transmit ( struct net_device *netdev,
 	return 0;
 }
 
-static int arbel_post_send ( struct ib_device *ibdev, struct io_buffer *iobuf,
+static int arbel_post_send ( struct ib_device *ibdev,
+			     struct ib_queue_pair *qp,
 			     struct ib_address_vector *av,
-			     struct ib_queue_pair *qp );
+			     struct io_buffer *iobuf );
 
 static struct io_buffer *tx_ring[NUM_IPOIB_SND_WQES];
 static int next_tx_idx = 0;
@@ -137,10 +116,10 @@ static int mlx_transmit_direct ( struct net_device *netdev,
 	};
 	struct arbel_send_work_queue arbel_send_queue = {
 		.doorbell_idx = IPOIB_SND_QP_DB_IDX,
-		.wqe_u = ( (struct udqp_st *) mlx->ipoib_qph )->snd_wq,
+		.wqe = ( (struct udqp_st *) mlx->ipoib_qph )->snd_wq,
 	};
 	struct ib_device ibdev = {
-		.priv = &arbel,
+		.dev_priv = &arbel,
 	};
 	struct ib_queue_pair qp = {
 		.qpn = ib_get_qpn ( mlx->ipoib_qph ),
@@ -148,7 +127,7 @@ static int mlx_transmit_direct ( struct net_device *netdev,
 			.num_wqes = NUM_IPOIB_SND_WQES,
 			.next_idx = next_tx_idx,
 			.iobufs = tx_ring,
-			.priv = &arbel_send_queue,
+			.dev_priv = &arbel_send_queue,
 		},
 	};
 	struct ud_av_st *bcast_av = mlx->bcast_av;
@@ -164,7 +143,7 @@ static int mlx_transmit_direct ( struct net_device *netdev,
 	};
 	memcpy ( &av.gid, ( ( void * ) bav ) + 16, 16 );
 
-	rc = arbel_post_send ( &ibdev, iobuf, &av, &qp );
+	rc = arbel_post_send ( &ibdev, &qp, &av, iobuf );
 
 	next_tx_idx = qp.send.next_idx;
 
@@ -172,6 +151,75 @@ static int mlx_transmit_direct ( struct net_device *netdev,
 }
 
 
+static void arbel_poll_cq ( struct ib_device *ibdev,
+			    struct ib_completion_queue *cq,
+			    ib_completer_t complete_send,
+			    ib_completer_t complete_recv );
+
+static void temp_complete_send ( struct ib_device *ibdev __unused,
+				 struct ib_queue_pair *qp,
+				 struct ib_completion *completion,
+				 struct io_buffer *iobuf ) {
+	struct net_device *netdev = qp->priv;
+
+	DBG ( "Wahey! TX completion\n" );
+	netdev_tx_complete_err ( netdev, iobuf,
+				 ( completion->syndrome ? -EIO : 0 ) );
+}
+
+static void temp_complete_recv ( struct ib_device *ibdev __unused,
+				 struct ib_queue_pair *qp __unused,
+				 struct ib_completion *completion __unused,
+				 struct io_buffer *iobuf __unused ) {
+	DBG ( "AARGH! recv completion\n" );
+}
+
+static int next_cq_idx = 0;
+
+static void mlx_poll_cq_direct ( struct net_device *netdev ) {
+	struct mlx_nic *mlx = netdev->priv;
+
+	struct arbel arbel = {
+		.uar = memfree_pci_dev.uar,
+		.db_rec = dev_ib_data.uar_context_base,
+	};
+	struct arbel_send_work_queue arbel_send_queue = {
+		.doorbell_idx = IPOIB_SND_QP_DB_IDX,
+		.wqe = ( ( struct udqp_st * ) mlx->ipoib_qph )->snd_wq,
+	};
+	struct ib_device ibdev = {
+		.dev_priv = &arbel,
+	};
+	struct ib_queue_pair qp = {
+		.qpn = ib_get_qpn ( mlx->ipoib_qph ),
+		.send = {
+			.num_wqes = NUM_IPOIB_SND_WQES,
+			.next_idx = next_tx_idx,
+			.iobufs = tx_ring,
+			.dev_priv = &arbel_send_queue,
+		},
+		.priv = netdev,
+	};
+	struct arbel_completion_queue arbel_cq = {
+		.doorbell_idx = IPOIB_SND_CQ_CI_DB_IDX,
+		.cqe = ( ( struct cq_st * ) mlx->snd_cqh )->cq_buf,
+	};
+	struct ib_completion_queue cq = {
+		.cqn = 1234,
+		.num_cqes = NUM_IPOIB_SND_CQES,
+		.next_idx = next_cq_idx,
+		.dev_priv = &arbel_cq,
+	};
+
+	INIT_LIST_HEAD ( &cq.queue_pairs );
+	INIT_LIST_HEAD ( &qp.list );
+	list_add ( &qp.list, &cq.queue_pairs );
+
+	arbel_poll_cq ( &ibdev, &cq, temp_complete_send, temp_complete_recv );
+
+	next_cq_idx = cq.next_idx;
+}
+
 /**
  * Handle TX completion
  *
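
The new poll path above hands each completion to one of two callbacks (temp_complete_send() / temp_complete_recv() for now). Their common signature, together with the only two ib_completion fields this patch touches (syndrome and len), implies declarations along the following lines; this is a sketch inferred from usage, not the real Infiniband core header, and the field types are guesses:

/* Sketch only: inferred from how the completion handlers are
 * invoked in this patch. */

struct ib_completion {
	/** Completion syndrome; non-zero indicates an error */
	unsigned int syndrome;
	/** Length of data transferred */
	size_t len;
};

/** A completion handler, invoked once per completed work queue entry */
typedef void ( * ib_completer_t ) ( struct ib_device *ibdev,
				    struct ib_queue_pair *qp,
				    struct ib_completion *completion,
				    struct io_buffer *iobuf );
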
@@ -276,7 +324,11 @@ static void mlx_poll ( struct net_device *netdev ) {
 	}
 
 	/* Poll completion queues */
+#if 0
 	mlx_poll_cq ( netdev, mlx->snd_cqh, mlx_tx_complete );
+#else
+	mlx_poll_cq_direct ( netdev );
+#endif
 	mlx_poll_cq ( netdev, mlx->rcv_cqh, mlx_rx_complete );
 }
 
@@ -336,17 +388,18 @@ static void arbel_ring_doorbell ( struct arbel *arbel,
  * Post send work queue entry
  *
  * @v ibdev		Infiniband device
- * @v iobuf		I/O buffer
- * @v av		Address vector
  * @v qp		Queue pair
+ * @v av		Address vector
+ * @v iobuf		I/O buffer
  * @ret rc		Return status code
  */
-static int arbel_post_send ( struct ib_device *ibdev, struct io_buffer *iobuf,
+static int arbel_post_send ( struct ib_device *ibdev,
+			     struct ib_queue_pair *qp,
 			     struct ib_address_vector *av,
-			     struct ib_queue_pair *qp ) {
-	struct arbel *arbel = ibdev->priv;
+			     struct io_buffer *iobuf ) {
+	struct arbel *arbel = ibdev->dev_priv;
 	struct ib_work_queue *wq = &qp->send;
-	struct arbel_send_work_queue *arbel_wq = wq->priv;
+	struct arbel_send_work_queue *arbel_send_wq = wq->dev_priv;
 	struct arbelprm_ud_send_wqe *prev_wqe;
 	struct arbelprm_ud_send_wqe *wqe;
 	union arbelprm_doorbell_record *db_rec;
@@ -358,12 +411,12 @@ static int arbel_post_send ( struct ib_device *ibdev, struct io_buffer *iobuf,
 	/* Allocate work queue entry */
 	wqe_idx_mask = ( wq->num_wqes - 1 );
 	if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
-		DBGC ( arbel, "ARBEL %p send queue full", arbel );
+		DBGC ( arbel, "Arbel %p send queue full", arbel );
 		return -ENOBUFS;
 	}
 	wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
-	prev_wqe = &arbel_wq->wqe_u[(wq->next_idx - 1) & wqe_idx_mask].wqe_cont.wqe;
-	wqe = &arbel_wq->wqe_u[wq->next_idx & wqe_idx_mask].wqe_cont.wqe;
+	prev_wqe = &arbel_send_wq->wqe[(wq->next_idx - 1) & wqe_idx_mask].ud;
+	wqe = &arbel_send_wq->wqe[wq->next_idx & wqe_idx_mask].ud;
 
 	/* Construct work queue entry */
 	MLX_FILL_1 ( &wqe->next, 1, always1, 1 );
@@ -395,7 +448,7 @@ static int arbel_post_send ( struct ib_device *ibdev, struct io_buffer *iobuf,
 	/* Update previous work queue entry's "next" field */
 	nds = ( ( offsetof ( typeof ( *wqe ), data ) +
 		  sizeof ( wqe->data[0] ) ) >> 4 );
-	MLX_SET ( &prev_wqe->next, nopcode, XDEV_NOPCODE_SEND );
+	MLX_SET ( &prev_wqe->next, nopcode, ARBEL_OPCODE_SEND );
 	MLX_FILL_3 ( &prev_wqe->next, 1,
 		     nds, nds,
 		     f, 1,
@@ -405,7 +458,7 @@ static int arbel_post_send ( struct ib_device *ibdev, struct io_buffer *iobuf,
 	DBG_HD ( &prev_wqe->next, sizeof ( prev_wqe->next ) );
 
 	/* Update doorbell record */
-	db_rec = &arbel->db_rec[arbel_wq->doorbell_idx];
+	db_rec = &arbel->db_rec[arbel_send_wq->doorbell_idx];
 	MLX_FILL_1 ( &db_rec->qp, 0,
 		     counter, ( ( wq->next_idx + 1 ) & 0xffff ) );
 	barrier();
@@ -414,7 +467,7 @@ static int arbel_post_send ( struct ib_device *ibdev, struct io_buffer *iobuf,
 
 	/* Ring doorbell register */
 	MLX_FILL_4 ( &db_reg.send, 0,
-		     nopcode, XDEV_NOPCODE_SEND,
+		     nopcode, ARBEL_OPCODE_SEND,
 		     f, 1,
 		     wqe_counter, ( wq->next_idx & 0xffff ),
 		     wqe_cnt, 1 );
@@ -429,50 +482,126 @@ static int arbel_post_send ( struct ib_device *ibdev, struct io_buffer *iobuf,
 	return 0;
 }
 
-static void arbel_parse_completion ( struct arbel *arbel,
-				     union arbelprm_completion_entry *cqe,
-				     struct ib_completion *completion ) {
-	memset ( completion, 0, sizeof ( *completion ) );
-	completion->is_send = MLX_GET ( &cqe->normal, s );
-	completion->len = MLX_GET ( &cqe->normal, byte_cnt );
-}
+/**
+ * Handle completion
+ *
+ * @v ibdev		Infiniband device
+ * @v cq		Completion queue
+ * @v cqe		Hardware completion queue entry
+ * @v complete_send	Send completion handler
+ * @v complete_recv	Receive completion handler
+ * @ret rc		Return status code
+ */
+static int arbel_complete ( struct ib_device *ibdev,
+			    struct ib_completion_queue *cq,
+			    union arbelprm_completion_entry *cqe,
+			    ib_completer_t complete_send,
+			    ib_completer_t complete_recv ) {
+	struct arbel *arbel = ibdev->dev_priv;
+	struct ib_completion completion;
+	struct ib_queue_pair *qp;
+	struct ib_work_queue *wq;
+	struct io_buffer *iobuf;
+	struct arbel_send_work_queue *arbel_send_wq;
+	struct arbel_recv_work_queue *arbel_recv_wq;
+	ib_completer_t complete;
+	unsigned int opcode;
+	unsigned long qpn;
+	unsigned int is_send;
+	unsigned long wqe_adr;
+	unsigned int wqe_idx;
+	int rc = 0;
+
+	/* Parse completion */
+	memset ( &completion, 0, sizeof ( completion ) );
+	completion.len = MLX_GET ( &cqe->normal, byte_cnt );
+	qpn = MLX_GET ( &cqe->normal, my_qpn );
+	is_send = MLX_GET ( &cqe->normal, s );
+	wqe_adr = ( MLX_GET ( &cqe->normal, wqe_adr ) << 6 );
+	opcode = MLX_GET ( &cqe->normal, opcode );
+	if ( opcode >= ARBEL_OPCODE_RECV_ERROR ) {
+		/* "s" field is not valid for error opcodes */
+		is_send = ( opcode == ARBEL_OPCODE_SEND_ERROR );
+		completion.syndrome = MLX_GET ( &cqe->error, syndrome );
+		DBGC ( arbel, "Arbel %p CPN %lx syndrome %x vendor %lx\n",
+		       arbel, cq->cqn, completion.syndrome,
+		       MLX_GET ( &cqe->error, vendor_code ) );
+		rc = -EIO;
+		/* Don't return immediately; propagate error to completer */
+	}
+
+	/* Identify queue pair */
+	qp = ib_find_qp ( &cq->queue_pairs, qpn );
+	if ( ! qp ) {
+		DBGC ( arbel, "Arbel %p CQN %lx unknown QPN %lx\n",
+		       arbel, cq->cqn, qpn );
+		return -EIO;
+	}
+
+	/* Identify work queue entry index */
+	if ( is_send ) {
+		wq = &qp->send;
+		arbel_send_wq = wq->dev_priv;
+		wqe_idx = ( ( wqe_adr - virt_to_bus ( arbel_send_wq->wqe ) ) /
+			    sizeof ( arbel_send_wq->wqe[0] ) );
+	} else {
+		wq = &qp->recv;
+		arbel_recv_wq = wq->dev_priv;
+		wqe_idx = ( ( wqe_adr - virt_to_bus ( arbel_recv_wq->wqe ) ) /
+			    sizeof ( arbel_recv_wq->wqe[0] ) );
+	}
+
+	/* Identify I/O buffer */
+	iobuf = wq->iobufs[wqe_idx];
+	if ( ! iobuf ) {
+		DBGC ( arbel, "Arbel %p CQN %lx QPN %lx empty WQE %x\n",
+		       arbel, cq->cqn, qpn, wqe_idx );
+		return -EIO;
+	}
+	wq->iobufs[wqe_idx] = NULL;
+
+	/* Pass off to caller's completion handler */
+	complete = ( is_send ? complete_send : complete_recv );
+	complete ( ibdev, qp, &completion, iobuf );
+
+	return rc;
+}
 
 /**
  * Poll completion queue
  *
  * @v ibdev		Infiniband device
  * @v cq		Completion queue
- * @v complete		Completion handler
+ * @v complete_send	Send completion handler
+ * @v complete_recv	Receive completion handler
  */
 static void arbel_poll_cq ( struct ib_device *ibdev,
 			    struct ib_completion_queue *cq,
 			    ib_completer_t complete_send,
 			    ib_completer_t complete_recv ) {
-	struct arbel *arbel = ibdev->priv;
-	struct arbel_completion_queue *arbel_cq = cq->priv;
-	unsigned int cqe_idx_mask = ( cq->num_cqes - 1 );
+	struct arbel *arbel = ibdev->dev_priv;
+	struct arbel_completion_queue *arbel_cq = cq->dev_priv;
 	union arbelprm_doorbell_record *db_rec;
 	union arbelprm_completion_entry *cqe;
-	struct ib_completion completion;
-	struct io_buffer *iobuf;
-	int is_send;
+	unsigned int cqe_idx_mask;
+	int rc;
 
 	while ( 1 ) {
 		/* Look for completion entry */
+		cqe_idx_mask = ( cq->num_cqes - 1 );
 		cqe = &arbel_cq->cqe[cq->next_idx & cqe_idx_mask];
 		if ( MLX_GET ( &cqe->normal, owner ) != 0 ) {
 			/* Entry still owned by hardware; end of poll */
			break;
 		}
 
-		/* Parse completion */
-
-
-
 		/* Handle completion */
-		( is_send ? complete_send : complete_recv ) ( ibdev,
-							      &completion,
-							      iobuf );
+		if ( ( rc = arbel_complete ( ibdev, cq, cqe, complete_send,
+					     complete_recv ) ) != 0 ) {
+			DBGC ( arbel, "Arbel %p failed to complete: %s\n",
+			       arbel, strerror ( rc ) );
+			DBGC_HD ( arbel, cqe, sizeof ( *cqe ) );
+		}
 
 		/* Return ownership to hardware */
 		MLX_FILL_1 ( &cqe->normal, 7, owner, 1 );