Преглед изворни кода

[arbel] Allocate space for GRH on UD queue pairs

As with the previous commit (for Hermon), allocate a separate ring
buffer to hold received GRHs.

Signed-off-by: Michael Brown <mcb30@ipxe.org>
tags/v1.20.1
Michael Brown пре 8 година
родитељ
комит
57c63047e3
2 измењених фајлова са 55 додато и 16 уклоњено
  1. src/drivers/infiniband/arbel.c (+50, −15)
  2. src/drivers/infiniband/arbel.h (+5, −1)

+ 50
- 15
src/drivers/infiniband/arbel.c Прегледај датотеку

@@ -897,26 +897,44 @@ static int arbel_create_send_wq ( struct arbel_send_work_queue *arbel_send_wq,
897 897
  *
898 898
  * @v arbel_recv_wq	Receive work queue
899 899
  * @v num_wqes		Number of work queue entries
900
+ * @v type		Queue pair type
900 901
  * @ret rc		Return status code
901 902
  */
902 903
 static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq,
903
-				  unsigned int num_wqes ) {
904
+				  unsigned int num_wqes,
905
+				  enum ib_queue_pair_type type ) {
904 906
 	struct arbelprm_recv_wqe *wqe;
905 907
 	struct arbelprm_recv_wqe *next_wqe;
906 908
 	unsigned int wqe_idx_mask;
907 909
 	size_t nds;
908 910
 	unsigned int i;
909 911
 	unsigned int j;
912
+	int rc;
910 913
 
911 914
 	/* Allocate work queue */
912 915
 	arbel_recv_wq->wqe_size = ( num_wqes *
913 916
 				    sizeof ( arbel_recv_wq->wqe[0] ) );
914 917
 	arbel_recv_wq->wqe = malloc_dma ( arbel_recv_wq->wqe_size,
915 918
 					  sizeof ( arbel_recv_wq->wqe[0] ) );
916
-	if ( ! arbel_recv_wq->wqe )
917
-		return -ENOMEM;
919
+	if ( ! arbel_recv_wq->wqe ) {
920
+		rc = -ENOMEM;
921
+		goto err_alloc_wqe;
922
+	}
918 923
 	memset ( arbel_recv_wq->wqe, 0, arbel_recv_wq->wqe_size );
919 924
 
925
+	/* Allocate GRH entries, if needed */
926
+	if ( ( type == IB_QPT_SMI ) || ( type == IB_QPT_GSI ) ||
927
+	     ( type == IB_QPT_UD ) ) {
928
+		arbel_recv_wq->grh_size = ( num_wqes *
929
+					    sizeof ( arbel_recv_wq->grh[0] ) );
930
+		arbel_recv_wq->grh = malloc_dma ( arbel_recv_wq->grh_size,
931
+						  sizeof ( void * ) );
932
+		if ( ! arbel_recv_wq->grh ) {
933
+			rc = -ENOMEM;
934
+			goto err_alloc_grh;
935
+		}
936
+	}
937
+
920 938
 	/* Link work queue entries */
921 939
 	wqe_idx_mask = ( num_wqes - 1 );
922 940
 	nds = ( ( offsetof ( typeof ( *wqe ), data ) +
@@ -935,6 +953,12 @@ static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq,
935 953
 	}
936 954
 	
937 955
 	return 0;
956
+
957
+	free_dma ( arbel_recv_wq->grh, arbel_recv_wq->grh_size );
958
+ err_alloc_grh:
959
+	free_dma ( arbel_recv_wq->wqe, arbel_recv_wq->wqe_size );
960
+ err_alloc_wqe:
961
+	return rc;
938 962
 }
939 963
 
940 964
 /**
@@ -985,8 +1009,8 @@ static int arbel_create_qp ( struct ib_device *ibdev,
985 1009
 	if ( ( rc = arbel_create_send_wq ( &arbel_qp->send,
986 1010
 					   qp->send.num_wqes ) ) != 0 )
987 1011
 		goto err_create_send_wq;
988
-	if ( ( rc = arbel_create_recv_wq ( &arbel_qp->recv,
989
-					   qp->recv.num_wqes ) ) != 0 )
1012
+	if ( ( rc = arbel_create_recv_wq ( &arbel_qp->recv, qp->recv.num_wqes,
1013
+					   qp->type ) ) != 0 )
990 1014
 		goto err_create_recv_wq;
991 1015
 
992 1016
 	/* Send and receive work queue entries must be within the same 4GB */
@@ -1078,6 +1102,7 @@ static int arbel_create_qp ( struct ib_device *ibdev,
1078 1102
 	MLX_FILL_1 ( send_db_rec, 1, res, ARBEL_UAR_RES_NONE );
1079 1103
 	MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );
1080 1104
  err_unsupported_address_split:
1105
+	free_dma ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
1081 1106
 	free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
1082 1107
  err_create_recv_wq:
1083 1108
 	free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
@@ -1206,8 +1231,9 @@ static void arbel_destroy_qp ( struct ib_device *ibdev,
1206 1231
 	MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );
1207 1232
 
1208 1233
 	/* Free memory */
1209
-	free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
1234
+	free_dma ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
1210 1235
 	free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
1236
+	free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
1211 1237
 	free ( arbel_qp );
1212 1238
 
1213 1239
 	/* Mark queue number as free */
@@ -1477,6 +1503,8 @@ static int arbel_post_recv ( struct ib_device *ibdev,
1477 1503
 	struct ib_work_queue *wq = &qp->recv;
1478 1504
 	struct arbel_recv_work_queue *arbel_recv_wq = &arbel_qp->recv;
1479 1505
 	struct arbelprm_recv_wqe *wqe;
1506
+	struct arbelprm_wqe_segment_data_ptr *data;
1507
+	struct ib_global_route_header *grh;
1480 1508
 	union arbelprm_doorbell_record *db_rec;
1481 1509
 	unsigned int wqe_idx_mask;
1482 1510
 
@@ -1491,12 +1519,19 @@ static int arbel_post_recv ( struct ib_device *ibdev,
1491 1519
 	wqe = &arbel_recv_wq->wqe[wq->next_idx & wqe_idx_mask].recv;
1492 1520
 
1493 1521
 	/* Construct work queue entry */
1494
-	MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_tailroom ( iobuf ) );
1495
-	MLX_FILL_1 ( &wqe->data[0], 1, l_key, arbel->lkey );
1496
-	MLX_FILL_H ( &wqe->data[0], 2,
1497
-		     local_address_h, virt_to_bus ( iobuf->data ) );
1498
-	MLX_FILL_1 ( &wqe->data[0], 3,
1499
-		     local_address_l, virt_to_bus ( iobuf->data ) );
1522
+	data = &wqe->data[0];
1523
+	if ( arbel_recv_wq->grh ) {
1524
+		grh = &arbel_recv_wq->grh[wq->next_idx & wqe_idx_mask];
1525
+		MLX_FILL_1 ( data, 0, byte_count, sizeof ( *grh ) );
1526
+		MLX_FILL_1 ( data, 1, l_key, arbel->lkey );
1527
+		MLX_FILL_H ( data, 2, local_address_h, virt_to_bus ( grh ) );
1528
+		MLX_FILL_1 ( data, 3, local_address_l, virt_to_bus ( grh ) );
1529
+		data++;
1530
+	}
1531
+	MLX_FILL_1 ( data, 0, byte_count, iob_tailroom ( iobuf ) );
1532
+	MLX_FILL_1 ( data, 1, l_key, arbel->lkey );
1533
+	MLX_FILL_H ( data, 2, local_address_h, virt_to_bus ( iobuf->data ) );
1534
+	MLX_FILL_1 ( data, 3, local_address_l, virt_to_bus ( iobuf->data ) );
1500 1535
 
1501 1536
 	/* Update doorbell record */
1502 1537
 	barrier();
@@ -1619,9 +1654,9 @@ static int arbel_complete ( struct ib_device *ibdev,
1619 1654
 		case IB_QPT_SMI:
1620 1655
 		case IB_QPT_GSI:
1621 1656
 		case IB_QPT_UD:
1622
-			assert ( iob_len ( iobuf ) >= sizeof ( *grh ) );
1623
-			grh = iobuf->data;
1624
-			iob_pull ( iobuf, sizeof ( *grh ) );
1657
+			/* Locate corresponding GRH */
1658
+			assert ( arbel_recv_wq->grh != NULL );
1659
+			grh = &arbel_recv_wq->grh[wqe_idx];
1625 1660
 			/* Construct address vector */
1626 1661
 			source = &recv_source;
1627 1662
 			memset ( source, 0, sizeof ( *source ) );

+ 5
- 1
src/drivers/infiniband/arbel.h Прегледај датотеку

@@ -237,7 +237,7 @@ struct arbelprm_rc_send_wqe {
237 237
 	struct arbelprm_wqe_segment_data_ptr data[ARBEL_MAX_GATHER];
238 238
 } __attribute__ (( packed ));
239 239
 
240
-#define ARBEL_MAX_SCATTER 1
240
+#define ARBEL_MAX_SCATTER 2
241 241
 
242 242
 struct arbelprm_recv_wqe {
243 243
 	/* The autogenerated header is inconsistent between send and
@@ -369,6 +369,10 @@ struct arbel_recv_work_queue {
369 369
 	union arbel_recv_wqe *wqe;
370 370
 	/** Size of work queue */
371 371
 	size_t wqe_size;
372
+	/** GRH buffers (if applicable) */
373
+	struct ib_global_route_header *grh;
374
+	/** Size of GRH buffers */
375
+	size_t grh_size;
372 376
 };
373 377
 
374 378
 /** Number of special queue pairs */

Loading…
Откажи
Сачувај