@@ -54,44 +54,42 @@ FILE_LICENCE ( GPL2_OR_LATER );
  */

 /**
- * Allocate queue number
+ * Allocate offset within usage bitmask
  *
- * @v q_inuse          Queue usage bitmask
- * @v max_inuse        Maximum number of in-use queues
- * @ret qn_offset      Free queue number offset, or negative error
+ * @v bits             Usage bitmask
+ * @v bits_len         Length of usage bitmask
+ * @ret bit            First free bit within bitmask, or negative error
  */
-static int arbel_alloc_qn_offset ( arbel_bitmask_t *q_inuse,
-                                   unsigned int max_inuse ) {
-        unsigned int qn_offset = 0;
+static int arbel_bitmask_alloc ( arbel_bitmask_t *bits,
+                                 unsigned int bits_len ) {
+        unsigned int bit = 0;
         arbel_bitmask_t mask = 1;

-        while ( qn_offset < max_inuse ) {
-                if ( ( mask & *q_inuse ) == 0 ) {
-                        *q_inuse |= mask;
-                        return qn_offset;
-                }
-                qn_offset++;
-                mask <<= 1;
-                if ( ! mask ) {
-                        mask = 1;
-                        q_inuse++;
+        while ( bit < bits_len ) {
+                if ( ( mask & *bits ) == 0 ) {
+                        *bits |= mask;
+                        return bit;
                 }
+                bit++;
+                mask = ( mask << 1 ) | ( mask >> ( 8 * sizeof ( mask ) - 1 ) );
+                if ( mask == 1 )
+                        bits++;
         }
         return -ENFILE;
 }

 /**
- * Free queue number
+ * Free offset within usage bitmask
  *
- * @v q_inuse          Queue usage bitmask
- * @v qn_offset        Queue number offset
+ * @v bits             Usage bitmask
+ * @v bit              Bit within bitmask
  */
-static void arbel_free_qn_offset ( arbel_bitmask_t *q_inuse, int qn_offset ) {
+static void arbel_bitmask_free ( arbel_bitmask_t *bits, int bit ) {
         arbel_bitmask_t mask;

-        mask = ( 1 << ( qn_offset % ( 8 * sizeof ( mask ) ) ) );
-        q_inuse += ( qn_offset / ( 8 * sizeof ( mask ) ) );
-        *q_inuse &= ~mask;
+        mask = ( 1 << ( bit % ( 8 * sizeof ( mask ) ) ) );
+        bits += ( bit / ( 8 * sizeof ( mask ) ) );
+        *bits &= ~mask;
 }

 /***************************************************************************
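
As an aside (not part of the patch), a minimal usage sketch of the renamed helpers, mirroring how arbel_create_cq() and arbel_destroy_cq() use them in the hunks below. It assumes arbel_bitmask_t is a 32-bit word and ARBEL_MAX_CQS is the number of tracked bits, as declared in arbel.h; the example_* names are hypothetical.

/* Usage sketch: hand out one CQ number offset and return it.  The
 * example_* identifiers are hypothetical; arbel_bitmask_t is assumed
 * to be a 32-bit word as declared in arbel.h.
 */
static arbel_bitmask_t example_cq_inuse[ ( ARBEL_MAX_CQS + 31 ) / 32 ];

static int example_cqn_cycle ( unsigned int reserved_cqs ) {
        unsigned long cqn;
        int cqn_offset;

        /* First clear bit becomes the offset; a full bitmask gives -ENFILE */
        cqn_offset = arbel_bitmask_alloc ( example_cq_inuse, ARBEL_MAX_CQS );
        if ( cqn_offset < 0 )
                return cqn_offset;

        /* The hardware CQN lies beyond the firmware-reserved range */
        cqn = ( reserved_cqs + cqn_offset );
        ( void ) cqn;

        /* Freeing takes the offset back, not the CQN */
        arbel_bitmask_free ( example_cq_inuse, cqn_offset );
        return 0;
}

The rotate in arbel_bitmask_alloc() is what lets a single mask variable drive the word pointer: the pointer advances only when the mask wraps back to bit 0, so no separate word index is needed.
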
@@ -367,6 +365,14 @@ arbel_cmd_2rst_qpee ( struct arbel *arbel, unsigned long qpn ) {
                            0x03, NULL, qpn, NULL );
 }

+static inline int
+arbel_cmd_conf_special_qp ( struct arbel *arbel, unsigned int qp_type,
+                            unsigned long base_qpn ) {
+        return arbel_cmd ( arbel,
+                           ARBEL_HCR_VOID_CMD ( ARBEL_HCR_CONF_SPECIAL_QP ),
+                           qp_type, NULL, base_qpn, NULL );
+}
+
 static inline int
 arbel_cmd_mad_ifc ( struct arbel *arbel, unsigned int port,
                     union arbelprm_mad *mad ) {
@@ -556,7 +562,7 @@ static int arbel_create_cq ( struct ib_device *ibdev,
         int rc;

         /* Find a free completion queue number */
-        cqn_offset = arbel_alloc_qn_offset ( arbel->cq_inuse, ARBEL_MAX_CQS );
+        cqn_offset = arbel_bitmask_alloc ( arbel->cq_inuse, ARBEL_MAX_CQS );
         if ( cqn_offset < 0 ) {
                 DBGC ( arbel, "Arbel %p out of completion queues\n", arbel );
                 rc = cqn_offset;
@@ -570,8 +576,8 @@ static int arbel_create_cq ( struct ib_device *ibdev,
                 rc = -ENOMEM;
                 goto err_arbel_cq;
         }
-        arbel_cq->ci_doorbell_idx = arbel_cq_ci_doorbell_idx ( cqn_offset );
-        arbel_cq->arm_doorbell_idx = arbel_cq_arm_doorbell_idx ( cqn_offset );
+        arbel_cq->ci_doorbell_idx = arbel_cq_ci_doorbell_idx ( arbel, cq );
+        arbel_cq->arm_doorbell_idx = arbel_cq_arm_doorbell_idx ( arbel, cq );

         /* Allocate completion queue itself */
         arbel_cq->cqe_size = ( cq->num_cqes * sizeof ( arbel_cq->cqe[0] ) );
@@ -634,7 +640,7 @@ static int arbel_create_cq ( struct ib_device *ibdev,
  err_cqe:
         free ( arbel_cq );
  err_arbel_cq:
-        arbel_free_qn_offset ( arbel->cq_inuse, cqn_offset );
+        arbel_bitmask_free ( arbel->cq_inuse, cqn_offset );
  err_cqn_offset:
         return rc;
 }
@@ -675,7 +681,7 @@ static void arbel_destroy_cq ( struct ib_device *ibdev,

         /* Mark queue number as free */
         cqn_offset = ( cq->cqn - arbel->limits.reserved_cqs );
-        arbel_free_qn_offset ( arbel->cq_inuse, cqn_offset );
+        arbel_bitmask_free ( arbel->cq_inuse, cqn_offset );

         ib_cq_set_drvdata ( cq, NULL );
 }
@@ -687,6 +693,63 @@ static void arbel_destroy_cq ( struct ib_device *ibdev,
  ***************************************************************************
  */

+/**
+ * Assign queue pair number
+ *
+ * @v ibdev            Infiniband device
+ * @v qp               Queue pair
+ * @ret rc             Return status code
+ */
+static int arbel_alloc_qpn ( struct ib_device *ibdev,
+                             struct ib_queue_pair *qp ) {
+        struct arbel *arbel = ib_get_drvdata ( ibdev );
+        unsigned int port_offset;
+        int qpn_offset;
+
+        /* Calculate queue pair number */
+        port_offset = ( ibdev->port - ARBEL_PORT_BASE );
+
+        switch ( qp->type ) {
+        case IB_QPT_SMI:
+                qp->qpn = ( arbel->special_qpn_base + port_offset );
+                return 0;
+        case IB_QPT_GSI:
+                qp->qpn = ( arbel->special_qpn_base + 2 + port_offset );
+                return 0;
+        case IB_QPT_UD:
+                /* Find a free queue pair number */
+                qpn_offset = arbel_bitmask_alloc ( arbel->qp_inuse,
+                                                   ARBEL_MAX_QPS );
+                if ( qpn_offset < 0 ) {
+                        DBGC ( arbel, "Arbel %p out of queue pairs\n",
+                               arbel );
+                        return qpn_offset;
+                }
+                qp->qpn = ( arbel->qpn_base + qpn_offset );
+                return 0;
+        default:
+                DBGC ( arbel, "Arbel %p unsupported QP type %d\n",
+                       arbel, qp->type );
+                return -ENOTSUP;
+        }
+}
+
+/**
+ * Free queue pair number
+ *
+ * @v ibdev            Infiniband device
+ * @v qp               Queue pair
+ */
+static void arbel_free_qpn ( struct ib_device *ibdev,
+                             struct ib_queue_pair *qp ) {
+        struct arbel *arbel = ib_get_drvdata ( ibdev );
+        int qpn_offset;
+
+        qpn_offset = ( qp->qpn - arbel->qpn_base );
+        if ( qpn_offset >= 0 )
+                arbel_bitmask_free ( arbel->qp_inuse, qpn_offset );
+}
+
 /**
  * Create send work queue
  *
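
As an aside (not part of the patch), a worked example of the QPN layout that arbel_alloc_qpn() above and arbel_configure_special_qps() further down produce together. The reserved-QP count and the two-port, four-special-QP arrangement are assumptions chosen only to make the arithmetic concrete.

#include <stdio.h>

/* Worked QPN layout sketch; all numeric values are assumptions */
int main ( void ) {
        unsigned int reserved_qps = 64;         /* assumed firmware value */
        unsigned int num_special_qps = 4;       /* 2 ports x ( SMI + GSI ) */
        unsigned int special_qpn_base;
        unsigned int qpn_base;

        /* Special QP block must start on an even QPN */
        special_qpn_base = ( ( reserved_qps + 1 ) & ~1 );
        qpn_base = ( special_qpn_base + num_special_qps );

        printf ( "SMI QPNs (port 1, port 2): %u, %u\n",
                 special_qpn_base, ( special_qpn_base + 1 ) );
        printf ( "GSI QPNs (port 1, port 2): %u, %u\n",
                 ( special_qpn_base + 2 ), ( special_qpn_base + 3 ) );
        printf ( "General UD QPNs start at:  %u\n", qpn_base );
        return 0;
}

With these values the SMI QPs land on 64 and 65, the GSI QPs on 66 and 67, and the bitmask-allocated UD QPs start at 68, matching the window that arbel_alloc_qpn() indexes into.
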
@@ -717,6 +780,7 @@ static int arbel_create_send_wq ( struct arbel_send_work_queue *arbel_send_wq,
                 next_wqe = &arbel_send_wq->wqe[ ( i + 1 ) & wqe_idx_mask ].ud;
                 MLX_FILL_1 ( &wqe->next, 0, nda_31_6,
                              ( virt_to_bus ( next_wqe ) >> 6 ) );
+                MLX_FILL_1 ( &wqe->next, 1, always1, 1 );
         }

         return 0;
@@ -781,17 +845,11 @@ static int arbel_create_qp ( struct ib_device *ibdev,
         struct arbelprm_qp_ee_state_transitions qpctx;
         struct arbelprm_qp_db_record *send_db_rec;
         struct arbelprm_qp_db_record *recv_db_rec;
-        int qpn_offset;
         int rc;

-        /* Find a free queue pair number */
-        qpn_offset = arbel_alloc_qn_offset ( arbel->qp_inuse, ARBEL_MAX_QPS );
-        if ( qpn_offset < 0 ) {
-                DBGC ( arbel, "Arbel %p out of queue pairs\n", arbel );
-                rc = qpn_offset;
-                goto err_qpn_offset;
-        }
-        qp->qpn = ( ARBEL_QPN_BASE + arbel->limits.reserved_qps + qpn_offset );
+        /* Calculate queue pair number */
+        if ( ( rc = arbel_alloc_qpn ( ibdev, qp ) ) != 0 )
+                goto err_alloc_qpn;

         /* Allocate control structures */
         arbel_qp = zalloc ( sizeof ( *arbel_qp ) );
@@ -799,8 +857,8 @@ static int arbel_create_qp ( struct ib_device *ibdev,
                 rc = -ENOMEM;
                 goto err_arbel_qp;
         }
-        arbel_qp->send.doorbell_idx = arbel_send_doorbell_idx ( qpn_offset );
-        arbel_qp->recv.doorbell_idx = arbel_recv_doorbell_idx ( qpn_offset );
+        arbel_qp->send.doorbell_idx = arbel_send_doorbell_idx ( arbel, qp );
+        arbel_qp->recv.doorbell_idx = arbel_recv_doorbell_idx ( arbel, qp );

         /* Create send and receive work queues */
         if ( ( rc = arbel_create_send_wq ( &arbel_qp->send,
@@ -827,7 +885,9 @@ static int arbel_create_qp ( struct ib_device *ibdev,
         MLX_FILL_3 ( &qpctx, 2,
                      qpc_eec_data.de, 1,
                      qpc_eec_data.pm_state, 0x03 /* Always 0x03 for UD */,
-                     qpc_eec_data.st, ARBEL_ST_UD );
+                     qpc_eec_data.st,
+                     ( ( qp->type == IB_QPT_UD ) ?
+                       ARBEL_ST_UD : ARBEL_ST_MLX ) );
         MLX_FILL_6 ( &qpctx, 4,
                      qpc_eec_data.mtu, ARBEL_MTU_2048,
                      qpc_eec_data.msg_max, 11 /* 2^11 = 2048 */,
@@ -897,8 +957,8 @@ static int arbel_create_qp ( struct ib_device *ibdev,
  err_create_send_wq:
         free ( arbel_qp );
  err_arbel_qp:
-        arbel_free_qn_offset ( arbel->qp_inuse, qpn_offset );
- err_qpn_offset:
+        arbel_free_qpn ( ibdev, qp );
+ err_alloc_qpn:
         return rc;
 }

@@ -940,7 +1000,6 @@ static void arbel_destroy_qp ( struct ib_device *ibdev,
         struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
         struct arbelprm_qp_db_record *send_db_rec;
         struct arbelprm_qp_db_record *recv_db_rec;
-        int qpn_offset;
         int rc;

         /* Take ownership back from hardware */
@@ -963,8 +1022,7 @@ static void arbel_destroy_qp ( struct ib_device *ibdev,
         free ( arbel_qp );

         /* Mark queue number as free */
-        qpn_offset = ( qp->qpn - ARBEL_QPN_BASE - arbel->limits.reserved_qps );
-        arbel_free_qn_offset ( arbel->qp_inuse, qpn_offset );
+        arbel_free_qpn ( ibdev, qp );

         ib_qp_set_drvdata ( qp, NULL );
 }
@@ -1002,6 +1060,109 @@ static const union ib_gid arbel_no_gid = {
         .bytes = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0 },
 };

+/**
+ * Construct UD send work queue entry
+ *
+ * @v ibdev            Infiniband device
+ * @v qp               Queue pair
+ * @v av               Address vector
+ * @v iobuf            I/O buffer
+ * @v wqe              Send work queue entry
+ * @ret nds            Work queue entry size
+ */
+static size_t arbel_fill_ud_send_wqe ( struct ib_device *ibdev,
+                                       struct ib_queue_pair *qp __unused,
+                                       struct ib_address_vector *av,
+                                       struct io_buffer *iobuf,
+                                       union arbel_send_wqe *wqe ) {
+        struct arbel *arbel = ib_get_drvdata ( ibdev );
+        const union ib_gid *gid;
+
+        /* Construct this work queue entry */
+        MLX_FILL_1 ( &wqe->ud.ctrl, 0, always1, 1 );
+        MLX_FILL_2 ( &wqe->ud.ud, 0,
+                     ud_address_vector.pd, ARBEL_GLOBAL_PD,
+                     ud_address_vector.port_number, ibdev->port );
+        MLX_FILL_2 ( &wqe->ud.ud, 1,
+                     ud_address_vector.rlid, av->lid,
+                     ud_address_vector.g, av->gid_present );
+        MLX_FILL_2 ( &wqe->ud.ud, 2,
+                     ud_address_vector.max_stat_rate,
+                     ( ( av->rate >= 3 ) ? 0 : 1 ),
+                     ud_address_vector.msg, 3 );
+        MLX_FILL_1 ( &wqe->ud.ud, 3, ud_address_vector.sl, av->sl );
+        gid = ( av->gid_present ? &av->gid : &arbel_no_gid );
+        memcpy ( &wqe->ud.ud.u.dwords[4], gid, sizeof ( *gid ) );
+        MLX_FILL_1 ( &wqe->ud.ud, 8, destination_qp, av->qpn );
+        MLX_FILL_1 ( &wqe->ud.ud, 9, q_key, av->qkey );
+        MLX_FILL_1 ( &wqe->ud.data[0], 0, byte_count, iob_len ( iobuf ) );
+        MLX_FILL_1 ( &wqe->ud.data[0], 1, l_key, arbel->reserved_lkey );
+        MLX_FILL_1 ( &wqe->ud.data[0], 3,
+                     local_address_l, virt_to_bus ( iobuf->data ) );
+
+        return ( offsetof ( typeof ( wqe->ud ), data[1] ) >> 4 );
+}
+
+/**
+ * Construct MLX send work queue entry
+ *
+ * @v ibdev            Infiniband device
+ * @v qp               Queue pair
+ * @v av               Address vector
+ * @v iobuf            I/O buffer
+ * @v wqe              Send work queue entry
+ * @v next             Previous work queue entry's "next" field
+ * @ret nds            Work queue entry size
+ */
+static size_t arbel_fill_mlx_send_wqe ( struct ib_device *ibdev,
+                                        struct ib_queue_pair *qp,
+                                        struct ib_address_vector *av,
+                                        struct io_buffer *iobuf,
+                                        union arbel_send_wqe *wqe ) {
+        struct arbel *arbel = ib_get_drvdata ( ibdev );
+        struct io_buffer headers;
+
+        /* Construct IB headers */
+        iob_populate ( &headers, &wqe->mlx.headers, 0,
+                       sizeof ( wqe->mlx.headers ) );
+        iob_reserve ( &headers, sizeof ( wqe->mlx.headers ) );
+        ib_push ( ibdev, &headers, qp, iob_len ( iobuf ), av );
+
+        /* Construct this work queue entry */
+        MLX_FILL_5 ( &wqe->mlx.ctrl, 0,
+                     c, 1 /* generate completion */,
+                     icrc, 0 /* generate ICRC */,
+                     max_statrate, ( ( ( av->rate < 2 ) || ( av->rate > 10 ) )
+                                     ? 8 : ( av->rate + 5 ) ),
+                     slr, 0,
+                     v15, ( ( qp->ext_qpn == IB_QPN_SMI ) ? 1 : 0 ) );
+        MLX_FILL_1 ( &wqe->mlx.ctrl, 1, rlid, av->lid );
+        MLX_FILL_1 ( &wqe->mlx.data[0], 0,
+                     byte_count, iob_len ( &headers ) );
+        MLX_FILL_1 ( &wqe->mlx.data[0], 1, l_key, arbel->reserved_lkey );
+        MLX_FILL_1 ( &wqe->mlx.data[0], 3,
+                     local_address_l, virt_to_bus ( headers.data ) );
+        MLX_FILL_1 ( &wqe->mlx.data[1], 0,
+                     byte_count, ( iob_len ( iobuf ) + 4 /* ICRC */ ) );
+        MLX_FILL_1 ( &wqe->mlx.data[1], 1, l_key, arbel->reserved_lkey );
+        MLX_FILL_1 ( &wqe->mlx.data[1], 3,
+                     local_address_l, virt_to_bus ( iobuf->data ) );
+
+        return ( offsetof ( typeof ( wqe->mlx ), data[2] ) >> 4 );
+}
+
+/** Work queue entry constructors */
+static size_t
+( * arbel_fill_send_wqe[] ) ( struct ib_device *ibdev,
+                              struct ib_queue_pair *qp,
+                              struct ib_address_vector *av,
+                              struct io_buffer *iobuf,
+                              union arbel_send_wqe *wqe ) = {
+        [IB_QPT_SMI] = arbel_fill_mlx_send_wqe,
+        [IB_QPT_GSI] = arbel_fill_mlx_send_wqe,
+        [IB_QPT_UD] = arbel_fill_ud_send_wqe,
+};
+
 /**
  * Post send work queue entry
  *
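
As an aside (not part of the patch), both constructors above return the entry length in 16-byte units ("nds"), which arbel_post_send() then writes into the previous entry's "next" segment. A standalone sketch of that arithmetic follows; the segment sizes in example_ud_send_wqe are invented for illustration and do not reflect the real arbelprm layout.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical WQE layout, sized only to demonstrate the nds arithmetic */
struct example_ud_send_wqe {
        uint8_t next[16];       /* next-WQE segment */
        uint8_t ctrl[16];       /* control segment */
        uint8_t ud[48];         /* UD address vector segment (assumed size) */
        uint8_t data[4][16];    /* scatter/gather entries */
};

int main ( void ) {
        /* Length up to and including data[0], in 16-byte chunks */
        size_t nds = ( offsetof ( struct example_ud_send_wqe, data[1] ) >> 4 );

        printf ( "nds = %zu sixteen-byte chunks\n", nds ); /* prints 6 here */
        return 0;
}

In the patch the same expression is applied to the real union members, so each constructor reports exactly how many 16-byte chunks of the entry it filled in.
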
@@ -1019,11 +1180,10 @@ static int arbel_post_send ( struct ib_device *ibdev,
         struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
         struct ib_work_queue *wq = &qp->send;
         struct arbel_send_work_queue *arbel_send_wq = &arbel_qp->send;
-        struct arbelprm_ud_send_wqe *prev_wqe;
-        struct arbelprm_ud_send_wqe *wqe;
+        union arbel_send_wqe *prev_wqe;
+        union arbel_send_wqe *wqe;
         struct arbelprm_qp_db_record *qp_db_rec;
         union arbelprm_doorbell_register db_reg;
-        const union ib_gid *gid;
         unsigned int wqe_idx_mask;
         size_t nds;

@@ -1034,41 +1194,22 @@ static int arbel_post_send ( struct ib_device *ibdev,
                 return -ENOBUFS;
         }
         wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
-        prev_wqe = &arbel_send_wq->wqe[(wq->next_idx - 1) & wqe_idx_mask].ud;
-        wqe = &arbel_send_wq->wqe[wq->next_idx & wqe_idx_mask].ud;
+        prev_wqe = &arbel_send_wq->wqe[(wq->next_idx - 1) & wqe_idx_mask];
+        wqe = &arbel_send_wq->wqe[wq->next_idx & wqe_idx_mask];

         /* Construct work queue entry */
-        MLX_FILL_1 ( &wqe->next, 1, always1, 1 );
-        memset ( &wqe->ctrl, 0, sizeof ( wqe->ctrl ) );
-        MLX_FILL_1 ( &wqe->ctrl, 0, always1, 1 );
-        memset ( &wqe->ud, 0, sizeof ( wqe->ud ) );
-        MLX_FILL_2 ( &wqe->ud, 0,
-                     ud_address_vector.pd, ARBEL_GLOBAL_PD,
-                     ud_address_vector.port_number, ibdev->port );
-        MLX_FILL_2 ( &wqe->ud, 1,
-                     ud_address_vector.rlid, av->lid,
-                     ud_address_vector.g, av->gid_present );
-        MLX_FILL_2 ( &wqe->ud, 2,
-                     ud_address_vector.max_stat_rate,
-                     ( ( av->rate >= 3 ) ? 0 : 1 ),
-                     ud_address_vector.msg, 3 );
-        MLX_FILL_1 ( &wqe->ud, 3, ud_address_vector.sl, av->sl );
-        gid = ( av->gid_present ? &av->gid : &arbel_no_gid );
-        memcpy ( &wqe->ud.u.dwords[4], gid, sizeof ( *gid ) );
-        MLX_FILL_1 ( &wqe->ud, 8, destination_qp, av->qpn );
-        MLX_FILL_1 ( &wqe->ud, 9, q_key, av->qkey );
-        MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_len ( iobuf ) );
-        MLX_FILL_1 ( &wqe->data[0], 1, l_key, arbel->reserved_lkey );
-        MLX_FILL_1 ( &wqe->data[0], 3,
-                     local_address_l, virt_to_bus ( iobuf->data ) );
+        memset ( ( ( ( void * ) wqe ) + sizeof ( wqe->next ) ), 0,
+                 ( sizeof ( *wqe ) - sizeof ( wqe->next ) ) );
+        assert ( qp->type < ( sizeof ( arbel_fill_send_wqe ) /
+                              sizeof ( arbel_fill_send_wqe[0] ) ) );
+        assert ( arbel_fill_send_wqe[qp->type] != NULL );
+        nds = arbel_fill_send_wqe[qp->type] ( ibdev, qp, av, iobuf, wqe );

         /* Update previous work queue entry's "next" field */
-        nds = ( ( offsetof ( typeof ( *wqe ), data ) +
-                  sizeof ( wqe->data[0] ) ) >> 4 );
         MLX_SET ( &prev_wqe->next, nopcode, ARBEL_OPCODE_SEND );
         MLX_FILL_3 ( &prev_wqe->next, 1,
                      nds, nds,
-                     f, 1,
+                     f, 0,
                      always1, 1 );

         /* Update doorbell record */
@@ -1211,7 +1352,7 @@ static int arbel_complete ( struct ib_device *ibdev,
         iobuf = wq->iobufs[wqe_idx];
         if ( ! iobuf ) {
                 DBGC ( arbel, "Arbel %p CQN %lx QPN %lx empty WQE %x\n",
-                       arbel, cq->cqn, qpn, wqe_idx );
+                       arbel, cq->cqn, qp->qpn, wqe_idx );
                 return -EIO;
         }
         wq->iobufs[wqe_idx] = NULL;
@@ -1553,6 +1694,27 @@ static void arbel_close ( struct ib_device *ibdev ) {
         }
 }

+/**
+ * Set port information
+ *
+ * @v ibdev            Infiniband device
+ * @v mad              Set port information MAD
+ * @ret rc             Return status code
+ */
+static int arbel_set_port_info ( struct ib_device *ibdev,
+                                 union ib_mad *mad ) {
+        int rc;
+
+        /* Send the MAD to the embedded SMA */
+        if ( ( rc = arbel_mad ( ibdev, mad ) ) != 0 )
+                return rc;
+
+        /* Update parameters held in software */
+        ib_smc_update ( ibdev, arbel_mad );
+
+        return 0;
+}
+
 /***************************************************************************
  *
  * Multicast group operations
@@ -1664,6 +1826,7 @@ static struct ib_device_operations arbel_ib_operations = {
         .close          = arbel_close,
         .mcast_attach   = arbel_mcast_attach,
         .mcast_detach   = arbel_mcast_detach,
+        .set_port_info  = arbel_set_port_info,
 };

 /***************************************************************************
@@ -1862,7 +2025,8 @@ static int arbel_alloc_icm ( struct arbel *arbel,
         icm_offset = ( ( arbel->limits.reserved_uars + 1 ) << 12 );

         /* Queue pair contexts */
-        log_num_qps = fls ( arbel->limits.reserved_qps + ARBEL_MAX_QPS - 1 );
+        log_num_qps = fls ( arbel->limits.reserved_qps +
+                            ARBEL_RSVD_SPECIAL_QPS + ARBEL_MAX_QPS - 1 );
         MLX_FILL_2 ( init_hca, 13,
                      qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_l,
                      ( icm_offset >> 7 ),
@@ -2085,7 +2249,44 @@ static int arbel_setup_mpt ( struct arbel *arbel ) {

         return 0;
 }
-
+
+/**
+ * Configure special queue pairs
+ *
+ * @v arbel            Arbel device
+ * @ret rc             Return status code
+ */
+static int arbel_configure_special_qps ( struct arbel *arbel ) {
+        unsigned int smi_qpn_base;
+        unsigned int gsi_qpn_base;
+        int rc;
+
+        /* Special QP block must be aligned on an even number */
+        arbel->special_qpn_base = ( ( arbel->limits.reserved_qps + 1 ) & ~1 );
+        arbel->qpn_base = ( arbel->special_qpn_base +
+                            ARBEL_NUM_SPECIAL_QPS );
+        DBGC ( arbel, "Arbel %p special QPs at [%lx,%lx]\n", arbel,
+               arbel->special_qpn_base, ( arbel->qpn_base - 1 ) );
+        smi_qpn_base = arbel->special_qpn_base;
+        gsi_qpn_base = ( smi_qpn_base + 2 );
+
+        /* Issue commands to configure special QPs */
+        if ( ( rc = arbel_cmd_conf_special_qp ( arbel, 0,
+                                                smi_qpn_base ) ) != 0 ) {
+                DBGC ( arbel, "Arbel %p could not configure SMI QPs: %s\n",
+                       arbel, strerror ( rc ) );
+                return rc;
+        }
+        if ( ( rc = arbel_cmd_conf_special_qp ( arbel, 1,
+                                                gsi_qpn_base ) ) != 0 ) {
+                DBGC ( arbel, "Arbel %p could not configure GSI QPs: %s\n",
+                       arbel, strerror ( rc ) );
+                return rc;
+        }
+
+        return 0;
+}
+
 /**
  * Probe PCI device
  *
@@ -2174,6 +2375,10 @@ static int arbel_probe ( struct pci_device *pci,
         if ( ( rc = arbel_create_eq ( arbel ) ) != 0 )
                 goto err_create_eq;

+        /* Configure special QPs */
+        if ( ( rc = arbel_configure_special_qps ( arbel ) ) != 0 )
+                goto err_conf_special_qps;
+
         /* Initialise parameters using SMC */
         for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ )
                 ib_smc_init ( arbel->ibdev[i], arbel_mad );
@@ -2193,6 +2398,7 @@ static int arbel_probe ( struct pci_device *pci,
  err_register_ibdev:
         for ( i-- ; i >= 0 ; i-- )
                 unregister_ibdev ( arbel->ibdev[i] );
+ err_conf_special_qps:
         arbel_destroy_eq ( arbel );
  err_create_eq:
  err_setup_mpt: