@@ -1111,6 +1111,8 @@ static int hermon_create_qp ( struct ib_device *ibdev,
 	struct hermon *hermon = ib_get_drvdata ( ibdev );
 	struct hermon_queue_pair *hermon_qp;
 	struct hermonprm_qp_ee_state_transitions qpctx;
+	struct hermonprm_wqe_segment_data_ptr *data;
+	unsigned int i;
 	int rc;

 	/* Calculate queue pair number */
@@ -1147,8 +1149,14 @@ static int hermon_create_qp ( struct ib_device *ibdev,
 				     sizeof ( hermon_qp->send.wqe[0] ) );
 	hermon_qp->recv.wqe_size = ( qp->recv.num_wqes *
 				     sizeof ( hermon_qp->recv.wqe[0] ) );
+	if ( ( qp->type == IB_QPT_SMI ) || ( qp->type == IB_QPT_GSI ) ||
+	     ( qp->type == IB_QPT_UD ) ) {
+		hermon_qp->recv.grh_size = ( qp->recv.num_wqes *
+					     sizeof ( hermon_qp->recv.grh[0] ) );
+	}
 	hermon_qp->wqe_size = ( hermon_qp->send.wqe_size +
-				hermon_qp->recv.wqe_size );
+				hermon_qp->recv.wqe_size +
+				hermon_qp->recv.grh_size );
 	hermon_qp->wqe = malloc_dma ( hermon_qp->wqe_size,
 				      sizeof ( hermon_qp->send.wqe[0] ) );
 	if ( ! hermon_qp->wqe ) {
@@ -1156,9 +1164,21 @@ static int hermon_create_qp ( struct ib_device *ibdev,
 		goto err_alloc_wqe;
 	}
 	hermon_qp->send.wqe = hermon_qp->wqe;
-	memset ( hermon_qp->send.wqe, 0xff, hermon_qp->send.wqe_size );
 	hermon_qp->recv.wqe = ( hermon_qp->wqe + hermon_qp->send.wqe_size );
+	if ( hermon_qp->recv.grh_size ) {
+		hermon_qp->recv.grh = ( hermon_qp->wqe +
+					hermon_qp->send.wqe_size +
+					hermon_qp->recv.wqe_size );
+	}
+
+	/* Initialise work queue entries */
+	memset ( hermon_qp->send.wqe, 0xff, hermon_qp->send.wqe_size );
 	memset ( hermon_qp->recv.wqe, 0, hermon_qp->recv.wqe_size );
+	data = &hermon_qp->recv.wqe[0].recv.data[0];
+	for ( i = 0 ; i < ( hermon_qp->recv.wqe_size / sizeof ( *data ) ); i++ ) {
+		MLX_FILL_1 ( data, 1, l_key, HERMON_INVALID_LKEY );
+		data++;
+	}

 	/* Allocate MTT entries */
 	if ( ( rc = hermon_alloc_mtt ( hermon, hermon_qp->wqe,
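Note: the two hunks above carve a single DMA allocation into three regions: the send WQE ring, the receive WQE ring, and (only for SMI/GSI/UD queue pairs, which receive a GRH) one 40-byte GRH slot per receive WQE; every receive data segment is pre-initialised with HERMON_INVALID_LKEY and is overwritten later by hermon_post_recv() for the segments actually used. The following standalone sketch only illustrates the offset arithmetic; the WQE counts and sizes are made-up stand-ins, not the real hermon structure sizes.

/* Standalone sketch of the single-allocation layout: send WQEs, then
 * receive WQEs, then (for UD/SMI/GSI) one GRH slot per receive WQE.
 * All constants below are illustrative, not real hermon values.
 */
#include <stdio.h>
#include <stddef.h>

#define NUM_SEND_WQES	8	/* hypothetical */
#define NUM_RECV_WQES	4	/* hypothetical */
#define SEND_WQE_SIZE	128	/* hypothetical send WQE size */
#define RECV_WQE_SIZE	64	/* hypothetical recv WQE size */
#define GRH_SIZE	40	/* an InfiniBand GRH is 40 bytes */

int main ( void ) {
	size_t send_size = NUM_SEND_WQES * SEND_WQE_SIZE;
	size_t recv_size = NUM_RECV_WQES * RECV_WQE_SIZE;
	size_t grh_size = NUM_RECV_WQES * GRH_SIZE;	/* zero for RC QPs */
	size_t total = send_size + recv_size + grh_size;

	printf ( "send WQEs at offset 0 (%zu bytes)\n", send_size );
	printf ( "recv WQEs at offset %zu (%zu bytes)\n", send_size, recv_size );
	printf ( "GRH array at offset %zu (%zu bytes)\n",
		 ( send_size + recv_size ), grh_size );
	printf ( "total allocation: %zu bytes\n", total );
	return 0;
}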
@@ -1633,6 +1653,8 @@ static int hermon_post_recv ( struct ib_device *ibdev,
 	struct ib_work_queue *wq = &qp->recv;
 	struct hermon_recv_work_queue *hermon_recv_wq = &hermon_qp->recv;
 	struct hermonprm_recv_wqe *wqe;
+	struct hermonprm_wqe_segment_data_ptr *data;
+	struct ib_global_route_header *grh;
 	unsigned int wqe_idx_mask;

 	/* Allocate work queue entry */
@@ -1646,12 +1668,19 @@ static int hermon_post_recv ( struct ib_device *ibdev,
 	wqe = &hermon_recv_wq->wqe[wq->next_idx & wqe_idx_mask].recv;

 	/* Construct work queue entry */
-	MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_tailroom ( iobuf ) );
-	MLX_FILL_1 ( &wqe->data[0], 1, l_key, hermon->lkey );
-	MLX_FILL_H ( &wqe->data[0], 2,
-		     local_address_h, virt_to_bus ( iobuf->data ) );
-	MLX_FILL_1 ( &wqe->data[0], 3,
-		     local_address_l, virt_to_bus ( iobuf->data ) );
+	data = &wqe->data[0];
+	if ( hermon_qp->recv.grh ) {
+		grh = &hermon_qp->recv.grh[wq->next_idx & wqe_idx_mask];
+		MLX_FILL_1 ( data, 0, byte_count, sizeof ( *grh ) );
+		MLX_FILL_1 ( data, 1, l_key, hermon->lkey );
+		MLX_FILL_H ( data, 2, local_address_h, virt_to_bus ( grh ) );
+		MLX_FILL_1 ( data, 3, local_address_l, virt_to_bus ( grh ) );
+		data++;
+	}
+	MLX_FILL_1 ( data, 0, byte_count, iob_tailroom ( iobuf ) );
+	MLX_FILL_1 ( data, 1, l_key, hermon->lkey );
+	MLX_FILL_H ( data, 2, local_address_h, virt_to_bus ( iobuf->data ) );
+	MLX_FILL_1 ( data, 3, local_address_l, virt_to_bus ( iobuf->data ) );

 	/* Update work queue's index */
 	wq->next_idx++;
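Note: with a GRH slot available, hermon_post_recv() now builds a two-entry scatter list: the first data segment points at the per-WQE GRH slot and the second at the I/O buffer, so the 40-byte GRH is scattered out of band instead of consuming I/O buffer headroom. Below is a simplified standalone model of that construction; the plain struct and fill_seg() helper are illustrative stand-ins for hermonprm_wqe_segment_data_ptr and the MLX_FILL_* macros, not the real definitions.

/* Simplified standalone model of the two-segment receive scatter list. */
#include <stdint.h>
#include <stdio.h>

struct data_seg {
	uint32_t byte_count;
	uint32_t lkey;
	uint64_t addr;
};

static void fill_seg ( struct data_seg *seg, uint32_t byte_count,
		       uint32_t lkey, uint64_t addr ) {
	seg->byte_count = byte_count;
	seg->lkey = lkey;
	seg->addr = addr;
}

int main ( void ) {
	struct data_seg wqe_data[2];
	struct data_seg *data = &wqe_data[0];
	uint8_t grh_slot[40];		/* per-WQE GRH slot (40-byte GRH) */
	uint8_t payload[2048];		/* stand-in for the I/O buffer */
	int have_grh = 1;		/* nonzero for SMI/GSI/UD QPs */

	/* First segment (UD/SMI/GSI only): hardware scatters the GRH here */
	if ( have_grh ) {
		fill_seg ( data, sizeof ( grh_slot ), 0x1234,
			   ( uint64_t ) ( uintptr_t ) grh_slot );
		data++;
	}
	/* Next segment: payload lands directly in the I/O buffer */
	fill_seg ( data, sizeof ( payload ), 0x1234,
		   ( uint64_t ) ( uintptr_t ) payload );

	printf ( "segments used: %d\n", ( int ) ( data - wqe_data ) + 1 );
	return 0;
}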
@@ -1676,6 +1705,7 @@ static int hermon_complete ( struct ib_device *ibdev,
 			     struct ib_completion_queue *cq,
 			     union hermonprm_completion_entry *cqe ) {
 	struct hermon *hermon = ib_get_drvdata ( ibdev );
+	struct hermon_queue_pair *hermon_qp;
 	struct ib_work_queue *wq;
 	struct ib_queue_pair *qp;
 	struct io_buffer *iobuf;
@@ -1713,6 +1743,7 @@ static int hermon_complete ( struct ib_device *ibdev,
 		return -EIO;
 	}
 	qp = wq->qp;
+	hermon_qp = ib_qp_get_drvdata ( qp );

 	/* Identify work queue entry */
 	wqe_idx = MLX_GET ( &cqe->normal, wqe_counter );
@@ -1747,9 +1778,9 @@ static int hermon_complete ( struct ib_device *ibdev,
 		case IB_QPT_SMI:
 		case IB_QPT_GSI:
 		case IB_QPT_UD:
-			assert ( iob_len ( iobuf ) >= sizeof ( *grh ) );
-			grh = iobuf->data;
-			iob_pull ( iobuf, sizeof ( *grh ) );
+			/* Locate corresponding GRH */
+			assert ( hermon_qp->recv.grh != NULL );
+			grh = &hermon_qp->recv.grh[ wqe_idx & wqe_idx_mask ];
 			/* Construct address vector */
 			source = &recv_source;
 			source->qpn = MLX_GET ( &cqe->normal, srq_rqpn );
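
Note: on the completion path the GRH is no longer pulled off the front of the I/O buffer; it is located by indexing the GRH ring with the same masked WQE index used for the receive WQE ring. A standalone sketch of that lookup follows; the ring size and struct grh are illustrative stand-ins, not the real hermon/iPXE definitions.

/* Standalone sketch of the ring-indexed GRH lookup. */
#include <stdint.h>
#include <stdio.h>

#define NUM_RECV_WQES 4			/* must be a power of two */

struct grh {
	uint8_t bytes[40];		/* 40-byte InfiniBand GRH */
};

static struct grh grh_ring[NUM_RECV_WQES];

static struct grh * lookup_grh ( unsigned int wqe_idx ) {
	unsigned int wqe_idx_mask = ( NUM_RECV_WQES - 1 );
	return &grh_ring[ wqe_idx & wqe_idx_mask ];
}

int main ( void ) {
	/* A completion reporting WQE counter 6 maps to ring slot 2 */
	struct grh *grh = lookup_grh ( 6 );
	printf ( "slot index: %u\n", ( unsigned int ) ( grh - grh_ring ) );
	return 0;
}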