@@ -466,7 +466,7 @@ arbel_cmd_hw2sw_cq ( struct arbel *arbel, unsigned long cqn ) {
 
 static inline int
 arbel_cmd_rst2init_qpee ( struct arbel *arbel, unsigned long qpn,
-			  struct arbelprm_queue_pair_ee_context_entry *ctx ) {
+			  const struct arbelprm_qp_ee_state_transitions *ctx ) {
 	return arbel_cmd ( arbel,
 			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_RST2INIT_QPEE,
 					      1, sizeof ( *ctx ) ),
@@ -475,7 +475,7 @@ arbel_cmd_rst2init_qpee ( struct arbel *arbel, unsigned long qpn,
 
 static inline int
 arbel_cmd_init2rtr_qpee ( struct arbel *arbel, unsigned long qpn,
-			  struct arbelprm_queue_pair_ee_context_entry *ctx ) {
+			  const struct arbelprm_qp_ee_state_transitions *ctx ) {
 	return arbel_cmd ( arbel,
 			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT2RTR_QPEE,
 					      1, sizeof ( *ctx ) ),
@@ -484,7 +484,7 @@ arbel_cmd_init2rtr_qpee ( struct arbel *arbel, unsigned long qpn,
 
 static inline int
 arbel_cmd_rtr2rts_qpee ( struct arbel *arbel, unsigned long qpn,
-			 struct arbelprm_queue_pair_ee_context_entry *ctx ) {
+			 const struct arbelprm_qp_ee_state_transitions *ctx ) {
 	return arbel_cmd ( arbel,
 			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_RTR2RTS_QPEE,
 					      1, sizeof ( *ctx ) ),
@@ -574,7 +574,7 @@ static int arbel_create_cq ( struct ib_device *ibdev,
 		     virt_to_bus ( arbel_cq->cqe ) );
 	MLX_FILL_2 ( &cqctx, 3,
 		     usr_page, arbel->limits.reserved_uars,
-		     log_cq_size, ( fls ( cq->num_cqes ) - 1 ) );
+		     log_cq_size, fls ( cq->num_cqes - 1 ) );
 	MLX_FILL_1 ( &cqctx, 5, c_eqn, arbel->eqn );
 	MLX_FILL_1 ( &cqctx, 6, pd, ARBEL_GLOBAL_PD );
 	MLX_FILL_1 ( &cqctx, 7, l_key, arbel->reserved_lkey );
@@ -651,35 +651,76 @@ static void arbel_destroy_cq ( struct ib_device *ibdev,
  ***************************************************************************
  */
 
+/**
+ * Create send work queue
+ *
+ * @v arbel_send_wq	Send work queue
+ * @v num_wqes		Number of work queue entries
+ * @ret rc		Return status code
+ */
 static int arbel_create_send_wq ( struct arbel_send_work_queue *arbel_send_wq,
 				  unsigned int num_wqes ) {
+	struct arbelprm_ud_send_wqe *wqe;
+	struct arbelprm_ud_send_wqe *next_wqe;
+	unsigned int wqe_idx_mask;
+	unsigned int i;
 
+	/* Allocate work queue */
 	arbel_send_wq->wqe_size = ( num_wqes *
 				    sizeof ( arbel_send_wq->wqe[0] ) );
 	arbel_send_wq->wqe = malloc_dma ( arbel_send_wq->wqe_size,
 					  sizeof ( arbel_send_wq->wqe[0] ) );
 	if ( ! arbel_send_wq->wqe )
 		return -ENOMEM;
-
-	// initialise (prelink?)
+	memset ( arbel_send_wq->wqe, 0, arbel_send_wq->wqe_size );
+
+	/* Link work queue entries */
+	wqe_idx_mask = ( num_wqes - 1 );
+	for ( i = 0 ; i < num_wqes ; i++ ) {
+		wqe = &arbel_send_wq->wqe[i].ud;
+		next_wqe = &arbel_send_wq->wqe[ ( i + 1 ) & wqe_idx_mask ].ud;
+		MLX_FILL_1 ( &wqe->next, 0, nda_31_6,
+			     ( virt_to_bus ( next_wqe ) >> 6 ) );
+	}
+
+	return 0;
 }
 
+/**
+ * Create receive work queue
+ *
+ * @v arbel_recv_wq	Receive work queue
+ * @v num_wqes		Number of work queue entries
+ * @ret rc		Return status code
+ */
 static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq,
 				  unsigned int num_wqes ) {
+	struct arbelprm_recv_wqe *wqe;
+	struct arbelprm_recv_wqe *next_wqe;
+	unsigned int wqe_idx_mask;
+	unsigned int i;
 
+	/* Allocate work queue */
 	arbel_recv_wq->wqe_size = ( num_wqes *
 				    sizeof ( arbel_recv_wq->wqe[0] ) );
 	arbel_recv_wq->wqe = malloc_dma ( arbel_recv_wq->wqe_size,
 					  sizeof ( arbel_recv_wq->wqe[0] ) );
 	if ( ! arbel_recv_wq->wqe )
 		return -ENOMEM;
-
-	// initialise (prelink?)
+	memset ( arbel_recv_wq->wqe, 0, arbel_recv_wq->wqe_size );
+
+	/* Link work queue entries */
+	wqe_idx_mask = ( num_wqes - 1 );
+	for ( i = 0 ; i < num_wqes ; i++ ) {
+		wqe = &arbel_recv_wq->wqe[i].recv;
+		next_wqe = &arbel_recv_wq->wqe[( i + 1 ) & wqe_idx_mask].recv;
+		MLX_FILL_1 ( &wqe->next, 0, nda_31_6,
+			     ( virt_to_bus ( next_wqe ) >> 6 ) );
+	}
+
+	return 0;
 }
 
-
-
-
 /**
  * Create queue pair
  *
@@ -691,7 +732,7 @@ static int arbel_create_qp ( struct ib_device *ibdev,
 			     struct ib_queue_pair *qp ) {
 	struct arbel *arbel = ibdev->dev_priv;
 	struct arbel_queue_pair *arbel_qp;
-	struct arbelprm_queue_pair_ee_context_entry qpctx;
+	struct arbelprm_qp_ee_state_transitions qpctx;
 	struct arbelprm_qp_db_record *send_db_rec;
 	struct arbelprm_qp_db_record *recv_db_rec;
 	int qpn_offset;
@@ -737,17 +778,53 @@ static int arbel_create_qp ( struct ib_device *ibdev,
 
 	/* Hand queue over to hardware */
 	memset ( &qpctx, 0, sizeof ( qpctx ) );
-	// ... fill in context
+	MLX_FILL_3 ( &qpctx, 2,
+		     qpc_eec_data.de, 1,
+		     qpc_eec_data.pm_state, 0x03 /* Always 0x03 for UD */,
+		     qpc_eec_data.st, ARBEL_ST_UD );
+	MLX_FILL_6 ( &qpctx, 4,
+		     qpc_eec_data.mtu, ARBEL_MTU_2048,
+		     qpc_eec_data.msg_max, 11 /* 2^11 = 2048 */,
+		     qpc_eec_data.log_rq_size, fls ( qp->recv.num_wqes - 1 ),
+		     qpc_eec_data.log_rq_stride,
+		     ( fls ( sizeof ( arbel_qp->recv.wqe[0] ) - 1 ) - 4 ),
+		     qpc_eec_data.log_sq_size, fls ( qp->send.num_wqes - 1 ),
+		     qpc_eec_data.log_sq_stride,
+		     ( fls ( sizeof ( arbel_qp->send.wqe[0] ) - 1 ) - 4 ) );
+	MLX_FILL_1 ( &qpctx, 5,
+		     qpc_eec_data.usr_page, arbel->limits.reserved_uars );
+	MLX_FILL_1 ( &qpctx, 10, qpc_eec_data.primary_address_path.port_number,
+		     PXE_IB_PORT );
+	MLX_FILL_1 ( &qpctx, 27, qpc_eec_data.pd, ARBEL_GLOBAL_PD );
+	MLX_FILL_1 ( &qpctx, 29, qpc_eec_data.wqe_lkey, arbel->reserved_lkey );
+	MLX_FILL_1 ( &qpctx, 30, qpc_eec_data.ssc, 1 );
+	MLX_FILL_1 ( &qpctx, 33, qpc_eec_data.cqn_snd, qp->send.cq->cqn );
+	MLX_FILL_1 ( &qpctx, 34, qpc_eec_data.snd_wqe_base_adr_l,
+		     ( virt_to_bus ( arbel_qp->send.wqe ) >> 6 ) );
+	MLX_FILL_1 ( &qpctx, 35, qpc_eec_data.snd_db_record_index,
+		     arbel_qp->send.doorbell_idx );
+	MLX_FILL_1 ( &qpctx, 38, qpc_eec_data.rsc, 1 );
+	MLX_FILL_1 ( &qpctx, 41, qpc_eec_data.cqn_rcv, qp->recv.cq->cqn );
+	MLX_FILL_1 ( &qpctx, 42, qpc_eec_data.rcv_wqe_base_adr_l,
+		     ( virt_to_bus ( arbel_qp->recv.wqe ) >> 6 ) );
+	MLX_FILL_1 ( &qpctx, 43, qpc_eec_data.rcv_db_record_index,
+		     arbel_qp->recv.doorbell_idx );
+	MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
 	if ( ( rc = arbel_cmd_rst2init_qpee ( arbel, qp->qpn, &qpctx )) != 0 ){
 		DBGC ( arbel, "Arbel %p RST2INIT_QPEE failed: %s\n",
 		       arbel, strerror ( rc ) );
 		goto err_rst2init_qpee;
 	}
+	memset ( &qpctx, 0, sizeof ( qpctx ) );
+	MLX_FILL_2 ( &qpctx, 4,
+		     qpc_eec_data.mtu, ARBEL_MTU_2048,
+		     qpc_eec_data.msg_max, 11 /* 2^11 = 2048 */ );
 	if ( ( rc = arbel_cmd_init2rtr_qpee ( arbel, qp->qpn, &qpctx )) != 0 ){
 		DBGC ( arbel, "Arbel %p INIT2RTR_QPEE failed: %s\n",
 		       arbel, strerror ( rc ) );
 		goto err_init2rtr_qpee;
 	}
+	memset ( &qpctx, 0, sizeof ( qpctx ) );
 	if ( ( rc = arbel_cmd_rtr2rts_qpee ( arbel, qp->qpn, &qpctx ) ) != 0 ){
 		DBGC ( arbel, "Arbel %p RTR2RTS_QPEE failed: %s\n",
 		       arbel, strerror ( rc ) );
@@ -1215,8 +1292,13 @@ static int arbel_probe ( struct pci_device *pci,
 		( 1 << MLX_GET ( &dev_lim, log2_rsvd_cqs ) );
 	arbel->limits.reserved_qps =
 		( 1 << MLX_GET ( &dev_lim, log2_rsvd_qps ) );
-	DBG ( "Device limits:\n ");
-	DBG_HD ( &dev_lim, sizeof ( dev_lim ) );
+
+	DBG ( "MADS SND CQN = %#lx\n", dev_ib_data.mads_qp.snd_cq.cqn );
+	struct ib_completion_queue *test_cq;
+	test_cq = ib_create_cq ( &static_ibdev, 32 );
+	if ( test_cq ) {
+		DBG ( "Woot: create_cq() passed!\n" );
+	}
 
 	/* Register network device */
 	if ( ( rc = register_netdev ( netdev ) ) != 0 )