Преглед изворног кода

create_qp() and destroy_qp() now written (but not tested).

tags/v0.9.3
Michael Brown пре 17 година
родитељ
комит
7e85f0d296

+ 7
- 0
src/drivers/net/mlx_ipoib/arbel.h Прегледај датотеку

@@ -34,6 +34,12 @@
34 34
 #define ARBEL_HCR_RTR2RTS_QPEE		0x001b
35 35
 #define ARBEL_HCR_2RST_QPEE		0x0021
36 36
 
37
+/* Service types */
38
+#define ARBEL_ST_UD			0x01
39
+
40
+/* MTUs */
41
+#define ARBEL_MTU_2048			0x04
42
+
37 43
 /*
38 44
  * Wrapper structures for hardware datatypes
39 45
  *
@@ -46,6 +52,7 @@ struct MLX_DECLARE_STRUCT ( arbelprm_cq_arm_db_record );
46 52
 struct MLX_DECLARE_STRUCT ( arbelprm_cq_ci_db_record );
47 53
 struct MLX_DECLARE_STRUCT ( arbelprm_hca_command_register );
48 54
 struct MLX_DECLARE_STRUCT ( arbelprm_qp_db_record );
55
+struct MLX_DECLARE_STRUCT ( arbelprm_qp_ee_state_transitions );
49 56
 struct MLX_DECLARE_STRUCT ( arbelprm_query_dev_lim );
50 57
 struct MLX_DECLARE_STRUCT ( arbelprm_queue_pair_ee_context_entry );
51 58
 struct MLX_DECLARE_STRUCT ( arbelprm_recv_wqe_segment_next );

+ 23
- 0
src/drivers/net/mlx_ipoib/bit_ops.h Прегледај датотеку

@@ -204,6 +204,14 @@ struct addr_64_st {
204 204
 	( MLX_ASSEMBLE_1 ( _structure_st, _index, _field, _value ) |	     \
205 205
 	  MLX_ASSEMBLE_3 ( _structure_st, _index, __VA_ARGS__ ) )
206 206
 
207
+#define MLX_ASSEMBLE_5( _structure_st, _index, _field, _value, ... )	     \
208
+	( MLX_ASSEMBLE_1 ( _structure_st, _index, _field, _value ) |	     \
209
+	  MLX_ASSEMBLE_4 ( _structure_st, _index, __VA_ARGS__ ) )
210
+
211
+#define MLX_ASSEMBLE_6( _structure_st, _index, _field, _value, ... )	     \
212
+	( MLX_ASSEMBLE_1 ( _structure_st, _index, _field, _value ) |	     \
213
+	  MLX_ASSEMBLE_5 ( _structure_st, _index, __VA_ARGS__ ) )
214
+
207 215
 /*
208 216
  * Build native-endian (positive) dword bitmasks from named fields
209 217
  *
@@ -225,6 +233,14 @@ struct addr_64_st {
225 233
 	( MLX_MASK_1 ( _structure_st, _index, _field ) |		     \
226 234
 	  MLX_MASK_3 ( _structure_st, _index, __VA_ARGS__ ) )
227 235
 
236
+#define MLX_MASK_5( _structure_st, _index, _field, ... )		     \
237
+	( MLX_MASK_1 ( _structure_st, _index, _field ) |		     \
238
+	  MLX_MASK_4 ( _structure_st, _index, __VA_ARGS__ ) )
239
+
240
+#define MLX_MASK_6( _structure_st, _index, _field, ... )		     \
241
+	( MLX_MASK_1 ( _structure_st, _index, _field ) |		     \
242
+	  MLX_MASK_5 ( _structure_st, _index, __VA_ARGS__ ) )
243
+
228 244
 /*
229 245
  * Populate big-endian dwords from named fields and values
230 246
  *
@@ -253,6 +269,13 @@ struct addr_64_st {
253 269
 	MLX_FILL ( _ptr, _index, MLX_ASSEMBLE_4 ( MLX_PSEUDO_STRUCT ( _ptr ),\
254 270
 						  _index, __VA_ARGS__ ) )
255 271
 
272
+#define MLX_FILL_5( _ptr, _index, ... )					     \
273
+	MLX_FILL ( _ptr, _index, MLX_ASSEMBLE_5 ( MLX_PSEUDO_STRUCT ( _ptr ),\
274
+						  _index, __VA_ARGS__ ) )
275
+
276
+#define MLX_FILL_6( _ptr, _index, ... )					     \
277
+	MLX_FILL ( _ptr, _index, MLX_ASSEMBLE_6 ( MLX_PSEUDO_STRUCT ( _ptr ),\
278
+						  _index, __VA_ARGS__ ) )
256 279
 
257 280
 /*
258 281
  * Modify big-endian dword using named field and value

+ 1
- 1
src/drivers/net/mlx_ipoib/ib_driver.h Прегледај датотеку

@@ -55,7 +55,7 @@ enum {
55 55
 };
56 56
 
57 57
 enum {
58
-	MADS_SND_CQN_SN,
58
+	MADS_SND_CQN_SN = 4,
59 59
 	MADS_RCV_CQN_SN,
60 60
 	IPOIB_SND_CQN_SN,
61 61
 	IPOIB_RCV_CQN_SN,

+ 97
- 15
src/drivers/net/mlx_ipoib/mt25218.c Прегледај датотеку

@@ -466,7 +466,7 @@ arbel_cmd_hw2sw_cq ( struct arbel *arbel, unsigned long cqn ) {
466 466
 
467 467
 static inline int
468 468
 arbel_cmd_rst2init_qpee ( struct arbel *arbel, unsigned long qpn,
469
-			  struct arbelprm_queue_pair_ee_context_entry *ctx ) {
469
+			  const struct arbelprm_qp_ee_state_transitions *ctx ){
470 470
 	return arbel_cmd ( arbel,
471 471
 			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_RST2INIT_QPEE,
472 472
 					      1, sizeof ( *ctx ) ),
@@ -475,7 +475,7 @@ arbel_cmd_rst2init_qpee ( struct arbel *arbel, unsigned long qpn,
475 475
 
476 476
 static inline int
477 477
 arbel_cmd_init2rtr_qpee ( struct arbel *arbel, unsigned long qpn,
478
-			  struct arbelprm_queue_pair_ee_context_entry *ctx ) {
478
+			  const struct arbelprm_qp_ee_state_transitions *ctx ){
479 479
 	return arbel_cmd ( arbel,
480 480
 			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT2RTR_QPEE,
481 481
 					      1, sizeof ( *ctx ) ),
@@ -484,7 +484,7 @@ arbel_cmd_init2rtr_qpee ( struct arbel *arbel, unsigned long qpn,
484 484
 
485 485
 static inline int
486 486
 arbel_cmd_rtr2rts_qpee ( struct arbel *arbel, unsigned long qpn,
487
-			 struct arbelprm_queue_pair_ee_context_entry *ctx ) {
487
+			 const struct arbelprm_qp_ee_state_transitions *ctx ) {
488 488
 	return arbel_cmd ( arbel,
489 489
 			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_RTR2RTS_QPEE,
490 490
 					      1, sizeof ( *ctx ) ),
@@ -574,7 +574,7 @@ static int arbel_create_cq ( struct ib_device *ibdev,
574 574
 		     virt_to_bus ( arbel_cq->cqe ) );
575 575
 	MLX_FILL_2 ( &cqctx, 3,
576 576
 		     usr_page, arbel->limits.reserved_uars,
577
-		     log_cq_size, ( fls ( cq->num_cqes ) - 1 ) );
577
+		     log_cq_size, fls ( cq->num_cqes - 1 ) );
578 578
 	MLX_FILL_1 ( &cqctx, 5, c_eqn, arbel->eqn );
579 579
 	MLX_FILL_1 ( &cqctx, 6, pd, ARBEL_GLOBAL_PD );
580 580
 	MLX_FILL_1 ( &cqctx, 7, l_key, arbel->reserved_lkey );
@@ -651,35 +651,76 @@ static void arbel_destroy_cq ( struct ib_device *ibdev,
651 651
  ***************************************************************************
652 652
  */
653 653
 
654
+/**
655
+ * Create send work queue
656
+ *
657
+ * @v arbel_send_wq	Send work queue
658
+ * @v num_wqes		Number of work queue entries
659
+ * @ret rc		Return status code
660
+ */
654 661
 static int arbel_create_send_wq ( struct arbel_send_work_queue *arbel_send_wq,
655 662
 				  unsigned int num_wqes ) {
663
+	struct arbelprm_ud_send_wqe *wqe;
664
+	struct arbelprm_ud_send_wqe *next_wqe;
665
+	unsigned int wqe_idx_mask;
666
+	unsigned int i;
656 667
 
668
+	/* Allocate work queue */
657 669
 	arbel_send_wq->wqe_size = ( num_wqes *
658 670
 				    sizeof ( arbel_send_wq->wqe[0] ) );
659 671
 	arbel_send_wq->wqe = malloc_dma ( arbel_send_wq->wqe_size,
660 672
 					  sizeof ( arbel_send_wq->wqe[0] ) );
661 673
 	if ( ! arbel_send_wq->wqe )
662 674
 		return -ENOMEM;
663
-
664
-	// initialise (prelink?)
675
+	memset ( arbel_send_wq->wqe, 0, arbel_send_wq->wqe_size );
676
+
677
+	/* Link work queue entries */
678
+	wqe_idx_mask = ( num_wqes - 1 );
679
+	for ( i = 0 ; i < num_wqes ; i++ ) {
680
+		wqe = &arbel_send_wq->wqe[i].ud;
681
+		next_wqe = &arbel_send_wq->wqe[ ( i + 1 ) & wqe_idx_mask ].ud;
682
+		MLX_FILL_1 ( &wqe->next, 0, nda_31_6,
683
+			     ( virt_to_bus ( next_wqe ) >> 6 ) );
684
+	}
685
+	
686
+	return 0;
665 687
 }
666 688
 
689
+/**
690
+ * Create receive work queue
691
+ *
692
+ * @v arbel_recv_wq	Receive work queue
693
+ * @v num_wqes		Number of work queue entries
694
+ * @ret rc		Return status code
695
+ */
667 696
 static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq,
668 697
 				  unsigned int num_wqes ) {
698
+	struct arbelprm_recv_wqe *wqe;
699
+	struct arbelprm_recv_wqe *next_wqe;
700
+	unsigned int wqe_idx_mask;
701
+	unsigned int i;
669 702
 
703
+	/* Allocate work queue */
670 704
 	arbel_recv_wq->wqe_size = ( num_wqes *
671 705
 				    sizeof ( arbel_recv_wq->wqe[0] ) );
672 706
 	arbel_recv_wq->wqe = malloc_dma ( arbel_recv_wq->wqe_size,
673 707
 					  sizeof ( arbel_recv_wq->wqe[0] ) );
674 708
 	if ( ! arbel_recv_wq->wqe )
675 709
 		return -ENOMEM;
676
-
677
-	// initialise (prelink?)
710
+	memset ( arbel_recv_wq->wqe, 0, arbel_recv_wq->wqe_size );
711
+
712
+	/* Link work queue entries */
713
+	wqe_idx_mask = ( num_wqes - 1 );
714
+	for ( i = 0 ; i < num_wqes ; i++ ) {
715
+		wqe = &arbel_recv_wq->wqe[i].recv;
716
+		next_wqe = &arbel_recv_wq->wqe[( i + 1 ) & wqe_idx_mask].recv;
717
+		MLX_FILL_1 ( &wqe->next, 0, nda_31_6,
718
+			     ( virt_to_bus ( next_wqe ) >> 6 ) );
719
+	}
720
+	
721
+	return 0;
678 722
 }
679 723
 
680
-
681
-
682
-
683 724
 /**
684 725
  * Create queue pair
685 726
  *
@@ -691,7 +732,7 @@ static int arbel_create_qp ( struct ib_device *ibdev,
691 732
 			     struct ib_queue_pair *qp ) {
692 733
 	struct arbel *arbel = ibdev->dev_priv;
693 734
 	struct arbel_queue_pair *arbel_qp;
694
-	struct arbelprm_queue_pair_ee_context_entry qpctx;
735
+	struct arbelprm_qp_ee_state_transitions qpctx;
695 736
 	struct arbelprm_qp_db_record *send_db_rec;
696 737
 	struct arbelprm_qp_db_record *recv_db_rec;
697 738
 	int qpn_offset;
@@ -737,17 +778,53 @@ static int arbel_create_qp ( struct ib_device *ibdev,
737 778
 
738 779
 	/* Hand queue over to hardware */
739 780
 	memset ( &qpctx, 0, sizeof ( qpctx ) );
740
-	// ...  fill in context
781
+	MLX_FILL_3 ( &qpctx, 2,
782
+		     qpc_eec_data.de, 1,
783
+		     qpc_eec_data.pm_state, 0x03 /* Always 0x03 for UD */,
784
+		     qpc_eec_data.st, ARBEL_ST_UD );
785
+	MLX_FILL_6 ( &qpctx, 4,
786
+		     qpc_eec_data.mtu, ARBEL_MTU_2048,
787
+		     qpc_eec_data.msg_max, 11 /* 2^11 = 2048 */,
788
+		     qpc_eec_data.log_rq_size, fls ( qp->recv.num_wqes - 1 ),
789
+		     qpc_eec_data.log_rq_stride,
790
+		     ( fls ( sizeof ( arbel_qp->send.wqe[0] ) - 1 ) - 4 ),
791
+		     qpc_eec_data.log_sq_size, fls ( qp->send.num_wqes - 1 ),
792
+		     qpc_eec_data.log_sq_stride,
793
+		     ( fls ( sizeof ( arbel_qp->recv.wqe[0] ) - 1 ) - 4 ) );
794
+	MLX_FILL_1 ( &qpctx, 5,
795
+		     qpc_eec_data.usr_page, arbel->limits.reserved_uars );
796
+	MLX_FILL_1 ( &qpctx, 10, qpc_eec_data.primary_address_path.port_number,
797
+		     PXE_IB_PORT );
798
+	MLX_FILL_1 ( &qpctx, 27, qpc_eec_data.pd, ARBEL_GLOBAL_PD );
799
+	MLX_FILL_1 ( &qpctx, 29, qpc_eec_data.wqe_lkey, arbel->reserved_lkey );
800
+	MLX_FILL_1 ( &qpctx, 30, qpc_eec_data.ssc, 1 );
801
+	MLX_FILL_1 ( &qpctx, 33, qpc_eec_data.cqn_snd, qp->send.cq->cqn );
802
+	MLX_FILL_1 ( &qpctx, 34, qpc_eec_data.snd_wqe_base_adr_l,
803
+		     ( virt_to_bus ( arbel_qp->send.wqe ) >> 6 ) );
804
+	MLX_FILL_1 ( &qpctx, 35, qpc_eec_data.snd_db_record_index,
805
+		     arbel_qp->send.doorbell_idx );
806
+	MLX_FILL_1 ( &qpctx, 38, qpc_eec_data.rsc, 1 );
807
+	MLX_FILL_1 ( &qpctx, 41, qpc_eec_data.cqn_rcv, qp->recv.cq->cqn );
808
+	MLX_FILL_1 ( &qpctx, 42, qpc_eec_data.rcv_wqe_base_adr_l,
809
+		     ( virt_to_bus ( arbel_qp->recv.wqe ) >> 6 ) );
810
+	MLX_FILL_1 ( &qpctx, 43, qpc_eec_data.rcv_db_record_index,
811
+		     arbel_qp->recv.doorbell_idx );
812
+	MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
741 813
 	if ( ( rc = arbel_cmd_rst2init_qpee ( arbel, qp->qpn, &qpctx )) != 0 ){
742 814
 		DBGC ( arbel, "Arbel %p RST2INIT_QPEE failed: %s\n",
743 815
 		       arbel, strerror ( rc ) );
744 816
 		goto err_rst2init_qpee;
745 817
 	}
818
+	memset ( &qpctx, 0, sizeof ( qpctx ) );
819
+	MLX_FILL_2 ( &qpctx, 4,
820
+		     qpc_eec_data.mtu, ARBEL_MTU_2048,
821
+		     qpc_eec_data.msg_max, 11 /* 2^11 = 2048 */ );
746 822
 	if ( ( rc = arbel_cmd_init2rtr_qpee ( arbel, qp->qpn, &qpctx )) != 0 ){
747 823
 		DBGC ( arbel, "Arbel %p INIT2RTR_QPEE failed: %s\n",
748 824
 		       arbel, strerror ( rc ) );
749 825
 		goto err_init2rtr_qpee;
750 826
 	}
827
+	memset ( &qpctx, 0, sizeof ( qpctx ) );
751 828
 	if ( ( rc = arbel_cmd_rtr2rts_qpee ( arbel, qp->qpn, &qpctx ) ) != 0 ){
752 829
 		DBGC ( arbel, "Arbel %p RTR2RTS_QPEE failed: %s\n",
753 830
 		       arbel, strerror ( rc ) );
@@ -1215,8 +1292,13 @@ static int arbel_probe ( struct pci_device *pci,
1215 1292
 		( 1 << MLX_GET ( &dev_lim, log2_rsvd_cqs ) );
1216 1293
 	arbel->limits.reserved_qps =
1217 1294
 		( 1 << MLX_GET ( &dev_lim, log2_rsvd_qps ) );
1218
-	DBG ( "Device limits:\n ");
1219
-	DBG_HD ( &dev_lim, sizeof ( dev_lim ) );
1295
+
1296
+	DBG ( "MADS SND CQN = %#lx\n", dev_ib_data.mads_qp.snd_cq.cqn );
1297
+	struct ib_completion_queue *test_cq;
1298
+	test_cq = ib_create_cq ( &static_ibdev, 32 );
1299
+	if ( test_cq ) {
1300
+		DBG ( "Woot: create_cq() passed!\n" );
1301
+	}
1220 1302
 
1221 1303
 	/* Register network device */
1222 1304
 	if ( ( rc = register_netdev ( netdev ) ) != 0 )

+ 3
- 1
src/include/gpxe/infiniband.h Прегледај датотеку

@@ -97,6 +97,8 @@ struct ib_work_queue {
97 97
 struct ib_queue_pair {
98 98
 	/** Queue Pair Number */
99 99
 	unsigned long qpn;
100
+	/** Queue key */
101
+	unsigned long qkey;
100 102
 	/** Send queue */
101 103
 	struct ib_work_queue send;
102 104
 	/** Receive queue */
@@ -267,7 +269,7 @@ extern void ib_destroy_cq ( struct ib_device *ibdev,
267 269
 extern struct ib_queue_pair *
268 270
 ib_create_qp ( struct ib_device *ibdev, unsigned int num_send_wqes,
269 271
 	       struct ib_completion_queue *send_cq, unsigned int num_recv_wqes,
270
-	       struct ib_completion_queue *recv_cq );
272
+	       struct ib_completion_queue *recv_cq, unsigned long qkey );
271 273
 extern void ib_destroy_qp ( struct ib_device *ibdev,
272 274
 			    struct ib_queue_pair *qp );
273 275
 extern struct ib_work_queue * ib_find_wq ( struct ib_completion_queue *cq,

+ 4
- 1
src/net/infiniband.c Прегледај датотеку

@@ -92,13 +92,15 @@ void ib_destroy_cq ( struct ib_device *ibdev,
92 92
  * @v send_cq		Send completion queue
93 93
  * @v num_recv_wqes	Number of receive work queue entries
94 94
  * @v recv_cq		Receive completion queue
95
+ * @v qkey		Queue key
95 96
  * @ret qp		Queue pair
96 97
  */
97 98
 struct ib_queue_pair * ib_create_qp ( struct ib_device *ibdev,
98 99
 				      unsigned int num_send_wqes,
99 100
 				      struct ib_completion_queue *send_cq,
100 101
 				      unsigned int num_recv_wqes,
101
-				      struct ib_completion_queue *recv_cq ) {
102
+				      struct ib_completion_queue *recv_cq,
103
+				      unsigned long qkey ) {
102 104
 	struct ib_queue_pair *qp;
103 105
 	int rc;
104 106
 
@@ -110,6 +112,7 @@ struct ib_queue_pair * ib_create_qp ( struct ib_device *ibdev,
110 112
 		      ( num_recv_wqes * sizeof ( qp->recv.iobufs[0] ) ) );
111 113
 	if ( ! qp )
112 114
 		return NULL;
115
+	qp->qkey = qkey;
113 116
 	qp->send.qp = qp;
114 117
 	qp->send.is_send = 1;
115 118
 	qp->send.cq = send_cq;

Loading…
Откажи
Сачувај