
create_qp() and destroy_qp() now written (but not tested).

tags/v0.9.3
Michael Brown, 17 years ago
parent commit 7e85f0d296

src/drivers/net/mlx_ipoib/arbel.h  (+7 -0)

 #define ARBEL_HCR_RTR2RTS_QPEE		0x001b
 #define ARBEL_HCR_2RST_QPEE		0x0021
 
+/* Service types */
+#define ARBEL_ST_UD			0x01
+
+/* MTUs */
+#define ARBEL_MTU_2048			0x04
+
 /*
  * Wrapper structures for hardware datatypes
  *
...
 struct MLX_DECLARE_STRUCT ( arbelprm_cq_ci_db_record );
 struct MLX_DECLARE_STRUCT ( arbelprm_hca_command_register );
 struct MLX_DECLARE_STRUCT ( arbelprm_qp_db_record );
+struct MLX_DECLARE_STRUCT ( arbelprm_qp_ee_state_transitions );
 struct MLX_DECLARE_STRUCT ( arbelprm_query_dev_lim );
 struct MLX_DECLARE_STRUCT ( arbelprm_queue_pair_ee_context_entry );
 struct MLX_DECLARE_STRUCT ( arbelprm_recv_wqe_segment_next );
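
The two new constants feed the UD queue-pair context set up later in this commit. The 0x04 value follows the InfiniBand MTU enumeration, which encodes log2(MTU) - 7 (256 -> 1, 512 -> 2, ..., 2048 -> 4); a minimal sketch of that relationship, with a hypothetical helper name that is not part of the commit:

static inline unsigned int arbel_mtu_enc ( unsigned int mtu ) {
	/* IB MTU encoding: log2(MTU) - 7 */
	return ( fls ( mtu ) - 1 - 7 );	/* e.g. 2048 = 2^11, so 11 - 7 = 4 */
}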

src/drivers/net/mlx_ipoib/bit_ops.h  (+23 -0)

 	( MLX_ASSEMBLE_1 ( _structure_st, _index, _field, _value ) |	     \
 	  MLX_ASSEMBLE_3 ( _structure_st, _index, __VA_ARGS__ ) )
 
+#define MLX_ASSEMBLE_5( _structure_st, _index, _field, _value, ... )	     \
+	( MLX_ASSEMBLE_1 ( _structure_st, _index, _field, _value ) |	     \
+	  MLX_ASSEMBLE_4 ( _structure_st, _index, __VA_ARGS__ ) )
+
+#define MLX_ASSEMBLE_6( _structure_st, _index, _field, _value, ... )	     \
+	( MLX_ASSEMBLE_1 ( _structure_st, _index, _field, _value ) |	     \
+	  MLX_ASSEMBLE_5 ( _structure_st, _index, __VA_ARGS__ ) )
+
 /*
  * Build native-endian (positive) dword bitmasks from named fields
  *
...
 	( MLX_MASK_1 ( _structure_st, _index, _field ) |		     \
 	  MLX_MASK_3 ( _structure_st, _index, __VA_ARGS__ ) )
 
+#define MLX_MASK_5( _structure_st, _index, _field, ... )		     \
+	( MLX_MASK_1 ( _structure_st, _index, _field ) |		     \
+	  MLX_MASK_4 ( _structure_st, _index, __VA_ARGS__ ) )
+
+#define MLX_MASK_6( _structure_st, _index, _field, ... )		     \
+	( MLX_MASK_1 ( _structure_st, _index, _field ) |		     \
+	  MLX_MASK_5 ( _structure_st, _index, __VA_ARGS__ ) )
+
 /*
  * Populate big-endian dwords from named fields and values
  *
...
 	MLX_FILL ( _ptr, _index, MLX_ASSEMBLE_4 ( MLX_PSEUDO_STRUCT ( _ptr ),\
 						  _index, __VA_ARGS__ ) )
 
+#define MLX_FILL_5( _ptr, _index, ... )					     \
+	MLX_FILL ( _ptr, _index, MLX_ASSEMBLE_5 ( MLX_PSEUDO_STRUCT ( _ptr ),\
+						  _index, __VA_ARGS__ ) )
+
+#define MLX_FILL_6( _ptr, _index, ... )					     \
+	MLX_FILL ( _ptr, _index, MLX_ASSEMBLE_6 ( MLX_PSEUDO_STRUCT ( _ptr ),\
+						  _index, __VA_ARGS__ ) )
 
 /*
  * Modify big-endian dword using named field and value
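
The new five- and six-field variants follow the recursive pattern of the existing one- to four-field macros: each n-field assembler peels off one (field, value) pair and ORs it with the (n-1)-field assembly, so MLX_ASSEMBLE_6 ultimately expands to six ORed single-field masks. A usage sketch with hypothetical field names (my_field_a through my_field_f would have to exist in the pseudo-struct behind &ctx; only the macro itself is from this commit):

/* Populate dword 4 of ctx from six named fields in one statement */
MLX_FILL_6 ( &ctx, 4,
	     my_field_a, 1, my_field_b, 2, my_field_c, 3,
	     my_field_d, 4, my_field_e, 5, my_field_f, 6 );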

src/drivers/net/mlx_ipoib/ib_driver.h  (+1 -1)

 };
 
 enum {
-	MADS_SND_CQN_SN,
+	MADS_SND_CQN_SN = 4,
 	MADS_RCV_CQN_SN,
 	IPOIB_SND_CQN_SN,
 	IPOIB_RCV_CQN_SN,
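
The explicit base value presumably keeps the legacy driver's completion queue numbers clear of the low CQNs now handed out by the new ib_create_cq() path; that reasoning is an assumption, not stated in the commit. The successors simply continue from the new base:

enum {
	MADS_SND_CQN_SN = 4,	/* 4 */
	MADS_RCV_CQN_SN,	/* 5 */
	IPOIB_SND_CQN_SN,	/* 6 */
	IPOIB_RCV_CQN_SN,	/* 7 */
};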

src/drivers/net/mlx_ipoib/mt25218.c  (+97 -15)

 
 static inline int
 arbel_cmd_rst2init_qpee ( struct arbel *arbel, unsigned long qpn,
-			  struct arbelprm_queue_pair_ee_context_entry *ctx ) {
+			  const struct arbelprm_qp_ee_state_transitions *ctx ){
 	return arbel_cmd ( arbel,
 			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_RST2INIT_QPEE,
 					      1, sizeof ( *ctx ) ),
...
 
 static inline int
 arbel_cmd_init2rtr_qpee ( struct arbel *arbel, unsigned long qpn,
-			  struct arbelprm_queue_pair_ee_context_entry *ctx ) {
+			  const struct arbelprm_qp_ee_state_transitions *ctx ){
 	return arbel_cmd ( arbel,
 			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT2RTR_QPEE,
 					      1, sizeof ( *ctx ) ),
...
 
 static inline int
 arbel_cmd_rtr2rts_qpee ( struct arbel *arbel, unsigned long qpn,
-			 struct arbelprm_queue_pair_ee_context_entry *ctx ) {
+			 const struct arbelprm_qp_ee_state_transitions *ctx ) {
 	return arbel_cmd ( arbel,
 			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_RTR2RTS_QPEE,
 					      1, sizeof ( *ctx ) ),
...
 		     virt_to_bus ( arbel_cq->cqe ) );
 	MLX_FILL_2 ( &cqctx, 3,
 		     usr_page, arbel->limits.reserved_uars,
-		     log_cq_size, ( fls ( cq->num_cqes ) - 1 ) );
+		     log_cq_size, fls ( cq->num_cqes - 1 ) );
 	MLX_FILL_1 ( &cqctx, 5, c_eqn, arbel->eqn );
 	MLX_FILL_1 ( &cqctx, 6, pd, ARBEL_GLOBAL_PD );
 	MLX_FILL_1 ( &cqctx, 7, l_key, arbel->reserved_lkey );
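
The changed hunk fixes the log size written into the CQ context: fls(n-1) rounds the queue size up to a power of two, while the old fls(n)-1 rounds down. The two agree at powers of two (n=8: fls(7)=3 and fls(8)-1=3) but differ elsewhere (n=5: fls(4)=3 gives 2^3=8 entries, whereas fls(5)-1=2 would under-size the queue to 4). A sketch of the corrected computation (the helper name is illustrative, not from the commit):

static unsigned int cq_log_size ( unsigned int num_cqes ) {
	/* Round up to the nearest power of two */
	return fls ( num_cqes - 1 );	/* n=8 -> 3; n=5 -> 3, not 2 */
}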
 ***************************************************************************
 */
 
+/**
+ * Create send work queue
+ *
+ * @v arbel_send_wq	Send work queue
+ * @v num_wqes		Number of work queue entries
+ * @ret rc		Return status code
+ */
 static int arbel_create_send_wq ( struct arbel_send_work_queue *arbel_send_wq,
 				  unsigned int num_wqes ) {
+	struct arbelprm_ud_send_wqe *wqe;
+	struct arbelprm_ud_send_wqe *next_wqe;
+	unsigned int wqe_idx_mask;
+	unsigned int i;
 
+	/* Allocate work queue */
 	arbel_send_wq->wqe_size = ( num_wqes *
 				    sizeof ( arbel_send_wq->wqe[0] ) );
 	arbel_send_wq->wqe = malloc_dma ( arbel_send_wq->wqe_size,
 					  sizeof ( arbel_send_wq->wqe[0] ) );
 	if ( ! arbel_send_wq->wqe )
 		return -ENOMEM;
-
-	// initialise (prelink?)
+	memset ( arbel_send_wq->wqe, 0, arbel_send_wq->wqe_size );
+
+	/* Link work queue entries */
+	wqe_idx_mask = ( num_wqes - 1 );
+	for ( i = 0 ; i < num_wqes ; i++ ) {
+		wqe = &arbel_send_wq->wqe[i].ud;
+		next_wqe = &arbel_send_wq->wqe[ ( i + 1 ) & wqe_idx_mask ].ud;
+		MLX_FILL_1 ( &wqe->next, 0, nda_31_6,
+			     ( virt_to_bus ( next_wqe ) >> 6 ) );
+	}
+
+	return 0;
 }
 
+/**
+ * Create receive work queue
+ *
+ * @v arbel_recv_wq	Receive work queue
+ * @v num_wqes		Number of work queue entries
+ * @ret rc		Return status code
+ */
 static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq,
 				  unsigned int num_wqes ) {
+	struct arbelprm_recv_wqe *wqe;
+	struct arbelprm_recv_wqe *next_wqe;
+	unsigned int wqe_idx_mask;
+	unsigned int i;
 
+	/* Allocate work queue */
 	arbel_recv_wq->wqe_size = ( num_wqes *
 				    sizeof ( arbel_recv_wq->wqe[0] ) );
 	arbel_recv_wq->wqe = malloc_dma ( arbel_recv_wq->wqe_size,
 					  sizeof ( arbel_recv_wq->wqe[0] ) );
 	if ( ! arbel_recv_wq->wqe )
 		return -ENOMEM;
-
-	// initialise (prelink?)
+	memset ( arbel_recv_wq->wqe, 0, arbel_recv_wq->wqe_size );
+
+	/* Link work queue entries */
+	wqe_idx_mask = ( num_wqes - 1 );
+	for ( i = 0 ; i < num_wqes ; i++ ) {
+		wqe = &arbel_recv_wq->wqe[i].recv;
+		next_wqe = &arbel_recv_wq->wqe[ ( i + 1 ) & wqe_idx_mask ].recv;
+		MLX_FILL_1 ( &wqe->next, 0, nda_31_6,
+			     ( virt_to_bus ( next_wqe ) >> 6 ) );
+	}
+
+	return 0;
 }
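
Both ring-linking loops store bits [31:6] of the next WQE's bus address in nda_31_6, which implies every WQE must be 64-byte aligned; the malloc_dma() calls above align the ring on sizeof(wqe[0]), which satisfies this as long as the WQE size is a multiple of 64. An illustrative fragment (not in the commit) recovering a link via the existing MLX_GET accessor:

/* Reverse the ">> 6" packing to recover the next WQE's bus address */
unsigned long next_addr = ( MLX_GET ( &wqe->next, nda_31_6 ) << 6 );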
 
-
-
-
 /**
  * Create queue pair
  *
...
 			     struct ib_queue_pair *qp ) {
 	struct arbel *arbel = ibdev->dev_priv;
 	struct arbel_queue_pair *arbel_qp;
-	struct arbelprm_queue_pair_ee_context_entry qpctx;
+	struct arbelprm_qp_ee_state_transitions qpctx;
 	struct arbelprm_qp_db_record *send_db_rec;
 	struct arbelprm_qp_db_record *recv_db_rec;
 	int qpn_offset;
...
 
 	/* Hand queue over to hardware */
 	memset ( &qpctx, 0, sizeof ( qpctx ) );
-	// ...  fill in context
+	MLX_FILL_3 ( &qpctx, 2,
+		     qpc_eec_data.de, 1,
+		     qpc_eec_data.pm_state, 0x03 /* Always 0x03 for UD */,
+		     qpc_eec_data.st, ARBEL_ST_UD );
+	MLX_FILL_6 ( &qpctx, 4,
+		     qpc_eec_data.mtu, ARBEL_MTU_2048,
+		     qpc_eec_data.msg_max, 11 /* 2^11 = 2048 */,
+		     qpc_eec_data.log_rq_size, fls ( qp->recv.num_wqes - 1 ),
+		     qpc_eec_data.log_rq_stride,
+		     ( fls ( sizeof ( arbel_qp->recv.wqe[0] ) - 1 ) - 4 ),
+		     qpc_eec_data.log_sq_size, fls ( qp->send.num_wqes - 1 ),
+		     qpc_eec_data.log_sq_stride,
+		     ( fls ( sizeof ( arbel_qp->send.wqe[0] ) - 1 ) - 4 ) );
+	MLX_FILL_1 ( &qpctx, 5,
+		     qpc_eec_data.usr_page, arbel->limits.reserved_uars );
+	MLX_FILL_1 ( &qpctx, 10, qpc_eec_data.primary_address_path.port_number,
+		     PXE_IB_PORT );
+	MLX_FILL_1 ( &qpctx, 27, qpc_eec_data.pd, ARBEL_GLOBAL_PD );
+	MLX_FILL_1 ( &qpctx, 29, qpc_eec_data.wqe_lkey, arbel->reserved_lkey );
+	MLX_FILL_1 ( &qpctx, 30, qpc_eec_data.ssc, 1 );
+	MLX_FILL_1 ( &qpctx, 33, qpc_eec_data.cqn_snd, qp->send.cq->cqn );
+	MLX_FILL_1 ( &qpctx, 34, qpc_eec_data.snd_wqe_base_adr_l,
+		     ( virt_to_bus ( arbel_qp->send.wqe ) >> 6 ) );
+	MLX_FILL_1 ( &qpctx, 35, qpc_eec_data.snd_db_record_index,
+		     arbel_qp->send.doorbell_idx );
+	MLX_FILL_1 ( &qpctx, 38, qpc_eec_data.rsc, 1 );
+	MLX_FILL_1 ( &qpctx, 41, qpc_eec_data.cqn_rcv, qp->recv.cq->cqn );
+	MLX_FILL_1 ( &qpctx, 42, qpc_eec_data.rcv_wqe_base_adr_l,
+		     ( virt_to_bus ( arbel_qp->recv.wqe ) >> 6 ) );
+	MLX_FILL_1 ( &qpctx, 43, qpc_eec_data.rcv_db_record_index,
+		     arbel_qp->recv.doorbell_idx );
+	MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
 	if ( ( rc = arbel_cmd_rst2init_qpee ( arbel, qp->qpn, &qpctx )) != 0 ){
 		DBGC ( arbel, "Arbel %p RST2INIT_QPEE failed: %s\n",
 		       arbel, strerror ( rc ) );
 		goto err_rst2init_qpee;
 	}
+	memset ( &qpctx, 0, sizeof ( qpctx ) );
+	MLX_FILL_2 ( &qpctx, 4,
+		     qpc_eec_data.mtu, ARBEL_MTU_2048,
+		     qpc_eec_data.msg_max, 11 /* 2^11 = 2048 */ );
 	if ( ( rc = arbel_cmd_init2rtr_qpee ( arbel, qp->qpn, &qpctx )) != 0 ){
 		DBGC ( arbel, "Arbel %p INIT2RTR_QPEE failed: %s\n",
 		       arbel, strerror ( rc ) );
 		goto err_init2rtr_qpee;
 	}
+	memset ( &qpctx, 0, sizeof ( qpctx ) );
 	if ( ( rc = arbel_cmd_rtr2rts_qpee ( arbel, qp->qpn, &qpctx ) ) != 0 ){
 		DBGC ( arbel, "Arbel %p RTR2RTS_QPEE failed: %s\n",
 		       arbel, strerror ( rc ) );
...
 		( 1 << MLX_GET ( &dev_lim, log2_rsvd_cqs ) );
 	arbel->limits.reserved_qps =
 		( 1 << MLX_GET ( &dev_lim, log2_rsvd_qps ) );
-	DBG ( "Device limits:\n ");
-	DBG_HD ( &dev_lim, sizeof ( dev_lim ) );
+
+	DBG ( "MADS SND CQN = %#lx\n", dev_ib_data.mads_qp.snd_cq.cqn );
+	struct ib_completion_queue *test_cq;
+	test_cq = ib_create_cq ( &static_ibdev, 32 );
+	if ( test_cq ) {
+		DBG ( "Woot: create_cq() passed!\n" );
+	}
 
 	/* Register network device */
 	if ( ( rc = register_netdev ( netdev ) ) != 0 )
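
The log_sq_stride and log_rq_stride fields appear to encode log2(stride in bytes) - 4, i.e. a 16-byte base unit; that interpretation is inferred from the "- 4" in the fill above, not stated in the commit. A worked sketch (illustrative helper name):

static unsigned int log_wqe_stride ( unsigned int wqe_size ) {
	/* e.g. a 128-byte WQE: fls(127) = 7, 7 - 4 = 3, stride = 16 << 3 */
	return ( fls ( wqe_size - 1 ) - 4 );
}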

src/include/gpxe/infiniband.h  (+3 -1)

 struct ib_queue_pair {
 	/** Queue Pair Number */
 	unsigned long qpn;
+	/** Queue key */
+	unsigned long qkey;
 	/** Send queue */
 	struct ib_work_queue send;
 	/** Receive queue */
...
 extern struct ib_queue_pair *
 ib_create_qp ( struct ib_device *ibdev, unsigned int num_send_wqes,
 	       struct ib_completion_queue *send_cq, unsigned int num_recv_wqes,
-	       struct ib_completion_queue *recv_cq );
+	       struct ib_completion_queue *recv_cq, unsigned long qkey );
 extern void ib_destroy_qp ( struct ib_device *ibdev,
 			    struct ib_queue_pair *qp );
 extern struct ib_work_queue * ib_find_wq ( struct ib_completion_queue *cq,

src/net/infiniband.c  (+4 -1)

  * @v send_cq		Send completion queue
  * @v num_recv_wqes	Number of receive work queue entries
  * @v recv_cq		Receive completion queue
+ * @v qkey		Queue key
  * @ret qp		Queue pair
  */
 struct ib_queue_pair * ib_create_qp ( struct ib_device *ibdev,
 				      unsigned int num_send_wqes,
 				      struct ib_completion_queue *send_cq,
 				      unsigned int num_recv_wqes,
-				      struct ib_completion_queue *recv_cq ) {
+				      struct ib_completion_queue *recv_cq,
+				      unsigned long qkey ) {
 	struct ib_queue_pair *qp;
 	int rc;
 
...
 		      ( num_recv_wqes * sizeof ( qp->recv.iobufs[0] ) ) );
 	if ( ! qp )
 		return NULL;
+	qp->qkey = qkey;
 	qp->send.qp = qp;
 	qp->send.is_send = 1;
 	qp->send.cq = send_cq;
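
A hypothetical caller sketch (variable names, queue depths and the qkey value are illustrative only, not from this commit): the new trailing argument is stored in qp->qkey and ends up in dword 44 of the Arbel QP context filled in above.

struct ib_queue_pair *qp;

/* Create a UD queue pair whose queue key will be programmed into hardware */
qp = ib_create_qp ( ibdev, 8, send_cq, 8, recv_cq, 0x12345678UL );
if ( ! qp )
	return -ENOMEM;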
