
[Infiniband] Add multiport support for Arbel cards

tags/v0.9.4
Michael Brown, 16 years ago
commit c9fb012d4f
3 changed files with 304 additions and 18 deletions
  1. src/drivers/infiniband/arbel.c   (+237, -14)
  2. src/drivers/infiniband/arbel.h   (+65, -1)
  3. src/drivers/infiniband/hermon.h  (+2, -3)

src/drivers/infiniband/arbel.c  (+237, -14)

@@ -275,20 +275,31 @@ arbel_cmd_sw2hw_mpt ( struct arbel *arbel, unsigned int index,
 			   0, mpt, index, NULL );
 }
 
+static inline int
+arbel_cmd_map_eq ( struct arbel *arbel, unsigned long index_map,
+		   const struct arbelprm_event_mask *mask ) {
+	return arbel_cmd ( arbel,
+			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_EQ,
+					      0, sizeof ( *mask ) ),
+			   0, mask, index_map, NULL );
+}
+
 static inline int
 arbel_cmd_sw2hw_eq ( struct arbel *arbel, unsigned int index,
-		     const struct arbelprm_eqc *eqc ) {
+		     const struct arbelprm_eqc *eqctx ) {
 	return arbel_cmd ( arbel,
 			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_EQ,
-					      1, sizeof ( *eqc ) ),
-			   0, eqc, index, NULL );
+					      1, sizeof ( *eqctx ) ),
+			   0, eqctx, index, NULL );
 }
 
 static inline int
-arbel_cmd_hw2sw_eq ( struct arbel *arbel, unsigned int index ) {
+arbel_cmd_hw2sw_eq ( struct arbel *arbel, unsigned int index,
+		     struct arbelprm_eqc *eqctx ) {
 	return arbel_cmd ( arbel,
-			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_HW2SW_EQ ),
-			   1, NULL, index, NULL );
+			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_HW2SW_EQ,
+					       1, sizeof ( *eqctx ) ),
+			   1, NULL, index, eqctx );
 }
 
 static inline int
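The new arbel_cmd_map_eq() wrapper takes the raw index_map argument rather than a plain EQ number: bit 31 selects between mapping and unmapping, and the low bits carry the event queue number, as the ARBEL_MAP_EQ/ARBEL_UNMAP_EQ definitions added to arbel.h show. A minimal sketch of how that argument is formed (the EQ number here is just an example):

/* Sketch only: forming the index_map argument for arbel_cmd_map_eq().
 * Bit 31 selects map (0) versus unmap (1); the low bits hold the EQ number.
 */
#define ARBEL_MAP_EQ	( 0UL << 31 )
#define ARBEL_UNMAP_EQ	( 1UL << 31 )

static unsigned long map_eq_arg ( unsigned long eqn, int unmap ) {
	return ( ( unmap ? ARBEL_UNMAP_EQ : ARBEL_MAP_EQ ) | eqn );
}

/* map_eq_arg ( 33, 0 ) == 0x00000021; map_eq_arg ( 33, 1 ) == 0x80000021 */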
@@ -336,6 +347,15 @@ arbel_cmd_rtr2rts_qpee ( struct arbel *arbel, unsigned long qpn,
 			   0, ctx, qpn, NULL );
 }
 
+static inline int
+arbel_cmd_rts2rts_qp ( struct arbel *arbel, unsigned long qpn,
+		       const struct arbelprm_qp_ee_state_transitions *ctx ) {
+	return arbel_cmd ( arbel,
+			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_RTS2RTS_QPEE,
+					      1, sizeof ( *ctx ) ),
+			   0, ctx, qpn, NULL );
+}
+
 static inline int
 arbel_cmd_2rst_qpee ( struct arbel *arbel, unsigned long qpn ) {
 	return arbel_cmd ( arbel,
@@ -847,13 +867,25 @@ static int arbel_modify_qp ( struct ib_device *ibdev,
 			     struct ib_queue_pair *qp,
 			     unsigned long mod_list ) {
 	struct arbel *arbel = ib_get_drvdata ( ibdev );
+	struct arbelprm_qp_ee_state_transitions qpctx;
+	unsigned long optparammask = 0;
+	int rc;
 
-	/* TODO */
-	( void ) arbel;
-	( void ) qp;
-	( void ) mod_list;
+	/* Construct optparammask */
+	if ( mod_list & IB_MODIFY_QKEY )
+		optparammask |= ARBEL_QPEE_OPT_PARAM_QKEY;
 
-	return -ENOTSUP;
+	/* Issue RTS2RTS_QP */
+	memset ( &qpctx, 0, sizeof ( qpctx ) );
+	MLX_FILL_1 ( &qpctx, 0, opt_param_mask, optparammask );
+	MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
+	if ( ( rc = arbel_cmd_rts2rts_qp ( arbel, qp->qpn, &qpctx ) ) != 0 ){
+		DBGC ( arbel, "Arbel %p RTS2RTS_QP failed: %s\n",
+		       arbel, strerror ( rc ) );
+		return rc;
+	}
+
+	return 0;
 }
 
 /**
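The RTS2RTS transition applies only the QP context fields selected by opt_param_mask, which is why leaving the rest of qpctx zeroed is harmless here; ARBEL_QPEE_OPT_PARAM_QKEY (0x00000020UL, i.e. bit 5) selects just the Q_Key. A hedged sketch of the mask-construction pattern, using the flag value from the new arbel.h definition (the IB_MODIFY_QKEY value shown is hypothetical; the real one belongs to the Infiniband core):

/* Sketch only: each set bit in opt_param_mask enables one optional
 * QP context field for the RTS2RTS transition.
 */
#define IB_MODIFY_QKEY			0x00000001	/* hypothetical value */
#define ARBEL_QPEE_OPT_PARAM_QKEY	0x00000020UL	/* bit 5: Q_Key */

static unsigned long build_optparammask ( unsigned long mod_list ) {
	unsigned long optparammask = 0;

	if ( mod_list & IB_MODIFY_QKEY )
		optparammask |= ARBEL_QPEE_OPT_PARAM_QKEY;
	return optparammask;
}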
@@ -1230,6 +1262,145 @@ static void arbel_poll_cq ( struct ib_device *ibdev,
  ***************************************************************************
  */
 
+/**
+ * Create event queue
+ *
+ * @v arbel		Arbel device
+ * @ret rc		Return status code
+ */
+static int arbel_create_eq ( struct arbel *arbel ) {
+	struct arbel_event_queue *arbel_eq = &arbel->eq;
+	struct arbelprm_eqc eqctx;
+	struct arbelprm_event_mask mask;
+	unsigned int i;
+	int rc;
+
+	/* Select event queue number */
+	arbel_eq->eqn = arbel->limits.reserved_eqs;
+
+	/* Calculate doorbell address */
+	arbel_eq->doorbell = ( arbel->eq_ci_doorbells +
+			       ARBEL_DB_EQ_OFFSET ( arbel_eq->eqn ) );
+
+	/* Allocate event queue itself */
+	arbel_eq->eqe_size =
+		( ARBEL_NUM_EQES * sizeof ( arbel_eq->eqe[0] ) );
+	arbel_eq->eqe = malloc_dma ( arbel_eq->eqe_size,
+				     sizeof ( arbel_eq->eqe[0] ) );
+	if ( ! arbel_eq->eqe ) {
+		rc = -ENOMEM;
+		goto err_eqe;
+	}
+	memset ( arbel_eq->eqe, 0, arbel_eq->eqe_size );
+	for ( i = 0 ; i < ARBEL_NUM_EQES ; i++ ) {
+		MLX_FILL_1 ( &arbel_eq->eqe[i].generic, 7, owner, 1 );
+	}
+	barrier();
+
+	/* Hand queue over to hardware */
+	memset ( &eqctx, 0, sizeof ( eqctx ) );
+	MLX_FILL_1 ( &eqctx, 0, st, 0xa /* "Fired" */ );
+	MLX_FILL_1 ( &eqctx, 2,
+		     start_address_l, virt_to_phys ( arbel_eq->eqe ) );
+	MLX_FILL_1 ( &eqctx, 3, log_eq_size, fls ( ARBEL_NUM_EQES - 1 ) );
+	MLX_FILL_1 ( &eqctx, 6, pd, ARBEL_GLOBAL_PD );
+	MLX_FILL_1 ( &eqctx, 7, lkey, arbel->reserved_lkey );
+	if ( ( rc = arbel_cmd_sw2hw_eq ( arbel, arbel_eq->eqn,
+					 &eqctx ) ) != 0 ) {
+		DBGC ( arbel, "Arbel %p SW2HW_EQ failed: %s\n",
+		       arbel, strerror ( rc ) );
+		goto err_sw2hw_eq;
+	}
+
+	/* Map events to this event queue */
+	memset ( &mask, 0, sizeof ( mask ) );
+	MLX_FILL_1 ( &mask, 1, port_state_change, 1 );
+	if ( ( rc = arbel_cmd_map_eq ( arbel,
+				       ( ARBEL_MAP_EQ | arbel_eq->eqn ),
+				       &mask ) ) != 0 ) {
+		DBGC ( arbel, "Arbel %p MAP_EQ failed: %s\n",
+		       arbel, strerror ( rc )  );
+		goto err_map_eq;
+	}
+
+	DBGC ( arbel, "Arbel %p EQN %#lx ring at [%p,%p])\n",
+	       arbel, arbel_eq->eqn, arbel_eq->eqe,
+	       ( ( ( void * ) arbel_eq->eqe ) + arbel_eq->eqe_size ) );
+	return 0;
+
+ err_map_eq:
+	arbel_cmd_hw2sw_eq ( arbel, arbel_eq->eqn, &eqctx );
+ err_sw2hw_eq:
+	free_dma ( arbel_eq->eqe, arbel_eq->eqe_size );
+ err_eqe:
+	memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
+	return rc;
+}
+
+/**
+ * Destroy event queue
+ *
+ * @v arbel		Arbel device
+ */
+static void arbel_destroy_eq ( struct arbel *arbel ) {
+	struct arbel_event_queue *arbel_eq = &arbel->eq;
+	struct arbelprm_eqc eqctx;
+	struct arbelprm_event_mask mask;
+	int rc;
+
+	/* Unmap events from event queue */
+	memset ( &mask, 0, sizeof ( mask ) );
+	MLX_FILL_1 ( &mask, 1, port_state_change, 1 );
+	if ( ( rc = arbel_cmd_map_eq ( arbel,
+				       ( ARBEL_UNMAP_EQ | arbel_eq->eqn ),
+				       &mask ) ) != 0 ) {
+		DBGC ( arbel, "Arbel %p FATAL MAP_EQ failed to unmap: %s\n",
+		       arbel, strerror ( rc ) );
+		/* Continue; HCA may die but system should survive */
+	}
+
+	/* Take ownership back from hardware */
+	if ( ( rc = arbel_cmd_hw2sw_eq ( arbel, arbel_eq->eqn,
+					 &eqctx ) ) != 0 ) {
+		DBGC ( arbel, "Arbel %p FATAL HW2SW_EQ failed: %s\n",
+		       arbel, strerror ( rc ) );
+		/* Leak memory and return; at least we avoid corruption */
+		return;
+	}
+
+	/* Free memory */
+	free_dma ( arbel_eq->eqe, arbel_eq->eqe_size );
+	memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
+}
+
+/**
+ * Handle port state event
+ *
+ * @v arbel		Arbel device
+ * @v eqe		Port state change event queue entry
+ */
+static void arbel_event_port_state_change ( struct arbel *arbel,
+					    union arbelprm_event_entry *eqe){
+	unsigned int port;
+	int link_up;
+
+	/* Get port and link status */
+	port = ( MLX_GET ( &eqe->port_state_change, data.p ) - 1 );
+	link_up = ( MLX_GET ( &eqe->generic, event_sub_type ) & 0x04 );
+	DBGC ( arbel, "Arbel %p port %d link %s\n", arbel, ( port + 1 ),
+	       ( link_up ? "up" : "down" ) );
+
+	/* Sanity check */
+	if ( port >= ARBEL_NUM_PORTS ) {
+		DBGC ( arbel, "Arbel %p port %d does not exist!\n",
+		       arbel, ( port + 1 ) );
+		return;
+	}
+
+	/* Notify Infiniband core of link state change */
+	ib_link_state_changed ( arbel->ibdev[port] );
+}
+
 /**
  * Poll event queue
  *
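Two of the calculations in arbel_create_eq() are easy to gloss over. log_eq_size is fls ( ARBEL_NUM_EQES - 1 ): with ARBEL_NUM_EQES = 4, fls(3) = 2, so the EQ context advertises a queue of 2^2 = 4 entries. The owner bit is written into dword 7 of each entry, so an EQE spans 8 dwords (32 bytes) and the whole ring is 4 * 32 = 128 bytes. A worked sketch with those numbers (fls() here is gPXE's find-last-set, where fls(3) == 2; the 32-byte entry size is an assumption drawn from the dword-7 fill above):

/* Worked numbers only, not driver code. */
#define ARBEL_NUM_EQES		4
#define ARBEL_EQE_SIZE		32	/* 8 dwords per event queue entry (assumed) */

static void eq_sizing_example ( void ) {
	unsigned int log_eq_size = fls ( ARBEL_NUM_EQES - 1 );		/* == 2 */
	unsigned int num_eqes = ( 1 << log_eq_size );			/* == 4 entries */
	unsigned int eqe_size = ( ARBEL_NUM_EQES * ARBEL_EQE_SIZE );	/* == 128 bytes */

	( void ) num_eqes;
	( void ) eqe_size;
}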
@@ -1237,9 +1408,48 @@ static void arbel_poll_cq ( struct ib_device *ibdev,
  */
 static void arbel_poll_eq ( struct ib_device *ibdev ) {
 	struct arbel *arbel = ib_get_drvdata ( ibdev );
+	struct arbel_event_queue *arbel_eq = &arbel->eq;
+	union arbelprm_event_entry *eqe;
+	unsigned int eqe_idx_mask;
+	unsigned int event_type;
 
-	/* TODO */
-	( void ) arbel;
+	while ( 1 ) {
+		/* Look for event entry */
+		eqe_idx_mask = ( ARBEL_NUM_EQES - 1 );
+		eqe = &arbel_eq->eqe[arbel_eq->next_idx & eqe_idx_mask];
+		if ( MLX_GET ( &eqe->generic, owner ) != 0 ) {
+			/* Entry still owned by hardware; end of poll */
+			break;
+		}
+		DBGCP ( arbel, "Arbel %p event:\n", arbel );
+		DBGCP_HD ( arbel, eqe, sizeof ( *eqe ) );
+
+		/* Handle event */
+		event_type = MLX_GET ( &eqe->generic, event_type );
+		switch ( event_type ) {
+		case ARBEL_EV_PORT_STATE_CHANGE:
+			arbel_event_port_state_change ( arbel, eqe );
+			break;
+		default:
+			DBGC ( arbel, "Arbel %p unrecognised event type "
+			       "%#x:\n", arbel, event_type );
+			DBGC_HD ( arbel, eqe, sizeof ( *eqe ) );
+			break;
+		}
+
+		/* Return ownership to hardware */
+		MLX_FILL_1 ( &eqe->generic, 7, owner, 1 );
+		barrier();
+
+		/* Update event queue's index */
+		arbel_eq->next_idx++;
+
+		/* Ring doorbell */
+		DBGCP ( arbel, "Ringing doorbell %08lx with %08lx\n",
+			virt_to_phys ( arbel_eq->doorbell ),
+			arbel_eq->next_idx );
+		writel ( arbel_eq->next_idx, arbel_eq->doorbell );
+	}
 }
 
 /***************************************************************************
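The poll loop relies on two conventions set up in arbel_create_eq(): every entry starts out hardware-owned (owner = 1), hardware clears the bit when it writes an event, and software sets it back to 1 once the event has been handled; and because ARBEL_NUM_EQES is a power of two, the free-running next_idx counter can be folded onto the ring with a simple mask before the new consumer index is written to the doorbell. A small illustration of the index arithmetic:

/* Illustrative only: mapping the free-running consumer index onto the ring. */
#define ARBEL_NUM_EQES	4

static unsigned int eqe_slot ( unsigned long next_idx ) {
	unsigned int eqe_idx_mask = ( ARBEL_NUM_EQES - 1 );	/* == 3 */

	return ( next_idx & eqe_idx_mask );	/* e.g. next_idx 5 -> slot 1 */
}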
@@ -1473,6 +1683,7 @@ static int arbel_start_firmware ( struct arbel *arbel ) {
 	unsigned int log2_fw_pages;
 	size_t fw_size;
 	physaddr_t fw_base;
+	uint64_t eq_set_ci_base_addr;
 	int rc;
 
 	/* Get firmware parameters */
@@ -1489,6 +1700,10 @@ static int arbel_start_firmware ( struct arbel *arbel ) {
 	fw_pages = ( 1 << log2_fw_pages );
 	DBGC ( arbel, "Arbel %p requires %d kB for firmware\n",
 	       arbel, ( fw_pages * 4 ) );
+	eq_set_ci_base_addr =
+		( ( (uint64_t) MLX_GET ( &fw, eq_set_ci_base_addr_h ) << 32 ) |
+		  ( (uint64_t) MLX_GET ( &fw, eq_set_ci_base_addr_l ) ) );
+	arbel->eq_ci_doorbells = ioremap ( eq_set_ci_base_addr, 0x200 );
 
 	/* Enable locally-attached memory.  Ignore failure; there may
 	 * be no attached memory.
@@ -1591,6 +1806,7 @@ static int arbel_get_limits ( struct arbel *arbel ) {
 	arbel->limits.reserved_cqs =
 		( 1 << MLX_GET ( &dev_lim, log2_rsvd_cqs ) );
 	arbel->limits.cqc_entry_size = MLX_GET ( &dev_lim, cqc_entry_sz );
+	arbel->limits.reserved_eqs = MLX_GET ( &dev_lim, num_rsvd_eqs );
 	arbel->limits.reserved_mtts =
 		( 1 << MLX_GET ( &dev_lim, log2_rsvd_mtts ) );
 	arbel->limits.mtt_entry_size = MLX_GET ( &dev_lim, mtt_entry_sz );
@@ -1721,7 +1937,7 @@ static int arbel_alloc_icm ( struct arbel *arbel,
 	icm_offset += icm_usage ( log_num_rdbs, 32 );
 
 	/* Event queue contexts */
-	log_num_eqs = 6;
+	log_num_eqs =  fls ( arbel->limits.reserved_eqs + ARBEL_MAX_EQS - 1 );
 	MLX_FILL_2 ( init_hca, 33,
 		     qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_l,
 		     ( icm_offset >> 6 ),
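The replacement expression sizes the EQ context table to a power of two large enough for both the firmware-reserved event queues and the driver's own ARBEL_MAX_EQS allocatable ones. For instance, if the device reserved a single EQ, fls ( 1 + 64 - 1 ) = fls ( 64 ) = 7, so 2^7 = 128 EQ contexts are laid out in ICM. A worked sketch (the reserved_eqs value is illustrative; the real one comes from the num_rsvd_eqs field read in arbel_get_limits()):

/* Worked example only; fls(64) == 7. */
#define ARBEL_MAX_EQS	64

static unsigned int icm_eq_contexts ( unsigned int reserved_eqs ) {
	unsigned int log_num_eqs = fls ( reserved_eqs + ARBEL_MAX_EQS - 1 );

	return ( 1 << log_num_eqs );	/* e.g. reserved_eqs == 1 -> 128 contexts */
}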
@@ -1950,6 +2166,10 @@ static int arbel_probe ( struct pci_device *pci,
 	if ( ( rc = arbel_setup_mpt ( arbel ) ) != 0 )
 		goto err_setup_mpt;
 
+	/* Set up event queue */
+	if ( ( rc = arbel_create_eq ( arbel ) ) != 0 )
+		goto err_create_eq;
+
 	/* Register Infiniband devices */
 	for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ ) {
 		if ( ( rc = register_ibdev ( arbel->ibdev[i] ) ) != 0 ) {
@@ -1965,6 +2185,8 @@ static int arbel_probe ( struct pci_device *pci,
  err_register_ibdev:
 	for ( ; i >= 0 ; i-- )
 		unregister_ibdev ( arbel->ibdev[i] );
+	arbel_destroy_eq ( arbel );
+ err_create_eq:
  err_setup_mpt:
 	arbel_cmd_close_hca ( arbel );
  err_init_hca:
@@ -1997,6 +2219,7 @@ static void arbel_remove ( struct pci_device *pci ) {
 
 	for ( i = ( ARBEL_NUM_PORTS - 1 ) ; i >= 0 ; i-- )
 		unregister_ibdev ( arbel->ibdev[i] );
+	arbel_destroy_eq ( arbel );
 	arbel_cmd_close_hca ( arbel );
 	arbel_free_icm ( arbel );
 	arbel_stop_firmware ( arbel );

src/drivers/infiniband/arbel.h  (+65, -1)

@@ -18,7 +18,7 @@
  */
 
 /* Ports in existence */
-#define ARBEL_NUM_PORTS			1
+#define ARBEL_NUM_PORTS			2
 #define ARBEL_PORT_BASE			1
 
 /* PCI BARs */
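Raising ARBEL_NUM_PORTS to 2 is what actually turns on the second port: arbel_probe() registers one Infiniband device per port, and the new port-state-change handler converts the 1-based port number reported by hardware into an index into arbel->ibdev[]. The helpers below are purely hypothetical (they do not exist in the driver) and only illustrate the two numbering schemes:

/* Hypothetical helpers: hardware events carry 1-based port numbers starting
 * at ARBEL_PORT_BASE, while arbel->ibdev[] is indexed 0 .. ARBEL_NUM_PORTS-1.
 */
#define ARBEL_NUM_PORTS		2
#define ARBEL_PORT_BASE		1

static unsigned int arbel_port_to_index ( unsigned int port ) {
	return ( port - ARBEL_PORT_BASE );	/* port 1 -> 0, port 2 -> 1 */
}

static unsigned int arbel_index_to_port ( unsigned int index ) {
	return ( ARBEL_PORT_BASE + index );
}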
@@ -57,6 +57,7 @@
 #define ARBEL_HCR_RST2INIT_QPEE		0x0019
 #define ARBEL_HCR_INIT2RTR_QPEE		0x001a
 #define ARBEL_HCR_RTR2RTS_QPEE		0x001b
+#define ARBEL_HCR_RTS2RTS_QPEE		0x001c
 #define ARBEL_HCR_2RST_QPEE		0x0021
 #define ARBEL_HCR_MAD_IFC		0x0024
 #define ARBEL_HCR_READ_MGM		0x0025
@@ -86,6 +87,14 @@
 #define ARBEL_PAGE_SIZE			4096
 
 #define ARBEL_DB_POST_SND_OFFSET	0x10
+#define ARBEL_DB_EQ_OFFSET(_eqn)	( 0x08 * (_eqn) )
+
+#define ARBEL_QPEE_OPT_PARAM_QKEY	0x00000020UL
+
+#define ARBEL_MAP_EQ			( 0UL << 31 )
+#define ARBEL_UNMAP_EQ			( 1UL << 31 )
+
+#define ARBEL_EV_PORT_STATE_CHANGE	0x09
 
 /*
  * Datatypes that seem to be missing from the autogenerated documentation
@@ -104,6 +113,20 @@ struct arbelprm_scalar_parameter_st {
 	pseudo_bit_t value[0x00020];
 } __attribute__ (( packed ));
 
+struct arbelprm_event_mask_st {
+	pseudo_bit_t reserved0[0x00020];
+/* -------------- */
+	pseudo_bit_t completion[0x00001];
+	pseudo_bit_t reserved1[0x0008];
+	pseudo_bit_t port_state_change[0x00001];
+	pseudo_bit_t reserved2[0x00016];
+} __attribute__ (( packed ));
+
+struct arbelprm_port_state_change_event_st {
+	pseudo_bit_t reserved[0x00020];
+	struct arbelprm_port_state_change_st data;
+} __attribute__ (( packed ));
+
 /*
  * Wrapper structures for hardware datatypes
  *
@@ -115,6 +138,8 @@ struct MLX_DECLARE_STRUCT ( arbelprm_completion_queue_entry );
 struct MLX_DECLARE_STRUCT ( arbelprm_completion_with_error );
 struct MLX_DECLARE_STRUCT ( arbelprm_cq_arm_db_record );
 struct MLX_DECLARE_STRUCT ( arbelprm_cq_ci_db_record );
+struct MLX_DECLARE_STRUCT ( arbelprm_event_mask );
+struct MLX_DECLARE_STRUCT ( arbelprm_event_queue_entry );
 struct MLX_DECLARE_STRUCT ( arbelprm_eqc );
 struct MLX_DECLARE_STRUCT ( arbelprm_hca_command_register );
 struct MLX_DECLARE_STRUCT ( arbelprm_init_hca );
@@ -123,6 +148,7 @@ struct MLX_DECLARE_STRUCT ( arbelprm_mad_ifc );
 struct MLX_DECLARE_STRUCT ( arbelprm_mgm_entry );
 struct MLX_DECLARE_STRUCT ( arbelprm_mgm_hash );
 struct MLX_DECLARE_STRUCT ( arbelprm_mpt );
+struct MLX_DECLARE_STRUCT ( arbelprm_port_state_change_event );
 struct MLX_DECLARE_STRUCT ( arbelprm_qp_db_record );
 struct MLX_DECLARE_STRUCT ( arbelprm_qp_ee_state_transitions );
 struct MLX_DECLARE_STRUCT ( arbelprm_query_dev_lim );
@@ -172,6 +198,11 @@ union arbelprm_completion_entry {
 	struct arbelprm_completion_with_error error;
 } __attribute__ (( packed ));
 
+union arbelprm_event_entry {
+	struct arbelprm_event_queue_entry generic;
+	struct arbelprm_port_state_change_event port_state_change;
+} __attribute__ (( packed ));
+
 union arbelprm_doorbell_record {
 	struct arbelprm_cq_arm_db_record cq_arm;
 	struct arbelprm_cq_ci_db_record cq_ci;
@@ -215,6 +246,8 @@ struct arbel_dev_limits {
 	unsigned int reserved_cqs;
 	/** CQ context entry size */
 	size_t cqc_entry_size;
+	/** Number of reserved EQs */
+	unsigned int reserved_eqs;
 	/** Number of reserved MTTs */
 	unsigned int reserved_mtts;
 	/** MTT entry size */
@@ -304,6 +337,33 @@ struct arbel_completion_queue {
 	size_t cqe_size;
 };
 
+/** Maximum number of allocatable event queues
+ *
+ * This is a policy decision, not a device limit.
+ */
+#define ARBEL_MAX_EQS		64
+
+/** A Arbel event queue */
+struct arbel_event_queue {
+	/** Event queue entries */
+	union arbelprm_event_entry *eqe;
+	/** Size of event queue */
+	size_t eqe_size;
+	/** Event queue number */
+	unsigned long eqn;
+	/** Next event queue entry index */
+	unsigned long next_idx;
+	/** Doorbell register */
+	void *doorbell;
+};
+
+/** Number of event queue entries
+ *
+ * This is a policy decision.
+ */
+#define ARBEL_NUM_EQES		4
+
+
 /** An Arbel resource bitmask */
 typedef uint32_t arbel_bitmask_t;
 
@@ -318,6 +378,8 @@ struct arbel {
 	void *config;
 	/** PCI user Access Region */
 	void *uar;
+	/** Event queue consumer index doorbells */
+	void *eq_ci_doorbells;
 
 	/** Command input mailbox */
 	void *mailbox_in;
@@ -333,6 +395,8 @@ struct arbel {
 	/** ICM area */
 	userptr_t icm;
 
+	/** Event queue */
+	struct arbel_event_queue eq;
 	/** Doorbell records */
 	union arbelprm_doorbell_record *db_rec;
 	/** Reserved LKey

src/drivers/infiniband/hermon.h  (+2, -3)

@@ -456,15 +456,14 @@ struct hermon {
 	/** ICM area */
 	userptr_t icm;
 
+	/** Event queue */
+	struct hermon_event_queue eq;
 	/** Reserved LKey
 	 *
 	 * Used to get unrestricted memory access.
 	 */
 	unsigned long reserved_lkey;
 
-	/** Event queue */
-	struct hermon_event_queue eq;
-
 	/** Completion queue in-use bitmask */
 	hermon_bitmask_t cq_inuse[ HERMON_BITMASK_SIZE ( HERMON_MAX_CQS ) ];
 	/** Queue pair in-use bitmask */
