@@ -275,20 +275,31 @@ arbel_cmd_sw2hw_mpt ( struct arbel *arbel, unsigned int index,
 			   0, mpt, index, NULL );
 }
 
+static inline int
+arbel_cmd_map_eq ( struct arbel *arbel, unsigned long index_map,
+		   const struct arbelprm_event_mask *mask ) {
+	return arbel_cmd ( arbel,
+			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_EQ,
+					      0, sizeof ( *mask ) ),
+			   0, mask, index_map, NULL );
+}
+
 static inline int
 arbel_cmd_sw2hw_eq ( struct arbel *arbel, unsigned int index,
-		     const struct arbelprm_eqc *eqc ) {
+		     const struct arbelprm_eqc *eqctx ) {
 	return arbel_cmd ( arbel,
 			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_EQ,
-					      1, sizeof ( *eqc ) ),
-			   0, eqc, index, NULL );
+					      1, sizeof ( *eqctx ) ),
+			   0, eqctx, index, NULL );
 }
 
 static inline int
-arbel_cmd_hw2sw_eq ( struct arbel *arbel, unsigned int index ) {
+arbel_cmd_hw2sw_eq ( struct arbel *arbel, unsigned int index,
+		     struct arbelprm_eqc *eqctx ) {
 	return arbel_cmd ( arbel,
-			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_HW2SW_EQ ),
-			   1, NULL, index, NULL );
+			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_HW2SW_EQ,
+					       1, sizeof ( *eqctx ) ),
+			   1, NULL, index, eqctx );
 }
 
 static inline int
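Every wrapper above is a thin shim over arbel_cmd(): the ARBEL_HCR_*_CMD macro names the firmware opcode and states whether the command carries an input mailbox, an output mailbox, or neither, which is why HW2SW_EQ switches from a void command to an output command once the EQ context must be read back. Below is a standalone sketch of that three-way distinction only; the struct, macro names, and the example opcode are illustrative assumptions, not the driver's real encoding from arbel.h.

    /* Illustrative sketch only: not the driver's real ARBEL_HCR_*_CMD
     * encoding, just what each descriptor communicates to arbel_cmd().
     */
    #include <stdio.h>
    #include <stddef.h>

    struct hcr_cmd_sketch {
    	unsigned int opcode;	/* firmware command opcode */
    	size_t in_len;		/* input mailbox length (0 = none) */
    	size_t out_len;		/* output mailbox length (0 = none) */
    };

    /* Command with an input mailbox (e.g. MAP_EQ, SW2HW_EQ) */
    #define SKETCH_IN_CMD( op, len ) \
    	( ( struct hcr_cmd_sketch ) { .opcode = (op), .in_len = (len) } )

    /* Command with an output mailbox (e.g. HW2SW_EQ after this change) */
    #define SKETCH_OUT_CMD( op, len ) \
    	( ( struct hcr_cmd_sketch ) { .opcode = (op), .out_len = (len) } )

    /* Command with no mailbox at all (what HW2SW_EQ used to be) */
    #define SKETCH_VOID_CMD( op ) \
    	( ( struct hcr_cmd_sketch ) { .opcode = (op) } )

    int main ( void ) {
    	/* 0x14 is an example opcode value, not taken from the driver */
    	struct hcr_cmd_sketch cmd = SKETCH_OUT_CMD ( 0x14, 64 );

    	printf ( "opcode %#x, in %zu bytes, out %zu bytes\n",
    		 cmd.opcode, cmd.in_len, cmd.out_len );
    	return 0;
    }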
@@ -336,6 +347,15 @@ arbel_cmd_rtr2rts_qpee ( struct arbel *arbel, unsigned long qpn,
 			   0, ctx, qpn, NULL );
 }
 
+static inline int
+arbel_cmd_rts2rts_qp ( struct arbel *arbel, unsigned long qpn,
+		       const struct arbelprm_qp_ee_state_transitions *ctx ) {
+	return arbel_cmd ( arbel,
+			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_RTS2RTS_QPEE,
+					      1, sizeof ( *ctx ) ),
+			   0, ctx, qpn, NULL );
+}
+
 static inline int
 arbel_cmd_2rst_qpee ( struct arbel *arbel, unsigned long qpn ) {
 	return arbel_cmd ( arbel,
@@ -847,13 +867,25 @@ static int arbel_modify_qp ( struct ib_device *ibdev,
 			     struct ib_queue_pair *qp,
 			     unsigned long mod_list ) {
 	struct arbel *arbel = ib_get_drvdata ( ibdev );
+	struct arbelprm_qp_ee_state_transitions qpctx;
+	unsigned long optparammask = 0;
+	int rc;
 
-	/* TODO */
-	( void ) arbel;
-	( void ) qp;
-	( void ) mod_list;
+	/* Construct optparammask */
+	if ( mod_list & IB_MODIFY_QKEY )
+		optparammask |= ARBEL_QPEE_OPT_PARAM_QKEY;
 
-	return -ENOTSUP;
+	/* Issue RTS2RTS_QP */
+	memset ( &qpctx, 0, sizeof ( qpctx ) );
+	MLX_FILL_1 ( &qpctx, 0, opt_param_mask, optparammask );
+	MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
+	if ( ( rc = arbel_cmd_rts2rts_qp ( arbel, qp->qpn, &qpctx ) ) != 0 ){
+		DBGC ( arbel, "Arbel %p RTS2RTS_QP failed: %s\n",
+		       arbel, strerror ( rc ) );
+		return rc;
+	}
+
+	return 0;
 }
 
 /**
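The RTS-to-RTS transition only applies fields whose bits are set in opt_param_mask, so the driver can change just the Q_Key without re-supplying the rest of the QP context. A minimal standalone illustration of that optional-parameter-mask idea follows; the bit values and struct are made up for the sketch (the real bit is ARBEL_QPEE_OPT_PARAM_QKEY and the real context is the arbelprm structure above).

    /* Sketch of the optional-parameter-mask idea: only fields whose bit
     * is set in the mask are applied.  Bit values here are illustrative.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_MODIFY_QKEY	0x01UL	/* caller requests a Q_Key change */
    #define SKETCH_OPT_PARAM_QKEY	0x08UL	/* assumed mask bit for Q_Key */

    struct qp_sketch {
    	uint32_t qkey;
    };

    static void modify_qp_sketch ( struct qp_sketch *qp,
    				   unsigned long mod_list,
    				   uint32_t new_qkey ) {
    	unsigned long optparammask = 0;

    	/* Translate the caller's modify list into the hardware mask */
    	if ( mod_list & SKETCH_MODIFY_QKEY )
    		optparammask |= SKETCH_OPT_PARAM_QKEY;

    	/* Only the fields selected by the mask are touched */
    	if ( optparammask & SKETCH_OPT_PARAM_QKEY )
    		qp->qkey = new_qkey;
    }

    int main ( void ) {
    	struct qp_sketch qp = { .qkey = 0 };

    	modify_qp_sketch ( &qp, SKETCH_MODIFY_QKEY, 0x12345678 );
    	printf ( "qkey is now %#x\n", qp.qkey );
    	return 0;
    }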
@@ -1230,6 +1262,145 @@ static void arbel_poll_cq ( struct ib_device *ibdev,
  ***************************************************************************
  */
 
+/**
+ * Create event queue
+ *
+ * @v arbel		Arbel device
+ * @ret rc		Return status code
+ */
+static int arbel_create_eq ( struct arbel *arbel ) {
+	struct arbel_event_queue *arbel_eq = &arbel->eq;
+	struct arbelprm_eqc eqctx;
+	struct arbelprm_event_mask mask;
+	unsigned int i;
+	int rc;
+
+	/* Select event queue number */
+	arbel_eq->eqn = arbel->limits.reserved_eqs;
+
+	/* Calculate doorbell address */
+	arbel_eq->doorbell = ( arbel->eq_ci_doorbells +
+			       ARBEL_DB_EQ_OFFSET ( arbel_eq->eqn ) );
+
+	/* Allocate event queue itself */
+	arbel_eq->eqe_size =
+		( ARBEL_NUM_EQES * sizeof ( arbel_eq->eqe[0] ) );
+	arbel_eq->eqe = malloc_dma ( arbel_eq->eqe_size,
+				     sizeof ( arbel_eq->eqe[0] ) );
+	if ( ! arbel_eq->eqe ) {
+		rc = -ENOMEM;
+		goto err_eqe;
+	}
+	memset ( arbel_eq->eqe, 0, arbel_eq->eqe_size );
+	for ( i = 0 ; i < ARBEL_NUM_EQES ; i++ ) {
+		MLX_FILL_1 ( &arbel_eq->eqe[i].generic, 7, owner, 1 );
+	}
+	barrier();
+
+	/* Hand queue over to hardware */
+	memset ( &eqctx, 0, sizeof ( eqctx ) );
+	MLX_FILL_1 ( &eqctx, 0, st, 0xa /* "Fired" */ );
+	MLX_FILL_1 ( &eqctx, 2,
+		     start_address_l, virt_to_phys ( arbel_eq->eqe ) );
+	MLX_FILL_1 ( &eqctx, 3, log_eq_size, fls ( ARBEL_NUM_EQES - 1 ) );
+	MLX_FILL_1 ( &eqctx, 6, pd, ARBEL_GLOBAL_PD );
+	MLX_FILL_1 ( &eqctx, 7, lkey, arbel->reserved_lkey );
+	if ( ( rc = arbel_cmd_sw2hw_eq ( arbel, arbel_eq->eqn,
+					 &eqctx ) ) != 0 ) {
+		DBGC ( arbel, "Arbel %p SW2HW_EQ failed: %s\n",
+		       arbel, strerror ( rc ) );
+		goto err_sw2hw_eq;
+	}
+
+	/* Map events to this event queue */
+	memset ( &mask, 0, sizeof ( mask ) );
+	MLX_FILL_1 ( &mask, 1, port_state_change, 1 );
+	if ( ( rc = arbel_cmd_map_eq ( arbel,
+				       ( ARBEL_MAP_EQ | arbel_eq->eqn ),
+				       &mask ) ) != 0 ) {
+		DBGC ( arbel, "Arbel %p MAP_EQ failed: %s\n",
+		       arbel, strerror ( rc )  );
+		goto err_map_eq;
+	}
+
+	DBGC ( arbel, "Arbel %p EQN %#lx ring at [%p,%p])\n",
+	       arbel, arbel_eq->eqn, arbel_eq->eqe,
+	       ( ( ( void * ) arbel_eq->eqe ) + arbel_eq->eqe_size ) );
+	return 0;
+
+ err_map_eq:
+	arbel_cmd_hw2sw_eq ( arbel, arbel_eq->eqn, &eqctx );
+ err_sw2hw_eq:
+	free_dma ( arbel_eq->eqe, arbel_eq->eqe_size );
+ err_eqe:
+	memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
+	return rc;
+}
+
+/**
+ * Destroy event queue
+ *
+ * @v arbel		Arbel device
+ */
+static void arbel_destroy_eq ( struct arbel *arbel ) {
+	struct arbel_event_queue *arbel_eq = &arbel->eq;
+	struct arbelprm_eqc eqctx;
+	struct arbelprm_event_mask mask;
+	int rc;
+
+	/* Unmap events from event queue */
+	memset ( &mask, 0, sizeof ( mask ) );
+	MLX_FILL_1 ( &mask, 1, port_state_change, 1 );
+	if ( ( rc = arbel_cmd_map_eq ( arbel,
+				       ( ARBEL_UNMAP_EQ | arbel_eq->eqn ),
+				       &mask ) ) != 0 ) {
+		DBGC ( arbel, "Arbel %p FATAL MAP_EQ failed to unmap: %s\n",
+		       arbel, strerror ( rc ) );
+		/* Continue; HCA may die but system should survive */
+	}
+
+	/* Take ownership back from hardware */
+	if ( ( rc = arbel_cmd_hw2sw_eq ( arbel, arbel_eq->eqn,
+					 &eqctx ) ) != 0 ) {
+		DBGC ( arbel, "Arbel %p FATAL HW2SW_EQ failed: %s\n",
+		       arbel, strerror ( rc ) );
+		/* Leak memory and return; at least we avoid corruption */
+		return;
+	}
+
+	/* Free memory */
+	free_dma ( arbel_eq->eqe, arbel_eq->eqe_size );
+	memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
+}
+
+/**
+ * Handle port state event
+ *
+ * @v arbel		Arbel device
+ * @v eqe		Port state change event queue entry
+ */
+static void arbel_event_port_state_change ( struct arbel *arbel,
+					    union arbelprm_event_entry *eqe){
+	unsigned int port;
+	int link_up;
+
+	/* Get port and link status */
+	port = ( MLX_GET ( &eqe->port_state_change, data.p ) - 1 );
+	link_up = ( MLX_GET ( &eqe->generic, event_sub_type ) & 0x04 );
+	DBGC ( arbel, "Arbel %p port %d link %s\n", arbel, ( port + 1 ),
+	       ( link_up ? "up" : "down" ) );
+
+	/* Sanity check */
+	if ( port >= ARBEL_NUM_PORTS ) {
+		DBGC ( arbel, "Arbel %p port %d does not exist!\n",
+		       arbel, ( port + 1 ) );
+		return;
+	}
+
+	/* Notify Infiniband core of link state change */
+	ib_link_state_changed ( arbel->ibdev[port] );
+}
+
 /**
  * Poll event queue
  *
@@ -1237,9 +1408,48 @@ static void arbel_poll_cq ( struct ib_device *ibdev,
  */
 static void arbel_poll_eq ( struct ib_device *ibdev ) {
 	struct arbel *arbel = ib_get_drvdata ( ibdev );
+	struct arbel_event_queue *arbel_eq = &arbel->eq;
+	union arbelprm_event_entry *eqe;
+	unsigned int eqe_idx_mask;
+	unsigned int event_type;
 
-	/* TODO */
-	( void ) arbel;
+	while ( 1 ) {
+		/* Look for event entry */
+		eqe_idx_mask = ( ARBEL_NUM_EQES - 1 );
+		eqe = &arbel_eq->eqe[arbel_eq->next_idx & eqe_idx_mask];
+		if ( MLX_GET ( &eqe->generic, owner ) != 0 ) {
+			/* Entry still owned by hardware; end of poll */
+			break;
+		}
+		DBGCP ( arbel, "Arbel %p event:\n", arbel );
+		DBGCP_HD ( arbel, eqe, sizeof ( *eqe ) );
+
+		/* Handle event */
+		event_type = MLX_GET ( &eqe->generic, event_type );
+		switch ( event_type ) {
+		case ARBEL_EV_PORT_STATE_CHANGE:
+			arbel_event_port_state_change ( arbel, eqe );
+			break;
+		default:
+			DBGC ( arbel, "Arbel %p unrecognised event type "
+			       "%#x:\n", arbel, event_type );
+			DBGC_HD ( arbel, eqe, sizeof ( *eqe ) );
+			break;
+		}
+
+		/* Return ownership to hardware */
+		MLX_FILL_1 ( &eqe->generic, 7, owner, 1 );
+		barrier();
+
+		/* Update event queue's index */
+		arbel_eq->next_idx++;
+
+		/* Ring doorbell */
+		DBGCP ( arbel, "Ringing doorbell %08lx with %08lx\n",
+			virt_to_phys ( arbel_eq->doorbell ),
+			arbel_eq->next_idx );
+		writel ( arbel_eq->next_idx, arbel_eq->doorbell );
+	}
 }
 
 /***************************************************************************
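The poll loop relies on the ownership bit: arbel_create_eq() hands every entry to hardware with owner set, hardware clears the bit when it posts an event, and software gives the entry back by setting the bit again and advancing the consumer index that gets written to the doorbell. Below is a self-contained model of that protocol only, with hardware's side faked in main(); it is a simulation, not driver code, and the event value is arbitrary.

    /* Standalone model of the owner-bit protocol used by arbel_poll_eq(). */
    #include <stdio.h>

    #define NUM_EQES 8			/* must be a power of two */

    struct eqe_sketch {
    	int owner;			/* 1 = hardware-owned, 0 = event posted */
    	int event_type;
    };

    static struct eqe_sketch eq[NUM_EQES];
    static unsigned int next_idx;

    static void poll_eq_sketch ( void ) {
    	unsigned int mask = ( NUM_EQES - 1 );
    	struct eqe_sketch *eqe;

    	while ( 1 ) {
    		eqe = &eq[ next_idx & mask ];
    		if ( eqe->owner != 0 )
    			break;		/* still owned by hardware; stop */
    		printf ( "handled event %#x at index %u\n",
    			 eqe->event_type, ( next_idx & mask ) );
    		eqe->owner = 1;		/* return ownership to hardware */
    		next_idx++;		/* new consumer index; the driver
    					 * writes this to the EQ doorbell */
    	}
    }

    int main ( void ) {
    	unsigned int i;

    	/* Initially every entry belongs to hardware, as in arbel_create_eq() */
    	for ( i = 0 ; i < NUM_EQES ; i++ )
    		eq[i].owner = 1;

    	/* "Hardware" posts two events */
    	eq[0].event_type = 0x09;
    	eq[0].owner = 0;
    	eq[1].event_type = 0x09;
    	eq[1].owner = 0;

    	poll_eq_sketch();
    	return 0;
    }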
@@ -1473,6 +1683,7 @@ static int arbel_start_firmware ( struct arbel *arbel ) {
 	unsigned int log2_fw_pages;
 	size_t fw_size;
 	physaddr_t fw_base;
+	uint64_t eq_set_ci_base_addr;
 	int rc;
 
 	/* Get firmware parameters */
@@ -1489,6 +1700,10 @@ static int arbel_start_firmware ( struct arbel *arbel ) {
 	fw_pages = ( 1 << log2_fw_pages );
 	DBGC ( arbel, "Arbel %p requires %d kB for firmware\n",
 	       arbel, ( fw_pages * 4 ) );
+	eq_set_ci_base_addr =
+		( ( (uint64_t) MLX_GET ( &fw, eq_set_ci_base_addr_h ) << 32 ) |
+		  ( (uint64_t) MLX_GET ( &fw, eq_set_ci_base_addr_l ) ) );
+	arbel->eq_ci_doorbells = ioremap ( eq_set_ci_base_addr, 0x200 );
 
 	/* Enable locally-attached memory.  Ignore failure; there may
 	 * be no attached memory.
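QUERY_FW reports the EQ set-CI doorbell area as separate high and low 32-bit words, so the driver assembles a 64-bit physical address before handing it to ioremap(); the resulting eq_ci_doorbells pointer is what arbel_create_eq() offsets per EQ. A tiny standalone check of the assembly step, with made-up example values:

    /* Sketch of assembling the 64-bit doorbell base from two 32-bit words. */
    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main ( void ) {
    	uint32_t base_addr_h = 0x00000001;	/* example high word */
    	uint32_t base_addr_l = 0xf8002800;	/* example low word */
    	uint64_t eq_set_ci_base_addr;

    	/* Widen before shifting so the high word is not lost */
    	eq_set_ci_base_addr = ( ( (uint64_t) base_addr_h << 32 ) |
    				( (uint64_t) base_addr_l ) );
    	printf ( "doorbell base = %#" PRIx64 "\n", eq_set_ci_base_addr );
    	return 0;
    }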
@@ -1591,6 +1806,7 @@ static int arbel_get_limits ( struct arbel *arbel ) {
 	arbel->limits.reserved_cqs =
 		( 1 << MLX_GET ( &dev_lim, log2_rsvd_cqs ) );
 	arbel->limits.cqc_entry_size = MLX_GET ( &dev_lim, cqc_entry_sz );
+	arbel->limits.reserved_eqs = MLX_GET ( &dev_lim, num_rsvd_eqs );
 	arbel->limits.reserved_mtts =
 		( 1 << MLX_GET ( &dev_lim, log2_rsvd_mtts ) );
 	arbel->limits.mtt_entry_size = MLX_GET ( &dev_lim, mtt_entry_sz );
@@ -1721,7 +1937,7 @@ static int arbel_alloc_icm ( struct arbel *arbel,
 	icm_offset += icm_usage ( log_num_rdbs, 32 );
 
 	/* Event queue contexts */
-	log_num_eqs = 6;
+	log_num_eqs =  fls ( arbel->limits.reserved_eqs + ARBEL_MAX_EQS - 1 );
 	MLX_FILL_2 ( init_hca, 33,
 		     qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_l,
 		     ( icm_offset >> 6 ),
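Instead of the previous hard-coded value, the number of EQ contexts laid out in ICM is now derived from the firmware's reserved EQ count plus the driver's own EQs, rounded up to a power of two via fls(); the same fls(n - 1) trick supplies log_eq_size in arbel_create_eq(). A worked example with made-up numbers, using a portable stand-in for fls():

    /* Worked example of the fls()-based sizing; reserved_eqs and max_eqs
     * are made-up values, and fls_sketch() mimics the usual 1-based
     * find-last-set semantics rather than using the driver's fls().
     */
    #include <stdio.h>

    static int fls_sketch ( unsigned long x ) {
    	int bit = 0;

    	while ( x ) {
    		x >>= 1;
    		bit++;
    	}
    	return bit;
    }

    int main ( void ) {
    	unsigned int reserved_eqs = 32;	/* example firmware-reserved EQs */
    	unsigned int max_eqs = 8;	/* example driver-usable EQs */
    	int log_num_eqs = fls_sketch ( reserved_eqs + max_eqs - 1 );

    	/* 32 + 8 = 40 contexts needed, rounded up to 64 (2^6) */
    	printf ( "log_num_eqs = %d (%u EQ contexts in ICM)\n",
    		 log_num_eqs, ( 1U << log_num_eqs ) );

    	/* For a power-of-two queue size n, fls ( n - 1 ) == log2 ( n ) */
    	printf ( "log_eq_size for 8 entries = %d\n", fls_sketch ( 8 - 1 ) );
    	return 0;
    }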
@@ -1950,6 +2166,10 @@ static int arbel_probe ( struct pci_device *pci,
 	if ( ( rc = arbel_setup_mpt ( arbel ) ) != 0 )
 		goto err_setup_mpt;
 
+	/* Set up event queue */
+	if ( ( rc = arbel_create_eq ( arbel ) ) != 0 )
+		goto err_create_eq;
+
 	/* Register Infiniband devices */
 	for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ ) {
 		if ( ( rc = register_ibdev ( arbel->ibdev[i] ) ) != 0 ) {
@@ -1965,6 +2185,8 @@ static int arbel_probe ( struct pci_device *pci,
  err_register_ibdev:
 	for ( ; i >= 0 ; i-- )
 		unregister_ibdev ( arbel->ibdev[i] );
+	arbel_destroy_eq ( arbel );
+ err_create_eq:
  err_setup_mpt:
 	arbel_cmd_close_hca ( arbel );
  err_init_hca:
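The new err_create_eq label slots into arbel_probe()'s existing goto ladder: a failure at any later step falls through arbel_destroy_eq() exactly once, while a failure inside arbel_create_eq() itself jumps past it. A minimal sketch of that unwind idiom with placeholder steps (step_a/step_b/undo_a are not driver functions):

    /* Minimal sketch of the goto-unwind idiom used by arbel_probe(). */
    #include <errno.h>
    #include <stdio.h>

    static int setup_done;

    static int step_a ( void ) { setup_done = 1; return 0; }
    static void undo_a ( void ) { setup_done = 0; }
    static int step_b ( void ) { return -ENODEV; }	/* simulate a failure */

    static int probe_sketch ( void ) {
    	int rc;

    	if ( ( rc = step_a() ) != 0 )
    		goto err_a;
    	if ( ( rc = step_b() ) != 0 )
    		goto err_b;

    	return 0;

     err_b:
    	undo_a();	/* a later failure unwinds the earlier step once */
     err_a:
    	return rc;
    }

    int main ( void ) {
    	printf ( "probe_sketch() = %d, step_a still set up: %d\n",
    		 probe_sketch(), setup_done );
    	return 0;
    }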
@@ -1997,6 +2219,7 @@ static void arbel_remove ( struct pci_device *pci ) {
 
 	for ( i = ( ARBEL_NUM_PORTS - 1 ) ; i >= 0 ; i-- )
 		unregister_ibdev ( arbel->ibdev[i] );
+	arbel_destroy_eq ( arbel );
 	arbel_cmd_close_hca ( arbel );
 	arbel_free_icm ( arbel );
 	arbel_stop_firmware ( arbel );