
[arbel] Minimise the number of VPM mapping operations

Mapping a single page at a time causes a several-second delay at
device initialisation time.  Reduce this by mapping multiple pages at
a time, using the largest block sizes possible given the alignment
constraints.

Signed-off-by: Michael Brown <mcb30@ipxe.org>
Michael Brown, 14 years ago
commit 8cd2b170b6
tags/v1.20.1

2 changed files with 129 additions and 50 deletions:

  1. src/drivers/infiniband/arbel.c  (+125, -47)
  2. src/drivers/infiniband/arbel.h  (+4, -3)
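
The idea is easier to see in isolation than in the driver diff below: cover the page-aligned region with the largest naturally aligned power-of-two blocks available, growing outwards from a single split point, so that a handful of mapping commands replaces thousands of single-page commands. The following standalone sketch (not part of the commit) mirrors the strategy of the new arbel_map_vpm() loop; PAGE_SIZE, fls_ul() and emit_block() are illustrative stand-ins rather than iPXE APIs.

/* Standalone sketch: decompose a page-aligned region
 * [start, start + len) into naturally aligned power-of-two blocks,
 * largest first, mirroring the strategy described in the commit
 * message.  PAGE_SIZE, fls_ul() and emit_block() are stand-ins.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Find last (most significant) set bit, 1-based; returns 0 for x == 0 */
static int fls_ul ( unsigned long x ) {
	int bit = 0;

	while ( x ) {
		x >>= 1;
		bit++;
	}
	return bit;
}

/* Stand-in for issuing a single mapping command (e.g. MAP_ICM) */
static void emit_block ( unsigned long pa, unsigned long size ) {
	printf ( "map %#lx+%#lx\n", pa, size );
}

static void map_region ( unsigned long start, unsigned long len ) {
	unsigned long end;
	unsigned long size;
	unsigned long low;
	unsigned long high;

	assert ( len > 0 );
	assert ( ( start & ( PAGE_SIZE - 1 ) ) == 0 );
	assert ( ( len & ( PAGE_SIZE - 1 ) ) == 0 );

	/* The largest usable block size is set by the most significant
	 * bit in which start and end differ; low/high is the unique
	 * size-aligned split point within (start, end].
	 */
	end = ( start + len );
	size = ( 1UL << ( fls_ul ( start ^ end ) - 1 ) );
	low = high = ( end & ~( size - 1 ) );

	/* Grow the mapped region [low, high) downwards and upwards in
	 * descending order of block size until it covers [start, end).
	 */
	while ( size >= PAGE_SIZE ) {
		if ( ( low > start ) && ( ( low - size ) >= start ) ) {
			low -= size;
			emit_block ( low, size );
		} else if ( ( high + size ) <= end ) {
			emit_block ( high, size );
			high += size;
		} else {
			size >>= 1;
		}
	}
	assert ( low == start );
	assert ( high == end );
}

int main ( void ) {
	/* Example: a 36 kB region at physical address 0x12000 */
	map_region ( 0x12000, 0x9000 );
	return 0;
}

For the 36 kB example this issues four commands (16 kB, two of 8 kB, and 4 kB) instead of nine single-page commands; applied to the multi-megabyte firmware and ICM areas, the same decomposition is what the commit relies on to cut the several-second initialisation delay.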

src/drivers/infiniband/arbel.c  (+125, -47)

@@ -457,10 +457,12 @@
 }
 
 static inline int
-arbel_cmd_unmap_icm ( struct arbel *arbel, unsigned int page_count ) {
+arbel_cmd_unmap_icm ( struct arbel *arbel, unsigned int page_count,
+		      const struct arbelprm_scalar_parameter *offset ) {
 	return arbel_cmd ( arbel,
-			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_UNMAP_ICM ),
-			   0, NULL, page_count, NULL );
+			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_UNMAP_ICM, 0,
+					      sizeof ( *offset ) ),
+			   0, offset, page_count, NULL );
 }
 
 static inline int
@@ -1982,35 +1984,67 @@
 			     const struct arbelprm_virtual_physical_mapping* ),
 			   uint64_t va, physaddr_t pa, size_t len ) {
 	struct arbelprm_virtual_physical_mapping mapping;
+	physaddr_t start;
+	physaddr_t low;
+	physaddr_t high;
+	physaddr_t end;
+	size_t size;
 	int rc;
 
+	/* Sanity checks */
 	assert ( ( va & ( ARBEL_PAGE_SIZE - 1 ) ) == 0 );
 	assert ( ( pa & ( ARBEL_PAGE_SIZE - 1 ) ) == 0 );
 	assert ( ( len & ( ARBEL_PAGE_SIZE - 1 ) ) == 0 );
 
+	/* Calculate starting points */
+	start = pa;
+	end = ( start + len );
+	size = ( 1UL << ( fls ( start ^ end ) - 1 ) );
+	low = high = ( end & ~( size - 1 ) );
+	assert ( start < low );
+	assert ( high <= end );
+
 	/* These mappings tend to generate huge volumes of
 	 * uninteresting debug data, which basically makes it
 	 * impossible to use debugging otherwise.
 	 */
 	DBG_DISABLE ( DBGLVL_LOG | DBGLVL_EXTRA );
 
-	while ( len ) {
+	/* Map blocks in descending order of size */
+	while ( size >= ARBEL_PAGE_SIZE ) {
+
+		/* Find the next candidate block */
+		if ( ( low - size ) >= start ) {
+			low -= size;
+			pa = low;
+		} else if ( ( high + size ) <= end ) {
+			pa = high;
+			high += size;
+		} else {
+			size >>= 1;
+			continue;
+		}
+		assert ( ( va & ( size - 1 ) ) == 0 );
+		assert ( ( pa & ( size - 1 ) ) == 0 );
+
+		/* Map this block */
 		memset ( &mapping, 0, sizeof ( mapping ) );
 		MLX_FILL_1 ( &mapping, 0, va_h, ( va >> 32 ) );
 		MLX_FILL_1 ( &mapping, 1, va_l, ( va >> 12 ) );
 		MLX_FILL_2 ( &mapping, 3,
-			     log2size, 0,
+			     log2size, ( ( fls ( size ) - 1 ) - 12 ),
			     pa_l, ( pa >> 12 ) );
 		if ( ( rc = map ( arbel, &mapping ) ) != 0 ) {
 			DBG_ENABLE ( DBGLVL_LOG | DBGLVL_EXTRA );
-			DBGC ( arbel, "Arbel %p could not map %llx => %lx: "
-			       "%s\n", arbel, va, pa, strerror ( rc ) );
+			DBGC ( arbel, "Arbel %p could not map %08llx+%zx to "
+			       "%08lx: %s\n",
+			       arbel, va, size, pa, strerror ( rc ) );
 			return rc;
 		}
-		pa += ARBEL_PAGE_SIZE;
-		va += ARBEL_PAGE_SIZE;
-		len -= ARBEL_PAGE_SIZE;
+		va += size;
 	}
+	assert ( low == start );
+	assert ( high == end );
 
 	DBG_ENABLE ( DBGLVL_LOG | DBGLVL_EXTRA );
 	return 0;
@@ -2026,7 +2060,6 @@
 	struct arbelprm_query_fw fw;
 	struct arbelprm_access_lam lam;
 	unsigned int fw_pages;
-	unsigned int log2_fw_pages;
 	size_t fw_size;
 	physaddr_t fw_base;
 	uint64_t eq_set_ci_base_addr;
@@ -2042,8 +2075,6 @@
 	       MLX_GET ( &fw, fw_rev_major ), MLX_GET ( &fw, fw_rev_minor ),
 	       MLX_GET ( &fw, fw_rev_subminor ) );
 	fw_pages = MLX_GET ( &fw, fw_pages );
-	log2_fw_pages = fls ( fw_pages - 1 );
-	fw_pages = ( 1 << log2_fw_pages );
 	DBGC ( arbel, "Arbel %p requires %d kB for firmware\n",
 	       arbel, ( fw_pages * 4 ) );
 	eq_set_ci_base_addr =
@@ -2212,19 +2243,31 @@
 			     struct arbelprm_init_hca *init_hca ) {
 	struct arbelprm_scalar_parameter icm_size;
 	struct arbelprm_scalar_parameter icm_aux_size;
+	struct arbelprm_scalar_parameter unmap_icm;
 	union arbelprm_doorbell_record *db_rec;
 	size_t icm_offset = 0;
 	unsigned int log_num_uars, log_num_qps, log_num_srqs, log_num_ees;
 	unsigned int log_num_cqs, log_num_mtts, log_num_mpts, log_num_rdbs;
 	unsigned int log_num_eqs, log_num_mcs;
-	size_t db_rec_offset;
 	size_t len;
 	physaddr_t icm_phys;
 	int rc;
 
-	/* Queue pair contexts */
+	/* Calculate number of each object type within ICM */
 	log_num_qps = fls ( arbel->limits.reserved_qps +
 			    ARBEL_RSVD_SPECIAL_QPS + ARBEL_MAX_QPS - 1 );
+	log_num_srqs = fls ( arbel->limits.reserved_srqs - 1 );
+	log_num_ees = fls ( arbel->limits.reserved_ees - 1 );
+	log_num_cqs = fls ( arbel->limits.reserved_cqs + ARBEL_MAX_CQS - 1 );
+	log_num_eqs = fls ( arbel->limits.reserved_eqs + ARBEL_MAX_EQS - 1 );
+	log_num_mtts = fls ( arbel->limits.reserved_mtts - 1 );
+	log_num_mpts = fls ( arbel->limits.reserved_mrws + 1 - 1 );
+	log_num_rdbs = fls ( arbel->limits.reserved_rdbs - 1 );
+	log_num_uars = fls ( arbel->limits.reserved_uars +
+			     1 /* single UAR used */ - 1 );
+	log_num_mcs = ARBEL_LOG_MULTICAST_HASH_SIZE;
+
+	/* Queue pair contexts */
 	len = ( ( 1 << log_num_qps ) * arbel->limits.qpc_entry_size );
 	icm_offset = icm_align ( icm_offset, len );
 	MLX_FILL_2 ( init_hca, 13,
@@ -2249,7 +2292,6 @@
 	icm_offset += len;
 
 	/* Completion queue contexts */
-	log_num_cqs = fls ( arbel->limits.reserved_cqs + ARBEL_MAX_CQS - 1 );
 	len = ( ( 1 << log_num_cqs ) * arbel->limits.cqc_entry_size );
 	icm_offset = icm_align ( icm_offset, len );
 	MLX_FILL_2 ( init_hca, 21,
@@ -2262,24 +2304,7 @@
 	       icm_offset, ( icm_offset + len ) );
 	icm_offset += len;
 
-	/* User access region contexts */
-	log_num_uars = fls ( arbel->limits.reserved_uars +
-			     1 /* single UAR used */ - 1 );
-	len = ( ( 1 << log_num_uars ) * ARBEL_PAGE_SIZE );
-	icm_offset = icm_align ( icm_offset, len );
-	MLX_FILL_1 ( init_hca, 74, uar_parameters.log_max_uars, log_num_uars );
-	MLX_FILL_1 ( init_hca, 79,
-		     uar_parameters.uar_context_base_addr_l, icm_offset );
-	db_rec_offset = ( icm_offset +
-			  ( arbel->limits.reserved_uars * ARBEL_PAGE_SIZE ) );
-	DBGC ( arbel, "Arbel %p UAR is %d x %#zx at [%zx,%zx), doorbells "
-	       "[%zx,%zx)\n", arbel, ( 1 << log_num_uars ), ARBEL_PAGE_SIZE,
-	       icm_offset, ( icm_offset + len ), db_rec_offset,
-	       ( db_rec_offset + ARBEL_PAGE_SIZE ) );
-	icm_offset += len;
-
 	/* Event queue contexts */
-	log_num_eqs = fls ( arbel->limits.reserved_eqs + ARBEL_MAX_EQS - 1 );
 	len = ( ( 1 << log_num_eqs ) * arbel->limits.eqc_entry_size );
 	icm_offset = icm_align ( icm_offset, len );
 	MLX_FILL_2 ( init_hca, 33,
@@ -2293,7 +2318,6 @@
 	icm_offset += len;
 
 	/* End-to-end contexts */
-	log_num_ees = fls ( arbel->limits.reserved_ees - 1 );
 	len = ( ( 1 << log_num_ees ) * arbel->limits.eec_entry_size );
 	icm_offset = icm_align ( icm_offset, len );
 	MLX_FILL_2 ( init_hca, 17,
@@ -2307,7 +2331,6 @@
 	icm_offset += len;
 
 	/* Shared receive queue contexts */
-	log_num_srqs = fls ( arbel->limits.reserved_srqs - 1 );
 	len = ( ( 1 << log_num_srqs ) * arbel->limits.srqc_entry_size );
 	icm_offset = icm_align ( icm_offset, len );
 	MLX_FILL_2 ( init_hca, 19,
@@ -2321,7 +2344,6 @@
 	icm_offset += len;
 
 	/* Memory protection table */
-	log_num_mpts = fls ( arbel->limits.reserved_mrws + 1 - 1 );
 	len = ( ( 1 << log_num_mpts ) * arbel->limits.mpt_entry_size );
 	icm_offset = icm_align ( icm_offset, len );
 	MLX_FILL_1 ( init_hca, 61,
@@ -2334,7 +2356,6 @@
 	icm_offset += len;
 
 	/* Remote read data base table */
-	log_num_rdbs = fls ( arbel->limits.reserved_rdbs - 1 );
 	len = ( ( 1 << log_num_rdbs ) * ARBEL_RDB_ENTRY_SIZE );
 	icm_offset = icm_align ( icm_offset, len );
 	MLX_FILL_1 ( init_hca, 37,
@@ -2357,7 +2378,6 @@
 	icm_offset += len;
 
 	/* Multicast table */
-	log_num_mcs = ARBEL_LOG_MULTICAST_HASH_SIZE;
 	len = ( ( 1 << log_num_mcs ) * sizeof ( struct arbelprm_mgm_entry ) );
 	icm_offset = icm_align ( icm_offset, len );
 	MLX_FILL_1 ( init_hca, 49,
@@ -2377,7 +2397,6 @@
 	icm_offset += len;
 
 	/* Memory translation table */
-	log_num_mtts = fls ( arbel->limits.reserved_mtts - 1 );
 	len = ( ( 1 << log_num_mtts ) * arbel->limits.mtt_entry_size );
 	icm_offset = icm_align ( icm_offset, len );
 	MLX_FILL_1 ( init_hca, 65,
@@ -2398,8 +2417,30 @@
 	       icm_offset, ( icm_offset + len ) );
 	icm_offset += len;
 
-	/* Round up to a whole number of pages */
-	arbel->icm_len = icm_align ( icm_offset, ARBEL_PAGE_SIZE );
+	/* Record amount of ICM to be allocated */
+	icm_offset = icm_align ( icm_offset, ARBEL_PAGE_SIZE );
+	arbel->icm_len = icm_offset;
+
+	/* User access region contexts
+	 *
+	 * The reserved UAR(s) do not need to be backed by physical
+	 * memory, and our UAR is allocated separately; neither are
+	 * part of the umalloc()ed ICM block, but both contribute to
+	 * the total length of ICM virtual address space.
+	 */
+	len = ( ( 1 << log_num_uars ) * ARBEL_PAGE_SIZE );
+	icm_offset = icm_align ( icm_offset, len );
+	MLX_FILL_1 ( init_hca, 74, uar_parameters.log_max_uars, log_num_uars );
+	MLX_FILL_1 ( init_hca, 79,
+		     uar_parameters.uar_context_base_addr_l, icm_offset );
+	arbel->db_rec_offset =
+		( icm_offset +
+		  ( arbel->limits.reserved_uars * ARBEL_PAGE_SIZE ) );
+	DBGC ( arbel, "Arbel %p UAR is %d x %#zx at [%zx,%zx), doorbells "
+	       "[%zx,%zx)\n", arbel, ( 1 << log_num_uars ), ARBEL_PAGE_SIZE,
+	       icm_offset, ( icm_offset + len ), arbel->db_rec_offset,
+	       ( arbel->db_rec_offset + ARBEL_PAGE_SIZE ) );
+	icm_offset += len;
 
 	/* Get ICM auxiliary area size */
 	memset ( &icm_size, 0, sizeof ( icm_size ) );
@@ -2420,10 +2461,17 @@
 	arbel->icm = umalloc ( arbel->icm_len + arbel->icm_aux_len );
 	if ( ! arbel->icm ) {
 		rc = -ENOMEM;
-		goto err_alloc;
+		goto err_alloc_icm;
 	}
 	icm_phys = user_to_phys ( arbel->icm, 0 );
 
+	/* Allocate doorbell UAR */
+	arbel->db_rec = malloc_dma ( ARBEL_PAGE_SIZE, ARBEL_PAGE_SIZE );
+	if ( ! arbel->db_rec ) {
+		rc = -ENOMEM;
+		goto err_alloc_doorbell;
+	}
+
 	/* Map ICM auxiliary area */
 	DBGC ( arbel, "Arbel %p ICM AUX at [%08lx,%08lx)\n",
 	       arbel, icm_phys, ( icm_phys + arbel->icm_aux_len ) );
@@ -2435,7 +2483,7 @@
 	}
 	icm_phys += arbel->icm_aux_len;
 
-	/* MAP ICM area */
+	/* Map ICM area */
 	DBGC ( arbel, "Arbel %p ICM at [%08lx,%08lx)\n",
 	       arbel, icm_phys, ( icm_phys + arbel->icm_len ) );
 	if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_icm,
@@ -2444,9 +2492,21 @@
 		       arbel, strerror ( rc ) );
 		goto err_map_icm;
 	}
-	arbel->db_rec = phys_to_virt ( icm_phys + db_rec_offset );
 	icm_phys += arbel->icm_len;
 
+	/* Map doorbell UAR */
+	DBGC ( arbel, "Arbel %p UAR at [%08lx,%08lx)\n",
+	       arbel, virt_to_phys ( arbel->db_rec ),
+	       ( virt_to_phys ( arbel->db_rec ) + ARBEL_PAGE_SIZE ) );
+	if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_icm,
+				    arbel->db_rec_offset,
+				    virt_to_phys ( arbel->db_rec ),
+				    ARBEL_PAGE_SIZE ) ) != 0 ) {
+		DBGC ( arbel, "Arbel %p could not map doorbell UAR: %s\n",
+		       arbel, strerror ( rc ) );
+		goto err_map_doorbell;
+	}
+
 	/* Initialise doorbell records */
 	memset ( arbel->db_rec, 0, ARBEL_PAGE_SIZE );
 	db_rec = &arbel->db_rec[ARBEL_GROUP_SEPARATOR_DOORBELL];
@@ -2454,13 +2514,22 @@
 
 	return 0;
 
-	arbel_cmd_unmap_icm ( arbel, ( arbel->icm_len / ARBEL_PAGE_SIZE ) );
+	memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
+	MLX_FILL_1 ( &unmap_icm, 1, value, arbel->db_rec_offset );
+	arbel_cmd_unmap_icm ( arbel, 1, &unmap_icm );
+ err_map_doorbell:
+	memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
+	arbel_cmd_unmap_icm ( arbel, ( arbel->icm_len / ARBEL_PAGE_SIZE ),
+			      &unmap_icm );
  err_map_icm:
 	arbel_cmd_unmap_icm_aux ( arbel );
  err_map_icm_aux:
+	free_dma ( arbel->db_rec, ARBEL_PAGE_SIZE );
+	arbel->db_rec= NULL;
+ err_alloc_doorbell:
 	ufree ( arbel->icm );
 	arbel->icm = UNULL;
- err_alloc:
+ err_alloc_icm:
  err_set_icm_size:
 	return rc;
 }
@@ -2471,8 +2540,17 @@
  * @v arbel		Arbel device
  */
 static void arbel_free_icm ( struct arbel *arbel ) {
-	arbel_cmd_unmap_icm ( arbel, ( arbel->icm_len / ARBEL_PAGE_SIZE ) );
+	struct arbelprm_scalar_parameter unmap_icm;
+
+	memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
+	MLX_FILL_1 ( &unmap_icm, 1, value, arbel->db_rec_offset );
+	arbel_cmd_unmap_icm ( arbel, 1, &unmap_icm );
+	memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
+	arbel_cmd_unmap_icm ( arbel, ( arbel->icm_len / ARBEL_PAGE_SIZE ),
+			      &unmap_icm );
 	arbel_cmd_unmap_icm_aux ( arbel );
+	free_dma ( arbel->db_rec, ARBEL_PAGE_SIZE );
+	arbel->db_rec = NULL;
 	ufree ( arbel->icm );
 	arbel->icm = UNULL;
 }

src/drivers/infiniband/arbel.h  (+4, -3)

@@ -468,11 +468,12 @@
 	size_t icm_aux_len;
 	/** ICM area */
 	userptr_t icm;
-
-	/** Event queue */
-	struct arbel_event_queue eq;
+	/** Offset within ICM of doorbell records */
+	size_t db_rec_offset;
 	/** Doorbell records */
 	union arbelprm_doorbell_record *db_rec;
+	/** Event queue */
+	struct arbel_event_queue eq;
 	/** Unrestricted LKey
 	 *
 	 * Used to get unrestricted memory access.
