@@ -1778,7 +1778,9 @@ static int hermon_create_eq ( struct hermon *hermon ) {
 
 	/* Hand queue over to hardware */
 	memset ( &eqctx, 0, sizeof ( eqctx ) );
-	MLX_FILL_1 ( &eqctx, 0, st, 0xa /* "Fired" */ );
+	MLX_FILL_2 ( &eqctx, 0,
+		     st, 0xa /* "Fired" */,
+		     oi, 1 );
 	MLX_FILL_1 ( &eqctx, 2,
 		     page_offset, ( hermon_eq->mtt.page_offset >> 5 ) );
 	MLX_FILL_1 ( &eqctx, 3, log_eq_size, fls ( HERMON_NUM_EQES - 1 ) );
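A note on the first hunk: MLX_FILL_2 assembles both named fields ( st and oi ) and writes them into dword 0 of the EQ context together; assuming the MLX_FILL helpers behave as they do elsewhere in this driver (each call stores the whole dword), two separate MLX_FILL_1 calls on the same dword would not accumulate, which is presumably why the call is widened rather than duplicated. A minimal sketch of the resulting initialisation, assuming the struct hermonprm_eqc context type used by hermon_create_eq and that hermon.h pulls in the needed definitions (illustration only, not part of the patch):

#include <string.h>
#include "hermon.h"	/* assumed to provide MLX_FILL_2 and struct hermonprm_eqc */

static void sketch_eqctx_init ( struct hermonprm_eqc *eqctx ) {
	/* Illustrative sketch: set the "Fired" state and the oi bit
	 * (overrun ignore, going by the usual Mellanox EQ context
	 * naming) in dword 0 of the EQ context in one store,
	 * mirroring the hunk above.
	 */
	memset ( eqctx, 0, sizeof ( *eqctx ) );
	MLX_FILL_2 ( eqctx, 0,
		     st, 0xa /* "Fired" */,
		     oi, 1 );
}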
@@ -1831,8 +1833,7 @@ static void hermon_destroy_eq ( struct hermon *hermon ) {
 	int rc;
 
 	/* Unmap events from event queue */
-	memset ( &mask, 0, sizeof ( mask ) );
-	MLX_FILL_1 ( &mask, 1, port_state_change, 1 );
+	memset ( &mask, 0xff, sizeof ( mask ) );
 	if ( ( rc = hermon_cmd_map_eq ( hermon,
 					( HERMON_UNMAP_EQ | hermon_eq->eqn ),
 					&mask ) ) != 0 ) {
@@ -2879,6 +2880,11 @@ static int hermon_get_cap ( struct hermon *hermon ) {
 		( 1 << MLX_GET ( &dev_cap, log2_rsvd_cqs ) );
 	hermon->cap.cqc_entry_size = MLX_GET ( &dev_cap, cqc_entry_sz );
 	hermon->cap.reserved_eqs = MLX_GET ( &dev_cap, num_rsvd_eqs );
+	if ( hermon->cap.reserved_eqs == 0 ) {
+		/* Backward compatibility */
+		hermon->cap.reserved_eqs =
+			( 1 << MLX_GET ( &dev_cap, log2_rsvd_eqs ) );
+	}
 	hermon->cap.eqc_entry_size = MLX_GET ( &dev_cap, eqc_entry_sz );
 	hermon->cap.reserved_mtts =
 		( 1 << MLX_GET ( &dev_cap, log2_rsvd_mtts ) );
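The last hunk, read in isolation: the driver now takes the reserved-EQ count from num_rsvd_eqs and, when that field reads as zero (presumably firmware that does not populate it), falls back to the log2-encoded log2_rsvd_eqs value. A standalone sketch of that fallback, assuming the struct hermonprm_query_dev_cap type used by hermon_get_cap and that hermon.h provides the needed definitions; the helper name is hypothetical and not part of the patch:

#include "hermon.h"	/* assumed to provide MLX_GET and struct hermonprm_query_dev_cap */

/* Hypothetical helper, for illustration only: prefer the explicit
 * num_rsvd_eqs count and fall back to the log2-encoded field when
 * it reads as zero.
 */
static unsigned int
hermon_reserved_eqs ( struct hermonprm_query_dev_cap *dev_cap ) {
	unsigned int reserved_eqs;

	reserved_eqs = MLX_GET ( dev_cap, num_rsvd_eqs );
	if ( reserved_eqs == 0 ) {
		/* Backward compatibility, as in the hunk above */
		reserved_eqs = ( 1 << MLX_GET ( dev_cap, log2_rsvd_eqs ) );
	}
	return reserved_eqs;
}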