
[arbel] Map all event types to our event queue

Only port state change events are currently mapped to our event queue,
since those are the only events we are prepared to handle.  This
ignores a potentially useful source of diagnostic information in the
case of unexpected failures.

Fix by mapping all events to the event queue; a build with debugging
enabled will therefore at least dump the raw content of the unexpected
events.

Signed-off-by: Michael Brown <mcb30@ipxe.org>
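
For illustration only: a minimal, self-contained sketch of the pattern the
commit message relies on.  The names below (poll_eq_sketch, eqe_sketch,
EV_PORT_STATE_CHANGE) are hypothetical, not the driver's real helpers;
the actual driver uses its own event-queue polling code and debug macros.
Once every event type is mapped to the queue, any event the poller does
not recognise falls through to a catch-all branch that dumps its raw
content.

/* Sketch only, not the driver's code.  Hypothetical names throughout. */
#include <stdio.h>
#include <stddef.h>

#define EV_PORT_STATE_CHANGE 0x09	/* hypothetical event code */

struct eqe_sketch {
	unsigned char type;
	unsigned char data[31];
};

static void handle_port_state_change ( struct eqe_sketch *eqe ) {
	/* ... the one event type handled before this change ... */
	( void ) eqe;
}

static void poll_eq_sketch ( struct eqe_sketch *eqe ) {
	switch ( eqe->type ) {
	case EV_PORT_STATE_CHANGE:
		handle_port_state_change ( eqe );
		break;
	default:
		/* Newly delivered event types: dump the raw entry so a
		 * debugging build still yields diagnostic information.
		 */
		printf ( "unexpected event type %#x:", eqe->type );
		for ( size_t i = 0 ; i < sizeof ( eqe->data ) ; i++ )
			printf ( " %02x", eqe->data[i] );
		printf ( "\n" );
		break;
	}
}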
tags/v1.20.1
Michael Brown, 14 years ago
commit 46f2580049
2 changed files with 21 additions and 5 deletions:
  src/drivers/infiniband/arbel.c  +2  -3
  src/drivers/infiniband/arbel.h  +19 -2

src/drivers/infiniband/arbel.c  (+2, -3)

@@ -607,7 +607,7 @@
 	MLX_FILL_2 ( &cqctx, 3,
 		     usr_page, arbel->limits.reserved_uars,
 		     log_cq_size, fls ( cq->num_cqes - 1 ) );
-	MLX_FILL_1 ( &cqctx, 5, c_eqn, ARBEL_NO_EQ );
+	MLX_FILL_1 ( &cqctx, 5, c_eqn, arbel->eq.eqn );
 	MLX_FILL_1 ( &cqctx, 6, pd, ARBEL_GLOBAL_PD );
 	MLX_FILL_1 ( &cqctx, 7, l_key, arbel->reserved_lkey );
 	MLX_FILL_1 ( &cqctx, 12, cqn, cq->cqn );
@@ -1350,8 +1350,7 @@
 	}
 
 	/* Map events to this event queue */
-	memset ( &mask, 0, sizeof ( mask ) );
-	MLX_FILL_1 ( &mask, 1, port_state_change, 1 );
+	memset ( &mask, 0xff, sizeof ( mask ) );
 	if ( ( rc = arbel_cmd_map_eq ( arbel,
 				       ( ARBEL_MAP_EQ | arbel_eq->eqn ),
 				       &mask ) ) != 0 ) {
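
Two things change in arbel.c above: the completion queue context now names
the driver's event queue (arbel->eq.eqn) rather than ARBEL_NO_EQ, which
appears to direct completion events at that queue, and the MAP_EQ mask is
filled with 0xff bytes instead of setting only the port_state_change bit.
A hedged sketch of the mask idea follows; the 64-bit width and the bit
position used are illustrative assumptions, and the real driver builds the
mask with MLX_FILL_1 into an arbelprm structure.

/* Sketch only: selecting a single event bit versus requesting all of them. */
#include <stdint.h>
#include <string.h>

struct event_mask_sketch {
	uint8_t byte[8];		/* one bit per event type */
};

/* Before: deliver only port state change events */
static void map_port_state_only ( struct event_mask_sketch *mask ) {
	memset ( mask, 0, sizeof ( *mask ) );
	mask->byte[1] |= 0x02;		/* hypothetical bit position */
}

/* After: deliver every event type the hardware can report */
static void map_all_events ( struct event_mask_sketch *mask ) {
	memset ( mask, 0xff, sizeof ( *mask ) );
}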

src/drivers/infiniband/arbel.h  (+19, -2)

@@ -119,9 +119,26 @@
 	pseudo_bit_t reserved0[0x00020];
 /* -------------- */
 	pseudo_bit_t completion[0x00001];
-	pseudo_bit_t reserved1[0x0008];
+	pseudo_bit_t path_migration_succeeded[0x00001];
+	pseudo_bit_t communication_established[0x00001];
+	pseudo_bit_t send_queue_drained[0x00001];
+	pseudo_bit_t cq_error[0x00001];
+	pseudo_bit_t wq_catastrophe[0x00001];
+	pseudo_bit_t qpc_catastrophe[0x00001];
+	pseudo_bit_t path_migration_failed[0x00001];
+	pseudo_bit_t reserved1[0x00001];
 	pseudo_bit_t port_state_change[0x00001];
-	pseudo_bit_t reserved2[0x00016];
+	pseudo_bit_t command_done[0x00001];
+	pseudo_bit_t reserved2[0x00005];
+	pseudo_bit_t wq_invalid_request[0x00001];
+	pseudo_bit_t wq_access_violation[0x00001];
+	pseudo_bit_t srq_catastrophe[0x00001];
+	pseudo_bit_t srq_last_wqe[0x00001];
+	pseudo_bit_t srq_rq_limit[0x00001];
+	pseudo_bit_t gpio[0x00001];
+	pseudo_bit_t clientreregister[0x00001];
+	pseudo_bit_t path_migration_armed[0x00001];
+	pseudo_bit_t reserved3[0x00008];
 } __attribute__ (( packed ));
 
 struct arbelprm_eq_set_ci_st {
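
The header change only renames bits: the two reserved runs (0x8 and 0x16
bits) are broken out into individually named event bits plus smaller
reserved runs, leaving the overall width unchanged.  A quick compile-time
check of that arithmetic, as an illustrative sketch rather than part of
the commit; the counts are copied from the pseudo_bit_t array sizes above.

/* Sketch: confirm the old and new bit layouts both span 64 bits. */
#include <assert.h>

/* reserved0 + completion + reserved1 + port_state_change + reserved2 */
#define OLD_EVENT_BITS ( 0x20 + 0x1 + 0x8 + 0x1 + 0x16 )

/* reserved0 + completion + eight named/reserved bits + port_state_change
 * + command_done + reserved2 + eight named bits + reserved3
 */
#define NEW_EVENT_BITS ( 0x20 + 0x1 + ( 8 * 0x1 ) + 0x1 + 0x1 + 0x5 + \
			 ( 8 * 0x1 ) + 0x8 )

static_assert ( OLD_EVENT_BITS == 64, "old event mask layout is 64 bits" );
static_assert ( NEW_EVENT_BITS == 64, "new event mask layout is 64 bits" );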
