|
@@ -0,0 +1,414 @@
|
|
1
|
+/*
|
|
2
|
+ * Copyright (C) 2009 Michael Brown <mbrown@fensystems.co.uk>.
|
|
3
|
+ *
|
|
4
|
+ * This program is free software; you can redistribute it and/or
|
|
5
|
+ * modify it under the terms of the GNU General Public License as
|
|
6
|
+ * published by the Free Software Foundation; either version 2 of the
|
|
7
|
+ * License, or any later version.
|
|
8
|
+ *
|
|
9
|
+ * This program is distributed in the hope that it will be useful, but
|
|
10
|
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
11
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
12
|
+ * General Public License for more details.
|
|
13
|
+ *
|
|
14
|
+ * You should have received a copy of the GNU General Public License
|
|
15
|
+ * along with this program; if not, write to the Free Software
|
|
16
|
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
|
17
|
+ */
|
|
18
|
+
|
|
19
|
+FILE_LICENCE ( GPL2_OR_LATER );
|
|
20
|
+
|
|
21
|
+#include <stdint.h>
|
|
22
|
+#include <stdlib.h>
|
|
23
|
+#include <string.h>
|
|
24
|
+#include <errno.h>
|
|
25
|
+#include <stdio.h>
|
|
26
|
+#include <unistd.h>
|
|
27
|
+#include <byteswap.h>
|
|
28
|
+#include <gpxe/infiniband.h>
|
|
29
|
+#include <gpxe/iobuf.h>
|
|
30
|
+#include <gpxe/ib_gma.h>
|
|
31
|
+
|
|
32
|
+/**
|
|
33
|
+ * @file
|
|
34
|
+ *
|
|
35
|
+ * Infiniband General Management Agent
|
|
36
|
+ *
|
|
37
|
+ */
|
|
38
|
+
|
|
39
|
/** A MAD request awaiting a response */
struct ib_mad_request {
	/** Associated GMA */
	struct ib_gma *gma;
	/** List of outstanding MAD requests
	 *
	 * Linked into ib_gma::requests.
	 */
	struct list_head list;
	/** Retry timer
	 *
	 * Triggers both the initial transmission and any
	 * retransmissions until a matching response is received.
	 */
	struct retry_timer timer;
	/** Destination address */
	struct ib_address_vector av;
	/** MAD request
	 *
	 * Local copy of the MAD; holds the allocated TID used to
	 * match incoming responses.
	 */
	union ib_mad mad;
};
|
|
52
|
+
|
|
53
|
/** GMA number of send WQEs
 *
 * This is a policy decision.
 */
#define IB_GMA_NUM_SEND_WQES 4

/** GMA number of receive WQEs
 *
 * This is a policy decision.
 */
#define IB_GMA_NUM_RECV_WQES 2

/** GMA number of completion queue entries
 *
 * This is a policy decision.
 */
#define IB_GMA_NUM_CQES 8

/** GMA TID magic signature
 *
 * The ASCII characters "gPXE", used as the high word of our
 * transaction IDs so that our own requests are recognisable.
 */
#define IB_GMA_TID_MAGIC ( ( 'g' << 24 ) | ( 'P' << 16 ) | ( 'X' << 8 ) | 'E' )

/** TID to use for next MAD request
 *
 * Incremented for each request issued; combined with
 * IB_GMA_TID_MAGIC to form a unique transaction identifier.
 */
static unsigned int next_request_tid;
|
|
76
|
+
|
|
77
|
+/**
|
|
78
|
+ * Identify attribute handler
|
|
79
|
+ *
|
|
80
|
+ * @v mgmt_class Management class
|
|
81
|
+ * @v class_version Class version
|
|
82
|
+ * @v method Method
|
|
83
|
+ * @v attr_id Attribute ID (in network byte order)
|
|
84
|
+ * @ret handler Attribute handler (or NULL)
|
|
85
|
+ */
|
|
86
|
+static int ib_handle_mad ( struct ib_device *ibdev,
|
|
87
|
+ union ib_mad *mad ) {
|
|
88
|
+ struct ib_mad_hdr *hdr = &mad->hdr;
|
|
89
|
+ struct ib_mad_handler *handler;
|
|
90
|
+
|
|
91
|
+ for_each_table_entry ( handler, IB_MAD_HANDLERS ) {
|
|
92
|
+ if ( ( handler->mgmt_class == hdr->mgmt_class ) &&
|
|
93
|
+ ( handler->class_version == hdr->class_version ) &&
|
|
94
|
+ ( handler->method == hdr->method ) &&
|
|
95
|
+ ( handler->attr_id == hdr->attr_id ) ) {
|
|
96
|
+ hdr->method = handler->resp_method;
|
|
97
|
+ return handler->handle ( ibdev, mad );
|
|
98
|
+ }
|
|
99
|
+ }
|
|
100
|
+
|
|
101
|
+ hdr->method = IB_MGMT_METHOD_TRAP;
|
|
102
|
+ hdr->status = htons ( IB_MGMT_STATUS_UNSUPPORTED_METHOD_ATTR );
|
|
103
|
+ return -ENOTSUP;
|
|
104
|
+}
|
|
105
|
+
|
|
106
|
/**
 * Complete GMA receive
 *
 * Validates an incoming MAD, dequeues any matching outstanding
 * request, passes the MAD to its handler, and (if the handler
 * produced a response) transmits the response from the same I/O
 * buffer.
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v av		Address vector
 * @v iobuf		I/O buffer
 * @v rc		Completion status code
 */
static void ib_gma_complete_recv ( struct ib_device *ibdev,
				   struct ib_queue_pair *qp,
				   struct ib_address_vector *av,
				   struct io_buffer *iobuf, int rc ) {
	struct ib_gma *gma = ib_qp_get_ownerdata ( qp );
	struct ib_mad_request *request;
	union ib_mad *mad;
	struct ib_mad_hdr *hdr;
	unsigned int hop_pointer;
	unsigned int hop_count;

	/* Ignore errors */
	if ( rc != 0 ) {
		DBGC ( gma, "GMA %p RX error: %s\n", gma, strerror ( rc ) );
		goto out;
	}

	/* Sanity checks: must be exactly one full MAD */
	if ( iob_len ( iobuf ) != sizeof ( *mad ) ) {
		DBGC ( gma, "GMA %p RX bad size (%zd bytes)\n",
		       gma, iob_len ( iobuf ) );
		DBGC_HDA ( gma, 0, iobuf->data, iob_len ( iobuf ) );
		goto out;
	}
	mad = iobuf->data;
	hdr = &mad->hdr;
	if ( hdr->base_version != IB_MGMT_BASE_VERSION ) {
		DBGC ( gma, "GMA %p unsupported base version %x\n",
		       gma, hdr->base_version );
		DBGC_HDA ( gma, 0, mad, sizeof ( *mad ) );
		goto out;
	}
	DBGC ( gma, "GMA %p RX TID %08x%08x (%02x,%02x,%02x,%04x) status "
	       "%04x\n", gma, ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ),
	       hdr->mgmt_class, hdr->class_version, hdr->method,
	       ntohs ( hdr->attr_id ), ntohs ( hdr->status ) );
	DBGC2_HDA ( gma, 0, mad, sizeof ( *mad ) );

	/* Dequeue request if applicable: a TID match means this MAD
	 * is the response to one of our outstanding requests, which
	 * can now stop being retransmitted.
	 */
	list_for_each_entry ( request, &gma->requests, list ) {
		if ( memcmp ( &request->mad.hdr.tid, &hdr->tid,
			      sizeof ( request->mad.hdr.tid ) ) == 0 ) {
			stop_timer ( &request->timer );
			list_del ( &request->list );
			free ( request );
			break;
		}
	}

	/* Handle MAD, if possible.  ib_handle_mad() rewrites the MAD
	 * in place into a response (or an error response).
	 */
	if ( ( rc = ib_handle_mad ( ibdev, mad ) ) != 0 ) {
		DBGC ( gma, "GMA %p could not handle TID %08x%08x: %s\n",
		       gma, ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ),
		       strerror ( rc ) );
		/* Do not abort; we may want to send an error response */
	}

	/* Finish processing if we have no response to send (a zero
	 * method is used as the "no response" sentinel).
	 */
	if ( ! hdr->method )
		goto out;

	DBGC ( gma, "GMA %p TX TID %08x%08x (%02x,%02x,%02x,%04x)\n", gma,
	       ntohl ( hdr->tid[0] ), ntohl ( hdr->tid[1] ), hdr->mgmt_class,
	       hdr->class_version, hdr->method, ntohs ( hdr->attr_id ) );
	DBGC2_HDA ( gma, 0, mad, sizeof ( *mad ) );

	/* Set response fields for directed route SMPs */
	if ( hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ) {
		struct ib_mad_smp *smp = &mad->smp;

		hdr->status |= htons ( IB_SMP_STATUS_D_INBOUND );
		hop_pointer = smp->mad_hdr.class_specific.smp.hop_pointer;
		hop_count = smp->mad_hdr.class_specific.smp.hop_count;
		assert ( hop_count == hop_pointer );
		if ( hop_pointer < ( sizeof ( smp->return_path.hops ) /
				     sizeof ( smp->return_path.hops[0] ) ) ) {
			smp->return_path.hops[hop_pointer] = ibdev->port;
		} else {
			DBGC ( gma, "GMA %p invalid hop pointer %d\n",
			       gma, hop_pointer );
			goto out;
		}
	}

	/* Construct return address */
	av->qkey = ( ( av->qpn == IB_QPN_SMA ) ? IB_QKEY_SMA : IB_QKEY_GMA );
	av->rate = IB_RATE_2_5;

	/* Send MAD response, if applicable.  The received buffer is
	 * reused (and ownership transferred) for the transmission.
	 */
	if ( ( rc = ib_post_send ( ibdev, qp, av,
				   iob_disown ( iobuf ) ) ) != 0 ) {
		DBGC ( gma, "GMA %p could not send MAD response: %s\n",
		       gma, strerror ( rc ) );
		goto out;
	}

 out:
	/* No-op if iob_disown() already transferred the buffer */
	free_iob ( iobuf );
}
|
|
215
|
+
|
|
216
|
+/**
|
|
217
|
+ * Complete GMA send
|
|
218
|
+ *
|
|
219
|
+ *
|
|
220
|
+ * @v ibdev Infiniband device
|
|
221
|
+ * @v qp Queue pair
|
|
222
|
+ * @v iobuf I/O buffer
|
|
223
|
+ * @v rc Completion status code
|
|
224
|
+ */
|
|
225
|
+static void ib_gma_complete_send ( struct ib_device *ibdev __unused,
|
|
226
|
+ struct ib_queue_pair *qp,
|
|
227
|
+ struct io_buffer *iobuf, int rc ) {
|
|
228
|
+ struct ib_gma *gma = ib_qp_get_ownerdata ( qp );
|
|
229
|
+
|
|
230
|
+ if ( rc != 0 ) {
|
|
231
|
+ DBGC ( gma, "GMA %p send completion error: %s\n",
|
|
232
|
+ gma, strerror ( rc ) );
|
|
233
|
+ }
|
|
234
|
+ free_iob ( iobuf );
|
|
235
|
+}
|
|
236
|
+
|
|
237
|
/** GMA completion operations
 *
 * Shared by the GMA's send and receive sides, since a single
 * completion queue services both work queues.
 */
static struct ib_completion_queue_operations ib_gma_completion_ops = {
	.complete_send = ib_gma_complete_send,
	.complete_recv = ib_gma_complete_recv,
};
|
|
242
|
+
|
|
243
|
/**
 * Handle MAD request timer expiry
 *
 * Transmits (or retransmits) the MAD request.  The timer drives both
 * the initial transmission and all retries; it is restarted before
 * the send is attempted, so a failed buffer allocation or post is
 * simply retried on the next expiry.
 *
 * @v timer		Retry timer
 * @v expired		Failure indicator
 */
static void ib_gma_timer_expired ( struct retry_timer *timer, int expired ) {
	struct ib_mad_request *request =
		container_of ( timer, struct ib_mad_request, timer );
	struct ib_gma *gma = request->gma;
	struct ib_device *ibdev = gma->ibdev;
	struct io_buffer *iobuf;
	int rc;

	/* Abandon TID if we have tried too many times */
	if ( expired ) {
		DBGC ( gma, "GMA %p abandoning TID %08x%08x\n",
		       gma, ntohl ( request->mad.hdr.tid[0] ),
		       ntohl ( request->mad.hdr.tid[1] ) );
		list_del ( &request->list );
		free ( request );
		return;
	}

	DBGC ( gma, "GMA %p TX TID %08x%08x (%02x,%02x,%02x,%04x)\n",
	       gma, ntohl ( request->mad.hdr.tid[0] ),
	       ntohl ( request->mad.hdr.tid[1] ), request->mad.hdr.mgmt_class,
	       request->mad.hdr.class_version, request->mad.hdr.method,
	       ntohs ( request->mad.hdr.attr_id ) );
	DBGC2_HDA ( gma, 0, &request->mad, sizeof ( request->mad ) );

	/* Restart retransmission timer (before attempting the send,
	 * so that any failure below is retried automatically)
	 */
	start_timer ( timer );

	/* Construct I/O buffer from the stored copy of the MAD */
	iobuf = alloc_iob ( sizeof ( request->mad ) );
	if ( ! iobuf ) {
		DBGC ( gma, "GMA %p could not allocate buffer for TID "
		       "%08x%08x\n", gma, ntohl ( request->mad.hdr.tid[0] ),
		       ntohl ( request->mad.hdr.tid[1] ) );
		return;
	}
	memcpy ( iob_put ( iobuf, sizeof ( request->mad ) ), &request->mad,
		 sizeof ( request->mad ) );

	/* Post send request; buffer ownership passes to the queue
	 * pair on success
	 */
	if ( ( rc = ib_post_send ( ibdev, gma->qp, &request->av,
				   iobuf ) ) != 0 ) {
		DBGC ( gma, "GMA %p could not send TID %08x%08x: %s\n",
		       gma, ntohl ( request->mad.hdr.tid[0] ),
		       ntohl ( request->mad.hdr.tid[1] ), strerror ( rc ) );
		free_iob ( iobuf );
		return;
	}
}
|
|
298
|
+
|
|
299
|
+/**
|
|
300
|
+ * Issue MAD request
|
|
301
|
+ *
|
|
302
|
+ * @v gma General management agent
|
|
303
|
+ * @v mad MAD request
|
|
304
|
+ * @v av Destination address, or NULL for SM
|
|
305
|
+ * @ret rc Return status code
|
|
306
|
+ */
|
|
307
|
+int ib_gma_request ( struct ib_gma *gma, union ib_mad *mad,
|
|
308
|
+ struct ib_address_vector *av ) {
|
|
309
|
+ struct ib_device *ibdev = gma->ibdev;
|
|
310
|
+ struct ib_mad_request *request;
|
|
311
|
+
|
|
312
|
+ /* Allocate and initialise structure */
|
|
313
|
+ request = zalloc ( sizeof ( *request ) );
|
|
314
|
+ if ( ! request ) {
|
|
315
|
+ DBGC ( gma, "GMA %p could not allocate MAD request\n", gma );
|
|
316
|
+ return -ENOMEM;
|
|
317
|
+ }
|
|
318
|
+ request->gma = gma;
|
|
319
|
+ list_add ( &request->list, &gma->requests );
|
|
320
|
+ request->timer.expired = ib_gma_timer_expired;
|
|
321
|
+
|
|
322
|
+ /* Determine address vector */
|
|
323
|
+ if ( av ) {
|
|
324
|
+ memcpy ( &request->av, av, sizeof ( request->av ) );
|
|
325
|
+ } else {
|
|
326
|
+ request->av.lid = ibdev->sm_lid;
|
|
327
|
+ request->av.sl = ibdev->sm_sl;
|
|
328
|
+ request->av.qpn = IB_QPN_GMA;
|
|
329
|
+ request->av.qkey = IB_QKEY_GMA;
|
|
330
|
+ }
|
|
331
|
+
|
|
332
|
+ /* Copy MAD body */
|
|
333
|
+ memcpy ( &request->mad, mad, sizeof ( request->mad ) );
|
|
334
|
+
|
|
335
|
+ /* Allocate TID */
|
|
336
|
+ request->mad.hdr.tid[0] = htonl ( IB_GMA_TID_MAGIC );
|
|
337
|
+ request->mad.hdr.tid[1] = htonl ( ++next_request_tid );
|
|
338
|
+
|
|
339
|
+ /* Start timer to initiate transmission */
|
|
340
|
+ start_timer_nodelay ( &request->timer );
|
|
341
|
+
|
|
342
|
+ return 0;
|
|
343
|
+}
|
|
344
|
+
|
|
345
|
/**
 * Create GMA
 *
 * @v gma		General management agent
 * @v ibdev		Infiniband device
 * @v qkey		Queue key
 * @ret rc		Return status code
 */
int ib_create_gma ( struct ib_gma *gma, struct ib_device *ibdev,
		    unsigned long qkey ) {
	int rc;

	/* Initialise fields */
	memset ( gma, 0, sizeof ( *gma ) );
	gma->ibdev = ibdev;
	INIT_LIST_HEAD ( &gma->requests );

	/* Create completion queue (shared by send and receive) */
	gma->cq = ib_create_cq ( ibdev, IB_GMA_NUM_CQES,
				 &ib_gma_completion_ops );
	if ( ! gma->cq ) {
		DBGC ( gma, "GMA %p could not allocate completion queue\n",
		       gma );
		rc = -ENOMEM;
		goto err_create_cq;
	}

	/* Create queue pair */
	gma->qp = ib_create_qp ( ibdev, IB_GMA_NUM_SEND_WQES, gma->cq,
				 IB_GMA_NUM_RECV_WQES, gma->cq, qkey );
	if ( ! gma->qp ) {
		DBGC ( gma, "GMA %p could not allocate queue pair\n", gma );
		rc = -ENOMEM;
		goto err_create_qp;
	}
	/* Allow completion handlers to recover the GMA from the QP */
	ib_qp_set_ownerdata ( gma->qp, gma );

	DBGC ( gma, "GMA %p running on QPN %#lx\n", gma, gma->qp->qpn );

	/* Fill receive ring */
	ib_refill_recv ( ibdev, gma->qp );
	return 0;

	/* The following line is unreachable on the success path; it
	 * documents the full unwind sequence for the error labels
	 * below (standard gPXE error-path convention).
	 */
	ib_destroy_qp ( ibdev, gma->qp );
 err_create_qp:
	ib_destroy_cq ( ibdev, gma->cq );
 err_create_cq:
	return rc;
}
|
|
394
|
+
|
|
395
|
+/**
|
|
396
|
+ * Destroy GMA
|
|
397
|
+ *
|
|
398
|
+ * @v gma General management agent
|
|
399
|
+ */
|
|
400
|
+void ib_destroy_gma ( struct ib_gma *gma ) {
|
|
401
|
+ struct ib_device *ibdev = gma->ibdev;
|
|
402
|
+ struct ib_mad_request *request;
|
|
403
|
+ struct ib_mad_request *tmp;
|
|
404
|
+
|
|
405
|
+ /* Flush any outstanding requests */
|
|
406
|
+ list_for_each_entry_safe ( request, tmp, &gma->requests, list ) {
|
|
407
|
+ stop_timer ( &request->timer );
|
|
408
|
+ list_del ( &request->list );
|
|
409
|
+ free ( request );
|
|
410
|
+ }
|
|
411
|
+
|
|
412
|
+ ib_destroy_qp ( ibdev, gma->qp );
|
|
413
|
+ ib_destroy_cq ( ibdev, gma->cq );
|
|
414
|
+}
|