|
@@ -0,0 +1,669 @@
|
|
1
|
+/*
|
|
2
|
+ * Copyright (C) 2011 Michael Brown <mbrown@fensystems.co.uk>.
|
|
3
|
+ *
|
|
4
|
+ * This program is free software; you can redistribute it and/or
|
|
5
|
+ * modify it under the terms of the GNU General Public License as
|
|
6
|
+ * published by the Free Software Foundation; either version 2 of the
|
|
7
|
+ * License, or any later version.
|
|
8
|
+ *
|
|
9
|
+ * This program is distributed in the hope that it will be useful, but
|
|
10
|
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
11
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
12
|
+ * General Public License for more details.
|
|
13
|
+ *
|
|
14
|
+ * You should have received a copy of the GNU General Public License
|
|
15
|
+ * along with this program; if not, write to the Free Software
|
|
16
|
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
|
17
|
+ */
|
|
18
|
+
|
|
19
|
+FILE_LICENCE ( GPL2_OR_LATER );
|
|
20
|
+
|
|
21
|
+#include <stdint.h>
|
|
22
|
+#include <errno.h>
|
|
23
|
+#include <assert.h>
|
|
24
|
+#include <byteswap.h>
|
|
25
|
+#include <ipxe/pci.h>
|
|
26
|
+#include <ipxe/io.h>
|
|
27
|
+#include <ipxe/malloc.h>
|
|
28
|
+#include <ipxe/iobuf.h>
|
|
29
|
+#include <ipxe/netdevice.h>
|
|
30
|
+#include <ipxe/if_ether.h>
|
|
31
|
+#include <ipxe/ethernet.h>
|
|
32
|
+#include "vmxnet3.h"
|
|
33
|
+
|
|
34
|
+/**
|
|
35
|
+ * @file
|
|
36
|
+ *
|
|
37
|
+ * VMware vmxnet3 virtual NIC driver
|
|
38
|
+ *
|
|
39
|
+ */
|
|
40
|
+
|
|
41
|
+/**
|
|
42
|
+ * Issue command
|
|
43
|
+ *
|
|
44
|
+ * @v vmxnet vmxnet3 NIC
|
|
45
|
+ * @v command Command to issue
|
|
46
|
+ * @ret result Command result
|
|
47
|
+ */
|
|
48
|
+static inline uint32_t vmxnet3_command ( struct vmxnet3_nic *vmxnet,
|
|
49
|
+ uint32_t command ) {
|
|
50
|
+
|
|
51
|
+ /* Issue command */
|
|
52
|
+ writel ( command, ( vmxnet->vd + VMXNET3_VD_CMD ) );
|
|
53
|
+ return readl ( vmxnet->vd + VMXNET3_VD_CMD );
|
|
54
|
+}
|
|
55
|
+
|
|
56
|
/**
 * Transmit packet
 *
 * @v netdev		Network device
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 *
 * The I/O buffer remains owned by this driver until the corresponding
 * completion is reaped by vmxnet3_poll_tx().
 */
static int vmxnet3_transmit ( struct net_device *netdev,
			      struct io_buffer *iobuf ) {
	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
	struct vmxnet3_tx_desc *tx_desc;
	unsigned int desc_idx;
	unsigned int generation;

	/* Check that we have a free transmit descriptor: a ring slot
	 * is free iff no I/O buffer is still recorded against it.
	 * The generation bit alternates on each wrap of the producer
	 * counter (relies on VMXNET3_NUM_TX_DESC being a power of
	 * two, so the "& VMXNET3_NUM_TX_DESC" test isolates the wrap
	 * bit).
	 */
	desc_idx = ( vmxnet->count.tx_prod % VMXNET3_NUM_TX_DESC );
	generation = ( ( vmxnet->count.tx_prod & VMXNET3_NUM_TX_DESC ) ?
		       0 : cpu_to_le32 ( VMXNET3_TXF_GEN ) );
	if ( vmxnet->tx_iobuf[desc_idx] ) {
		DBGC ( vmxnet, "VMXNET3 %p out of transmit descriptors\n",
		       vmxnet );
		return -ENOBUFS;
	}

	/* Increment producer counter */
	vmxnet->count.tx_prod++;

	/* Store I/O buffer for later completion */
	vmxnet->tx_iobuf[desc_idx] = iobuf;

	/* Populate transmit descriptor: buffer bus address, length
	 * plus generation bit, and "complete + end of packet" flags.
	 */
	tx_desc = &vmxnet->dma->tx_desc[desc_idx];
	tx_desc->address = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
	tx_desc->flags[0] = ( generation | cpu_to_le32 ( iob_len ( iobuf ) ) );
	tx_desc->flags[1] = cpu_to_le32 ( VMXNET3_TXF_CQ | VMXNET3_TXF_EOP );

	/* Hand over descriptor to NIC.  The write barrier ensures the
	 * descriptor contents are visible in memory before the
	 * doorbell write makes the slot visible to the device.
	 */
	wmb();
	writel ( ( vmxnet->count.tx_prod % VMXNET3_NUM_TX_DESC ),
		 ( vmxnet->pt + VMXNET3_PT_TXPROD ) );

	return 0;
}
|
|
99
|
+
|
|
100
|
/**
 * Poll for completed transmissions
 *
 * @v netdev		Network device
 */
static void vmxnet3_poll_tx ( struct net_device *netdev ) {
	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
	struct vmxnet3_tx_comp *tx_comp;
	struct io_buffer *iobuf;
	unsigned int comp_idx;
	unsigned int desc_idx;
	unsigned int generation;

	while ( 1 ) {

		/* Look for completed descriptors: a completion ring
		 * entry belongs to us only once its generation bit
		 * matches the value expected for this pass of the
		 * ring (the expected value alternates on each wrap of
		 * the consumer counter).
		 */
		comp_idx = ( vmxnet->count.tx_cons % VMXNET3_NUM_TX_COMP );
		generation = ( ( vmxnet->count.tx_cons & VMXNET3_NUM_TX_COMP ) ?
			       0 : cpu_to_le32 ( VMXNET3_TXCF_GEN ) );
		tx_comp = &vmxnet->dma->tx_comp[comp_idx];
		if ( generation != ( tx_comp->flags &
				     cpu_to_le32 ( VMXNET3_TXCF_GEN ) ) ) {
			break;
		}

		/* Increment consumer counter */
		vmxnet->count.tx_cons++;

		/* Locate corresponding transmit descriptor, using the
		 * descriptor index reported in the completion entry.
		 */
		desc_idx = ( le32_to_cpu ( tx_comp->index ) %
			     VMXNET3_NUM_TX_DESC );
		iobuf = vmxnet->tx_iobuf[desc_idx];
		if ( ! iobuf ) {
			/* Completion for a slot we never filled:
			 * report an error so it shows up in stats.
			 */
			DBGC ( vmxnet, "VMXNET3 %p completed on empty transmit "
			       "buffer %#x/%#x\n", vmxnet, comp_idx, desc_idx );
			netdev_tx_err ( netdev, NULL, -ENOTTY );
			continue;
		}

		/* Remove I/O buffer from transmit queue */
		vmxnet->tx_iobuf[desc_idx] = NULL;

		/* Report transmission completion to network layer */
		DBGC2 ( vmxnet, "VMXNET3 %p completed TX %#x/%#x (len %#zx)\n",
			vmxnet, comp_idx, desc_idx, iob_len ( iobuf ) );
		netdev_tx_complete ( netdev, iobuf );
	}
}
|
|
148
|
+
|
|
149
|
+/**
|
|
150
|
+ * Flush any uncompleted transmit buffers
|
|
151
|
+ *
|
|
152
|
+ * @v netdev Network device
|
|
153
|
+ */
|
|
154
|
+static void vmxnet3_flush_tx ( struct net_device *netdev ) {
|
|
155
|
+ struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
|
|
156
|
+ unsigned int i;
|
|
157
|
+
|
|
158
|
+ for ( i = 0 ; i < VMXNET3_NUM_TX_DESC ; i++ ) {
|
|
159
|
+ if ( vmxnet->tx_iobuf[i] ) {
|
|
160
|
+ netdev_tx_complete_err ( netdev, vmxnet->tx_iobuf[i],
|
|
161
|
+ -ECANCELED );
|
|
162
|
+ vmxnet->tx_iobuf[i] = NULL;
|
|
163
|
+ }
|
|
164
|
+ }
|
|
165
|
+}
|
|
166
|
+
|
|
167
|
/**
 * Refill receive ring
 *
 * @v netdev		Network device
 */
static void vmxnet3_refill_rx ( struct net_device *netdev ) {
	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
	struct vmxnet3_rx_desc *rx_desc;
	struct io_buffer *iobuf;
	unsigned int orig_rx_prod = vmxnet->count.rx_prod;
	unsigned int desc_idx;
	unsigned int generation;

	/* Fill receive ring to specified fill level */
	while ( vmxnet->count.rx_fill < VMXNET3_RX_FILL ) {

		/* Locate receive descriptor.  The generation bit
		 * alternates on each wrap of the producer counter
		 * (relies on VMXNET3_NUM_RX_DESC being a power of
		 * two).
		 */
		desc_idx = ( vmxnet->count.rx_prod % VMXNET3_NUM_RX_DESC );
		generation = ( ( vmxnet->count.rx_prod & VMXNET3_NUM_RX_DESC ) ?
			       0 : cpu_to_le32 ( VMXNET3_RXF_GEN ) );
		assert ( vmxnet->rx_iobuf[desc_idx] == NULL );

		/* Allocate I/O buffer, leaving headroom so the IP
		 * header ends up naturally aligned.
		 */
		iobuf = alloc_iob ( VMXNET3_MTU + NET_IP_ALIGN );
		if ( ! iobuf ) {
			/* Non-fatal low memory condition; a later
			 * poll will retry the refill.
			 */
			break;
		}
		iob_reserve ( iobuf, NET_IP_ALIGN );

		/* Increment producer counter and fill level */
		vmxnet->count.rx_prod++;
		vmxnet->count.rx_fill++;

		/* Store I/O buffer for later completion */
		vmxnet->rx_iobuf[desc_idx] = iobuf;

		/* Populate receive descriptor: buffer bus address and
		 * maximum receive length plus generation bit.
		 */
		rx_desc = &vmxnet->dma->rx_desc[desc_idx];
		rx_desc->address = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
		rx_desc->flags = ( generation | cpu_to_le32 ( VMXNET3_MTU ) );

	}

	/* Hand over any new descriptors to NIC.  The write barrier
	 * ensures the descriptors are visible in memory before the
	 * doorbell write exposes them to the device.
	 */
	if ( vmxnet->count.rx_prod != orig_rx_prod ) {
		wmb();
		writel ( ( vmxnet->count.rx_prod % VMXNET3_NUM_RX_DESC ),
			 ( vmxnet->pt + VMXNET3_PT_RXPROD ) );
	}
}
|
|
218
|
+
|
|
219
|
/**
 * Poll for received packets
 *
 * @v netdev		Network device
 */
static void vmxnet3_poll_rx ( struct net_device *netdev ) {
	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
	struct vmxnet3_rx_comp *rx_comp;
	struct io_buffer *iobuf;
	unsigned int comp_idx;
	unsigned int desc_idx;
	unsigned int generation;
	size_t len;

	while ( 1 ) {

		/* Look for completed descriptors: a completion ring
		 * entry belongs to us only once its generation bit
		 * matches the value expected for this pass of the
		 * ring.
		 */
		comp_idx = ( vmxnet->count.rx_cons % VMXNET3_NUM_RX_COMP );
		generation = ( ( vmxnet->count.rx_cons & VMXNET3_NUM_RX_COMP ) ?
			       0 : cpu_to_le32 ( VMXNET3_RXCF_GEN ) );
		rx_comp = &vmxnet->dma->rx_comp[comp_idx];
		if ( generation != ( rx_comp->flags &
				     cpu_to_le32 ( VMXNET3_RXCF_GEN ) ) ) {
			break;
		}

		/* Increment consumer counter */
		vmxnet->count.rx_cons++;

		/* Locate corresponding receive descriptor, using the
		 * descriptor index reported in the completion entry.
		 */
		desc_idx = ( le32_to_cpu ( rx_comp->index ) %
			     VMXNET3_NUM_RX_DESC );
		iobuf = vmxnet->rx_iobuf[desc_idx];
		if ( ! iobuf ) {
			/* Completion for a slot we never filled:
			 * report an error so it shows up in stats.
			 */
			DBGC ( vmxnet, "VMXNET3 %p completed on empty receive "
			       "buffer %#x/%#x\n", vmxnet, comp_idx, desc_idx );
			netdev_rx_err ( netdev, NULL, -ENOTTY );
			continue;
		}

		/* Remove I/O buffer from receive queue */
		vmxnet->rx_iobuf[desc_idx] = NULL;
		vmxnet->count.rx_fill--;

		/* Deliver packet to network layer, masking the
		 * reported length to the maximum packet length.
		 */
		len = ( le32_to_cpu ( rx_comp->len ) &
			( VMXNET3_MAX_PACKET_LEN - 1 ) );
		DBGC2 ( vmxnet, "VMXNET3 %p completed RX %#x/%#x (len %#zx)\n",
			vmxnet, comp_idx, desc_idx, len );
		iob_put ( iobuf, len );
		netdev_rx ( netdev, iobuf );
	}
}
|
|
272
|
+
|
|
273
|
+/**
|
|
274
|
+ * Flush any uncompleted receive buffers
|
|
275
|
+ *
|
|
276
|
+ * @v netdev Network device
|
|
277
|
+ */
|
|
278
|
+static void vmxnet3_flush_rx ( struct net_device *netdev ) {
|
|
279
|
+ struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
|
|
280
|
+ struct io_buffer *iobuf;
|
|
281
|
+ unsigned int i;
|
|
282
|
+
|
|
283
|
+ for ( i = 0 ; i < VMXNET3_NUM_RX_DESC ; i++ ) {
|
|
284
|
+ if ( ( iobuf = vmxnet->rx_iobuf[i] ) != NULL ) {
|
|
285
|
+ netdev_rx_err ( netdev, iobuf, -ECANCELED );
|
|
286
|
+ vmxnet->rx_iobuf[i] = NULL;
|
|
287
|
+ }
|
|
288
|
+ }
|
|
289
|
+}
|
|
290
|
+
|
|
291
|
+/**
|
|
292
|
+ * Check link state
|
|
293
|
+ *
|
|
294
|
+ * @v netdev Network device
|
|
295
|
+ */
|
|
296
|
+static void vmxnet3_check_link ( struct net_device *netdev ) {
|
|
297
|
+ struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
|
|
298
|
+ uint32_t state;
|
|
299
|
+ int link_up;
|
|
300
|
+ unsigned int link_speed;
|
|
301
|
+
|
|
302
|
+ /* Get link state */
|
|
303
|
+ state = vmxnet3_command ( vmxnet, VMXNET3_CMD_GET_LINK );
|
|
304
|
+ link_up = ( state & 1 );
|
|
305
|
+ link_speed = ( state >> 16 );
|
|
306
|
+
|
|
307
|
+ /* Report link state to network device */
|
|
308
|
+ if ( link_up ) {
|
|
309
|
+ DBGC ( vmxnet, "VMXNET3 %p link is up at %d Mbps\n",
|
|
310
|
+ vmxnet, link_speed );
|
|
311
|
+ netdev_link_up ( netdev );
|
|
312
|
+ } else {
|
|
313
|
+ DBGC ( vmxnet, "VMXNET3 %p link is down\n", vmxnet );
|
|
314
|
+ netdev_link_down ( netdev );
|
|
315
|
+ }
|
|
316
|
+}
|
|
317
|
+
|
|
318
|
/**
 * Poll for events
 *
 * @v netdev		Network device
 */
static void vmxnet3_poll_events ( struct net_device *netdev ) {
	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
	uint32_t events;

	/* Do nothing unless there are events to process */
	if ( ! vmxnet->dma->shared.ecr )
		return;
	events = le32_to_cpu ( vmxnet->dma->shared.ecr );

	/* Acknowledge these events */
	writel ( events, ( vmxnet->vd + VMXNET3_VD_ECR ) );

	/* Check for link state change */
	if ( events & VMXNET3_ECR_LINK ) {
		vmxnet3_check_link ( netdev );
		events &= ~VMXNET3_ECR_LINK;
	}

	/* Check for queue errors */
	if ( events & ( VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR ) ) {
		/* NOTE(review): GET_QUEUE_STATUS presumably refreshes
		 * the per-queue status fields read below — verify
		 * against the device interface specification.
		 */
		vmxnet3_command ( vmxnet, VMXNET3_CMD_GET_QUEUE_STATUS );
		DBGC ( vmxnet, "VMXNET3 %p queue error status (TX %08x, RX "
		       "%08x)\n", vmxnet,
		       le32_to_cpu ( vmxnet->dma->queues.tx.status.error ),
		       le32_to_cpu ( vmxnet->dma->queues.rx.status.error ) );
		/* Report errors to allow for visibility via "ifstat" */
		if ( events & VMXNET3_ECR_TQERR )
			netdev_tx_err ( netdev, NULL, -EPIPE );
		if ( events & VMXNET3_ECR_RQERR )
			netdev_rx_err ( netdev, NULL, -EPIPE );
		events &= ~( VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR );
	}

	/* Check for unknown events */
	if ( events ) {
		DBGC ( vmxnet, "VMXNET3 %p unknown events %08x\n",
		       vmxnet, events );
		/* Report error to allow for visibility via "ifstat" */
		netdev_rx_err ( netdev, NULL, -ENODEV );
	}
}
|
|
364
|
+
|
|
365
|
/**
 * Poll network device
 *
 * @v netdev		Network device
 *
 * Processes pending device events (e.g. link changes), reaps
 * completed transmissions and receptions, and finally tops the
 * receive ring back up to its fill level.
 */
static void vmxnet3_poll ( struct net_device *netdev ) {

	vmxnet3_poll_events ( netdev );
	vmxnet3_poll_tx ( netdev );
	vmxnet3_poll_rx ( netdev );
	vmxnet3_refill_rx ( netdev );
}
|
|
377
|
+
|
|
378
|
/**
 * Enable/disable interrupts
 *
 * @v netdev		Network device
 * @v enable		Interrupts should be enabled
 *
 * Interrupt masking is not implemented by this driver; the stack
 * drives the NIC purely via polling.  The request is only logged.
 */
static void vmxnet3_irq ( struct net_device *netdev, int enable ) {
	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
	const char *action = ( enable ? "enable" : "disable" );

	DBGC ( vmxnet, "VMXNET3 %p %s IRQ not implemented\n",
	       vmxnet, action );
}
|
|
390
|
+
|
|
391
|
+/**
|
|
392
|
+ * Set MAC address
|
|
393
|
+ *
|
|
394
|
+ * @v vmxnet vmxnet3 NIC
|
|
395
|
+ * @v ll_addr Link-layer address to set
|
|
396
|
+ */
|
|
397
|
+static void vmxnet3_set_ll_addr ( struct vmxnet3_nic *vmxnet,
|
|
398
|
+ const void *ll_addr ) {
|
|
399
|
+ struct {
|
|
400
|
+ uint32_t low;
|
|
401
|
+ uint32_t high;
|
|
402
|
+ } __attribute__ (( packed )) mac;
|
|
403
|
+
|
|
404
|
+ memset ( &mac, 0, sizeof ( mac ) );
|
|
405
|
+ memcpy ( &mac, ll_addr, ETH_ALEN );
|
|
406
|
+ writel ( cpu_to_le32 ( mac.low ), ( vmxnet->vd + VMXNET3_VD_MACL ) );
|
|
407
|
+ writel ( cpu_to_le32 ( mac.high ), ( vmxnet->vd + VMXNET3_VD_MACH ) );
|
|
408
|
+}
|
|
409
|
+
|
|
410
|
/**
 * Open NIC
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 *
 * Allocates the DMA area, describes the (single) TX and RX queues
 * and the shared area to the device, and activates it.
 */
static int vmxnet3_open ( struct net_device *netdev ) {
	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
	struct vmxnet3_shared *shared;
	struct vmxnet3_queues *queues;
	uint64_t shared_bus;
	uint64_t queues_bus;
	uint32_t status;
	int rc;

	/* Allocate DMA areas */
	vmxnet->dma = malloc_dma ( sizeof ( *vmxnet->dma ), VMXNET3_DMA_ALIGN );
	if ( ! vmxnet->dma ) {
		DBGC ( vmxnet, "VMXNET3 %p could not allocate DMA area\n",
		       vmxnet );
		rc = -ENOMEM;
		goto err_alloc_dma;
	}
	memset ( vmxnet->dma, 0, sizeof ( *vmxnet->dma ) );

	/* Populate queue descriptors (one TX and one RX queue) */
	queues = &vmxnet->dma->queues;
	queues->tx.cfg.desc_address =
		cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->tx_desc ) );
	queues->tx.cfg.comp_address =
		cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->tx_comp ) );
	queues->tx.cfg.num_desc = cpu_to_le32 ( VMXNET3_NUM_TX_DESC );
	queues->tx.cfg.num_comp = cpu_to_le32 ( VMXNET3_NUM_TX_COMP );
	queues->rx.cfg.desc_address[0] =
		cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->rx_desc ) );
	queues->rx.cfg.comp_address =
		cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->rx_comp ) );
	queues->rx.cfg.num_desc[0] = cpu_to_le32 ( VMXNET3_NUM_RX_DESC );
	queues->rx.cfg.num_comp = cpu_to_le32 ( VMXNET3_NUM_RX_COMP );
	queues_bus = virt_to_bus ( queues );
	DBGC ( vmxnet, "VMXNET3 %p queue descriptors at %08llx+%zx\n",
	       vmxnet, queues_bus, sizeof ( *queues ) );

	/* Populate shared area: magic/version handshake, queue
	 * descriptor location, MTU, interrupt control, and a receive
	 * filter accepting unicast, broadcast and all multicast.
	 */
	shared = &vmxnet->dma->shared;
	shared->magic = cpu_to_le32 ( VMXNET3_SHARED_MAGIC );
	shared->misc.version = cpu_to_le32 ( VMXNET3_VERSION_MAGIC );
	shared->misc.version_support = cpu_to_le32 ( VMXNET3_VERSION_SELECT );
	shared->misc.upt_version_support =
		cpu_to_le32 ( VMXNET3_UPT_VERSION_SELECT );
	shared->misc.queue_desc_address = cpu_to_le64 ( queues_bus );
	shared->misc.queue_desc_len = cpu_to_le32 ( sizeof ( *queues ) );
	shared->misc.mtu = cpu_to_le32 ( VMXNET3_MTU );
	shared->misc.num_tx_queues = 1;
	shared->misc.num_rx_queues = 1;
	shared->interrupt.num_intrs = 1;
	shared->interrupt.control = cpu_to_le32 ( VMXNET3_IC_DISABLE_ALL );
	shared->rx_filter.mode = cpu_to_le32 ( VMXNET3_RXM_UCAST |
					       VMXNET3_RXM_BCAST |
					       VMXNET3_RXM_ALL_MULTI );
	shared_bus = virt_to_bus ( shared );
	DBGC ( vmxnet, "VMXNET3 %p shared area at %08llx+%zx\n",
	       vmxnet, shared_bus, sizeof ( *shared ) );

	/* Zero counters */
	memset ( &vmxnet->count, 0, sizeof ( vmxnet->count ) );

	/* Set MAC address */
	vmxnet3_set_ll_addr ( vmxnet, &netdev->ll_addr );

	/* Pass shared area to device (64-bit bus address split across
	 * two 32-bit registers).
	 */
	writel ( ( shared_bus >> 0 ), ( vmxnet->vd + VMXNET3_VD_DSAL ) );
	writel ( ( shared_bus >> 32 ), ( vmxnet->vd + VMXNET3_VD_DSAH ) );

	/* Activate device */
	if ( ( status = vmxnet3_command ( vmxnet,
					  VMXNET3_CMD_ACTIVATE_DEV ) ) != 0 ) {
		DBGC ( vmxnet, "VMXNET3 %p could not activate (status %#x)\n",
		       vmxnet, status );
		rc = -EIO;
		goto err_activate;
	}

	/* Fill receive ring */
	vmxnet3_refill_rx ( netdev );

	return 0;

	/* Error path.  The statements before the first label are
	 * deliberately unreachable: they document the full teardown
	 * sequence in reverse order of construction (standard iPXE
	 * idiom).
	 */
	vmxnet3_command ( vmxnet, VMXNET3_CMD_QUIESCE_DEV );
	vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV );
 err_activate:
	vmxnet3_flush_tx ( netdev );
	vmxnet3_flush_rx ( netdev );
	free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
 err_alloc_dma:
	return rc;
}
|
|
507
|
+
|
|
508
|
/**
 * Close NIC
 *
 * @v netdev		Network device
 *
 * Quiesces and resets the device before reclaiming buffers and the
 * DMA area, so that no further DMA can target memory being freed.
 */
static void vmxnet3_close ( struct net_device *netdev ) {
	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );

	vmxnet3_command ( vmxnet, VMXNET3_CMD_QUIESCE_DEV );
	vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV );
	vmxnet3_flush_tx ( netdev );
	vmxnet3_flush_rx ( netdev );
	free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
}
|
|
522
|
+
|
|
523
|
/** vmxnet3 net device operations
 *
 * The irq method is a stub; the device is driven entirely by
 * polling.
 */
static struct net_device_operations vmxnet3_operations = {
	.open = vmxnet3_open,
	.close = vmxnet3_close,
	.transmit = vmxnet3_transmit,
	.poll = vmxnet3_poll,
	.irq = vmxnet3_irq,
};
|
|
531
|
+
|
|
532
|
+/**
|
|
533
|
+ * Check version
|
|
534
|
+ *
|
|
535
|
+ * @v vmxnet vmxnet3 NIC
|
|
536
|
+ * @ret rc Return status code
|
|
537
|
+ */
|
|
538
|
+static int vmxnet3_check_version ( struct vmxnet3_nic *vmxnet ) {
|
|
539
|
+ uint32_t version;
|
|
540
|
+ uint32_t upt_version;
|
|
541
|
+
|
|
542
|
+ /* Read version */
|
|
543
|
+ version = readl ( vmxnet->vd + VMXNET3_VD_VRRS );
|
|
544
|
+ upt_version = readl ( vmxnet->vd + VMXNET3_VD_UVRS );
|
|
545
|
+ DBGC ( vmxnet, "VMXNET3 %p is version %d (UPT version %d)\n",
|
|
546
|
+ vmxnet, version, upt_version );
|
|
547
|
+
|
|
548
|
+ /* Inform NIC of driver version */
|
|
549
|
+ writel ( VMXNET3_VERSION_SELECT, ( vmxnet->vd + VMXNET3_VD_VRRS ) );
|
|
550
|
+ writel ( VMXNET3_UPT_VERSION_SELECT, ( vmxnet->vd + VMXNET3_VD_UVRS ) );
|
|
551
|
+
|
|
552
|
+ return 0;
|
|
553
|
+}
|
|
554
|
+
|
|
555
|
+/**
|
|
556
|
+ * Get permanent MAC address
|
|
557
|
+ *
|
|
558
|
+ * @v vmxnet vmxnet3 NIC
|
|
559
|
+ * @v hw_addr Hardware address to fill in
|
|
560
|
+ */
|
|
561
|
+static void vmxnet3_get_hw_addr ( struct vmxnet3_nic *vmxnet, void *hw_addr ) {
|
|
562
|
+ struct {
|
|
563
|
+ uint32_t low;
|
|
564
|
+ uint32_t high;
|
|
565
|
+ } __attribute__ (( packed )) mac;
|
|
566
|
+
|
|
567
|
+ mac.low = le32_to_cpu ( vmxnet3_command ( vmxnet,
|
|
568
|
+ VMXNET3_CMD_GET_PERM_MAC_LO ) );
|
|
569
|
+ mac.high = le32_to_cpu ( vmxnet3_command ( vmxnet,
|
|
570
|
+ VMXNET3_CMD_GET_PERM_MAC_HI ) );
|
|
571
|
+ memcpy ( hw_addr, &mac, ETH_ALEN );
|
|
572
|
+}
|
|
573
|
+
|
|
574
|
/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int vmxnet3_probe ( struct pci_device *pci ) {
	struct net_device *netdev;
	struct vmxnet3_nic *vmxnet;
	int rc;

	/* Allocate network device */
	netdev = alloc_etherdev ( sizeof ( *vmxnet ) );
	if ( ! netdev ) {
		rc = -ENOMEM;
		goto err_alloc_etherdev;
	}
	netdev_init ( netdev, &vmxnet3_operations );
	vmxnet = netdev_priv ( netdev );
	pci_set_drvdata ( pci, netdev );
	netdev->dev = &pci->dev;
	memset ( vmxnet, 0, sizeof ( *vmxnet ) );

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map PCI BARs (PT = "pass-through" doorbells, VD = device
	 * registers).
	 *
	 * NOTE(review): the ioremap() results are not checked for
	 * NULL before use — confirm this platform's ioremap cannot
	 * fail for these fixed-size BARs.
	 */
	vmxnet->pt = ioremap ( pci_bar_start ( pci, VMXNET3_PT_BAR ),
			       VMXNET3_PT_LEN );
	vmxnet->vd = ioremap ( pci_bar_start ( pci, VMXNET3_VD_BAR ),
			       VMXNET3_VD_LEN );

	/* Version check */
	if ( ( rc = vmxnet3_check_version ( vmxnet ) ) != 0 )
		goto err_check_version;

	/* Reset device */
	if ( ( rc = vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV ) ) != 0 )
		goto err_reset;

	/* Read initial MAC address */
	vmxnet3_get_hw_addr ( vmxnet, &netdev->hw_addr );

	/* Register network device */
	if ( ( rc = register_netdev ( netdev ) ) != 0 ) {
		DBGC ( vmxnet, "VMXNET3 %p could not register net device: "
		       "%s\n", vmxnet, strerror ( rc ) );
		goto err_register_netdev;
	}

	/* Get initial link state */
	vmxnet3_check_link ( netdev );

	return 0;

	/* Error path.  The unreachable statement before the first
	 * label documents teardown in reverse order of construction
	 * (standard iPXE idiom).
	 */
	unregister_netdev ( netdev );
 err_register_netdev:
 err_reset:
 err_check_version:
	iounmap ( vmxnet->vd );
	iounmap ( vmxnet->pt );
	netdev_nullify ( netdev );
	netdev_put ( netdev );
 err_alloc_etherdev:
	return rc;
}
|
|
641
|
+
|
|
642
|
/**
 * Remove PCI device
 *
 * @v pci		PCI device
 *
 * Tears down in reverse order of probe: unregister, unmap BARs,
 * then drop the network device reference.
 */
static void vmxnet3_remove ( struct pci_device *pci ) {
	struct net_device *netdev = pci_get_drvdata ( pci );
	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );

	unregister_netdev ( netdev );
	iounmap ( vmxnet->vd );
	iounmap ( vmxnet->pt );
	netdev_nullify ( netdev );
	netdev_put ( netdev );
}
|
|
657
|
+
|
|
658
|
/** vmxnet3 PCI IDs */
static struct pci_device_id vmxnet3_nics[] = {
	/* VMware (vendor 0x15ad) vmxnet3 virtual NIC (device 0x07b0) */
	PCI_ROM ( 0x15ad, 0x07b0, "vmxnet3", "vmxnet3 virtual NIC", 0 ),
};
|
|
662
|
+
|
|
663
|
/** vmxnet3 PCI driver */
struct pci_driver vmxnet3_driver __pci_driver = {
	.ids = vmxnet3_nics,
	/* Number of entries in the PCI ID table */
	.id_count = ( sizeof ( vmxnet3_nics ) / sizeof ( vmxnet3_nics[0] ) ),
	.probe = vmxnet3_probe,
	.remove = vmxnet3_remove,
};
|