FILE_LICENCE ( GPL2_ONLY );

#include <mii.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <byteswap.h>
#include <ipxe/pci.h>
#include <ipxe/iobuf.h>
#include <ipxe/timer.h>
#include <ipxe/malloc.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include <ipxe/netdevice.h>

#include "tg3.h"

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0

static void tg3_refill_prod_ring(struct tg3 *tp);

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
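
/* Editor's illustration (not part of the original driver): with a
 * power-of-two ring size the modulo reduces to a mask, e.g. assuming
 * TG3_TX_RING_SIZE == 512:
 *
 *      idx % TG3_TX_RING_SIZE  ==  idx & (TG3_TX_RING_SIZE - 1)
 *
 * which is exactly the form the NEXT_TX() macro below uses.
 */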

#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
                           TG3_TX_RING_SIZE)

/* FIXME: does TG3_RX_RET_MAX_SIZE_5705 work for all cards? */
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (TG3_RX_RET_MAX_SIZE_5705))

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_MAX_SIZE_5700)

void tg3_rx_prodring_fini(struct tg3_rx_prodring_set *tpr)
{ DBGP("%s\n", __func__);

        if (tpr->rx_std) {
                free_dma(tpr->rx_std, TG3_RX_STD_RING_BYTES(tp));
                tpr->rx_std = NULL;
        }
}
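
/* Editor's note: TG3_RX_STD_RING_BYTES() never expands its parameter, so
 * the out-of-scope identifier 'tp' in the free_dma() call above is
 * discarded by the preprocessor and this compiles; the parameter is
 * presumably kept for symmetry with the Linux driver this code derives
 * from.
 */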

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{ DBGP("%s\n", __func__);

        if (tp->tx_ring) {
                free_dma(tp->tx_ring, TG3_TX_RING_BYTES);
                tp->tx_ring = NULL;
        }

        free(tp->tx_buffers);
        tp->tx_buffers = NULL;

        if (tp->rx_rcb) {
                free_dma(tp->rx_rcb, TG3_RX_RCB_RING_BYTES(tp));
                tp->rx_rcb_mapping = 0;
                tp->rx_rcb = NULL;
        }

        tg3_rx_prodring_fini(&tp->prodring);

        if (tp->hw_status) {
                free_dma(tp->hw_status, TG3_HW_STATUS_SIZE);
                tp->status_mapping = 0;
                tp->hw_status = NULL;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down. Can sleep.
 */
int tg3_alloc_consistent(struct tg3 *tp)
{ DBGP("%s\n", __func__);

        struct tg3_hw_status *sblk;
        struct tg3_rx_prodring_set *tpr = &tp->prodring;

        tp->hw_status = malloc_dma(TG3_HW_STATUS_SIZE, TG3_DMA_ALIGNMENT);
        if (!tp->hw_status) {
                DBGC(tp->dev, "hw_status alloc failed\n");
                goto err_out;
        }
        tp->status_mapping = virt_to_bus(tp->hw_status);

        memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        sblk = tp->hw_status;

        tpr->rx_std = malloc_dma(TG3_RX_STD_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
        if (!tpr->rx_std) {
                DBGC(tp->dev, "rx prodring alloc failed\n");
                goto err_out;
        }
        tpr->rx_std_mapping = virt_to_bus(tpr->rx_std);
        memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

        tp->tx_buffers = zalloc(sizeof(struct ring_info) * TG3_TX_RING_SIZE);
        if (!tp->tx_buffers)
                goto err_out;

        tp->tx_ring = malloc_dma(TG3_TX_RING_BYTES, TG3_DMA_ALIGNMENT);
        if (!tp->tx_ring)
                goto err_out;
        tp->tx_desc_mapping = virt_to_bus(tp->tx_ring);

        /*
         * When RSS is enabled, the status block format changes
         * slightly. The "rx_jumbo_consumer", "reserved",
         * and "rx_mini_consumer" members get mapped to the
         * other three rx return ring producer indexes.
         */

        tp->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;

        tp->rx_rcb = malloc_dma(TG3_RX_RCB_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
        if (!tp->rx_rcb)
                goto err_out;
        tp->rx_rcb_mapping = virt_to_bus(tp->rx_rcb);

        memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

        return 0;

err_out:
        tg3_free_consistent(tp);
        return -ENOMEM;
}
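
/* Editor's summary: a successful tg3_alloc_consistent() leaves the driver
 * holding four DMA regions (status block, standard RX producer ring, RX
 * return ring, TX ring) plus the host-only tx_buffers[] shadow array that
 * maps TX descriptor slots back to their io_buffers.
 */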

#define TG3_RX_STD_BUFF_RING_BYTES(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_MAX_SIZE_5700)
#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_MAX_SIZE_5700)

/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static int tg3_rx_prodring_alloc(struct tg3 __unused *tp,
                                 struct tg3_rx_prodring_set *tpr)
{ DBGP("%s\n", __func__);

        u32 i;

        tpr->rx_std_cons_idx = 0;
        tpr->rx_std_prod_idx = 0;

        /* Initialize invariants of the rings, we only set this
         * stuff once. This works because the card does not
         * write into the rx buffer posting rings.
         */
        /* FIXME: does TG3_RX_STD_MAX_SIZE_5700 work on all cards? */
        for (i = 0; i < TG3_RX_STD_MAX_SIZE_5700; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tpr->rx_std[i];
                rxd->idx_len = (TG3_RX_STD_DMA_SZ - 64 - 2) << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        return 0;
}

static void tg3_rx_iob_free(struct io_buffer *iobs[], int i)
{ DBGP("%s\n", __func__);

        if (iobs[i] == NULL)
                return;

        free_iob(iobs[i]);
        iobs[i] = NULL;
}

static void tg3_rx_prodring_free(struct tg3_rx_prodring_set *tpr)
{ DBGP("%s\n", __func__);

        unsigned int i;

        for (i = 0; i < TG3_DEF_RX_RING_PENDING; i++)
                tg3_rx_iob_free(tpr->rx_iobufs, i);
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
int tg3_init_rings(struct tg3 *tp)
{ DBGP("%s\n", __func__);

        /* Free up all the SKBs. */
        /* tg3_free_rings(tp); */

        tp->last_tag = 0;
        tp->last_irq_tag = 0;
        tp->hw_status->status = 0;
        tp->hw_status->status_tag = 0;
        memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);

        tp->tx_prod = 0;
        tp->tx_cons = 0;
        if (tp->tx_ring)
                memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

        tp->rx_rcb_ptr = 0;
        if (tp->rx_rcb)
                memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

        if (tg3_rx_prodring_alloc(tp, &tp->prodring)) {
                DBGC(tp->dev, "tg3_rx_prodring_alloc() failed\n");
                tg3_rx_prodring_free(&tp->prodring);
                return -ENOMEM;
        }

        return 0;
}

static int tg3_open(struct net_device *dev)
{ DBGP("%s\n", __func__);

        struct tg3 *tp = netdev_priv(dev);
        struct tg3_rx_prodring_set *tpr = &tp->prodring;
        int err = 0;

        tg3_set_power_state_0(tp);

        /* Initialize MAC address and backoff seed. */
        __tg3_set_mac_addr(tp, 0);

        err = tg3_alloc_consistent(tp);
        if (err)
                return err;

        tpr->rx_std_iob_cnt = 0;
        tg3_refill_prod_ring(tp);

        err = tg3_init_hw(tp, 1);
        if (err != 0)
                DBGC(tp->dev, "tg3_init_hw failed: %s\n", strerror(err));

        return err;
}

static inline u32 tg3_tx_avail(struct tg3 *tp)
{ DBGP("%s\n", __func__);

        /* Tell compiler to fetch tx indices from memory. */
        barrier();
        return TG3_DEF_TX_RING_PENDING -
               ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
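
/* Editor's illustration (a sketch, not part of the driver; guarded out
 * like the other debug-only code in this file). The unsigned subtraction
 * in tg3_tx_avail() handles producer wraparound.
 */
#if 0
static void tg3_tx_avail_example(struct tg3 *tp)
{
        /* Assuming TG3_TX_RING_SIZE == 512: the producer has wrapped to 3
         * while the consumer is still at 510, so (3 - 510) & 511 == 5
         * descriptors are in flight.
         */
        tp->tx_prod = 3;
        tp->tx_cons = 510;
        assert(tg3_tx_avail(tp) == TG3_DEF_TX_RING_PENDING - 5);
}
#endif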

#if 0
/**
 *
 * Prints all registers that could cause a set ERR bit in hw_status->status
 */
static void tg3_dump_err_reg(struct tg3 *tp)
{ DBGP("%s\n", __func__);

        printf("FLOW_ATTN: %#08x\n", tr32(HOSTCC_FLOW_ATTN));
        printf("MAC ATTN: %#08x\n", tr32(MAC_STATUS));
        printf("MSI STATUS: %#08x\n", tr32(MSGINT_STATUS));
        printf("DMA RD: %#08x\n", tr32(RDMAC_STATUS));
        printf("DMA WR: %#08x\n", tr32(WDMAC_STATUS));
        printf("TX CPU STATE: %#08x\n", tr32(TX_CPU_STATE));
        printf("RX CPU STATE: %#08x\n", tr32(RX_CPU_STATE));
}

static void __unused tw32_mailbox2(struct tg3 *tp, uint32_t reg, uint32_t val)
{ DBGP("%s\n", __func__);

        tw32_mailbox(reg, val);
        tr32(reg);
}
#endif

#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static int tg3_transmit(struct net_device *dev, struct io_buffer *iob)
{ DBGP("%s\n", __func__);

        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry;
        dma_addr_t mapping;
        u32 bmsr;

        if (tg3_tx_avail(tp) < 1) {
                DBGC(dev, "Transmit ring full\n");
                return -ENOBUFS;
        }

        entry = tp->tx_prod;

        iob_pad(iob, ETH_ZLEN);
        mapping = virt_to_bus(iob->data);
        len = iob_len(iob);

        tp->tx_buffers[entry].iob = iob;

        tg3_set_txd(tp, entry, mapping, len, TXD_FLAG_END);

        entry = NEXT_TX(entry);

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox(tp->prodmbox, entry);

        writel(entry, tp->regs + MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

        tp->tx_prod = entry;

        mb();

        tg3_readphy(tp, MII_BMSR, &bmsr);

        return 0;
}
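
/* Editor's note: tp->prodmbox is initialised in tg3_init_one() to
 * MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, so the writel() above
 * repeats the tw32_tx_mbox() write to the same register. The MII_BMSR
 * read discards its result, but BMSR reads also clear latched status
 * bits, so it is left in place rather than removed here.
 */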

static void tg3_tx_complete(struct net_device *dev)
{ DBGP("%s\n", __func__);

        struct tg3 *tp = netdev_priv(dev);
        u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
        u32 sw_idx = tp->tx_cons;

        while (sw_idx != hw_idx) {
                struct io_buffer *iob = tp->tx_buffers[sw_idx].iob;

                DBGC2(dev, "Transmitted packet: %zd bytes\n", iob_len(iob));

                netdev_tx_complete(dev, iob);
                sw_idx = NEXT_TX(sw_idx);
        }

        tp->tx_cons = sw_idx;
}

#define TG3_RX_STD_BUFF_RING_BYTES(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_MAX_SIZE_5700)
#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_MAX_SIZE_5700)

/* Returns 0 or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses. For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address). Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_iob(struct tg3_rx_prodring_set *tpr, u32 dest_idx_unmasked)
{ DBGP("%s\n", __func__);

        struct tg3_rx_buffer_desc *desc;
        struct io_buffer *iob;
        dma_addr_t mapping;
        int dest_idx, iob_idx;

        dest_idx = dest_idx_unmasked & (TG3_RX_STD_MAX_SIZE_5700 - 1);
        desc = &tpr->rx_std[dest_idx];

        /* Do not overwrite any of the map or rp information
         * until we are sure we can commit to a new buffer.
         *
         * Callers depend upon this behavior and assume that
         * we leave everything unchanged if we fail.
         */
        iob = alloc_iob(TG3_RX_STD_DMA_SZ);
        if (iob == NULL)
                return -ENOMEM;

        iob_idx = dest_idx % TG3_DEF_RX_RING_PENDING;
        tpr->rx_iobufs[iob_idx] = iob;

        mapping = virt_to_bus(iob->data);

        desc->addr_hi = ((u64)mapping >> 32);
        desc->addr_lo = ((u64)mapping & 0xffffffff);

        return 0;
}
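
/* Editor's illustration: the bus address is split across the two 32-bit
 * descriptor words, e.g. mapping == 0x0000000123456789 would yield
 * addr_hi == 0x00000001 and addr_lo == 0x23456789.
 */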

static void tg3_refill_prod_ring(struct tg3 *tp)
{ DBGP("%s\n", __func__);

        struct tg3_rx_prodring_set *tpr = &tp->prodring;
        int idx = tpr->rx_std_prod_idx;

        DBGCP(tp->dev, "%s\n", __func__);

        while (tpr->rx_std_iob_cnt < TG3_DEF_RX_RING_PENDING) {
                if (tpr->rx_iobufs[idx % TG3_DEF_RX_RING_PENDING] == NULL) {
                        if (tg3_alloc_rx_iob(tpr, idx) < 0) {
                                DBGC(tp->dev, "alloc_iob() failed for descriptor %d\n", idx);
                                break;
                        }
                        DBGC2(tp->dev, "allocated iob_buffer for descriptor %d\n", idx);
                }

                idx = (idx + 1) % TG3_RX_STD_MAX_SIZE_5700;
                tpr->rx_std_iob_cnt++;
        }

        tpr->rx_std_prod_idx = idx;
        tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx);
}
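
/* Editor's note: two wrap bases are in play above. The hardware producer
 * index wraps at TG3_RX_STD_MAX_SIZE_5700, while the io_buffer pool wraps
 * at TG3_DEF_RX_RING_PENDING; e.g. if TG3_DEF_RX_RING_PENDING were 32,
 * hardware descriptor 33 would reuse io_buffer slot 1 (33 % 32 == 1),
 * matching the iob_idx calculation in tg3_alloc_rx_iob().
 */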

static void tg3_rx_complete(struct net_device *dev)
{ DBGP("%s\n", __func__);

        struct tg3 *tp = netdev_priv(dev);

        u32 sw_idx = tp->rx_rcb_ptr;
        u16 hw_idx;
        struct tg3_rx_prodring_set *tpr = &tp->prodring;

        hw_idx = *(tp->rx_rcb_prod_idx);

        while (sw_idx != hw_idx) {
                struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
                u32 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                int iob_idx = desc_idx % TG3_DEF_RX_RING_PENDING;
                struct io_buffer *iob = tpr->rx_iobufs[iob_idx];
                unsigned int len;

                DBGC2(dev, "RX - desc_idx: %d sw_idx: %d hw_idx: %d\n", desc_idx, sw_idx, hw_idx);

                assert(iob != NULL);

                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
                        /* drop packet */
                        DBGC(dev, "Corrupted packet received\n");
                        netdev_rx_err(dev, iob, -EINVAL);
                } else {
                        len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
                              ETH_FCS_LEN;
                        iob_put(iob, len);
                        netdev_rx(dev, iob);

                        DBGC2(dev, "Received packet: %d bytes %d %d\n", len, sw_idx, hw_idx);
                }

                sw_idx++;
                sw_idx &= TG3_RX_RET_MAX_SIZE_5705 - 1;

                tpr->rx_iobufs[iob_idx] = NULL;
                tpr->rx_std_iob_cnt--;
        }

        tp->rx_rcb_ptr = sw_idx;

        tg3_refill_prod_ring(tp);
}

static void tg3_poll(struct net_device *dev)
{ DBGP("%s\n", __func__);

        struct tg3 *tp = netdev_priv(dev);

        /* ACK interrupts */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00);
        tp->hw_status->status &= ~SD_STATUS_UPDATED;

        tg3_poll_link(tp);
        tg3_tx_complete(dev);
        tg3_rx_complete(dev);
}
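
/* Editor's note: iPXE drivers are polled rather than interrupt driven;
 * the core's netdev_poll() dispatches here via netdev->op->poll, so this
 * routine must reap TX completions and received frames on every call.
 */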

static void tg3_close(struct net_device *dev)
{ DBGP("%s\n", __func__);

        struct tg3 *tp = netdev_priv(dev);

        tg3_halt(tp);
        tg3_rx_prodring_free(&tp->prodring);
        tg3_flag_clear(tp, INIT_COMPLETE);

        tg3_free_consistent(tp);
}

static void tg3_irq(struct net_device *dev, int enable)
{ DBGP("%s: %d\n", __func__, enable);

        struct tg3 *tp = netdev_priv(dev);

        if (enable)
                tg3_enable_ints(tp);
        else
                tg3_disable_ints(tp);
}

static struct net_device_operations tg3_netdev_ops = {
        .open = tg3_open,
        .close = tg3_close,
        .poll = tg3_poll,
        .transmit = tg3_transmit,
        .irq = tg3_irq,
};

#define TEST_BUFFER_SIZE 0x2000

int tg3_do_test_dma(struct tg3 *tp, u32 __unused *buf, dma_addr_t buf_dma, int size, int to_device);
void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val);

static int tg3_test_dma(struct tg3 *tp)
{ DBGP("%s\n", __func__);

        dma_addr_t buf_dma;
        u32 *buf, saved_dma_rwctrl;
        int ret = 0;

        buf = malloc_dma(TEST_BUFFER_SIZE, TG3_DMA_ALIGNMENT);
        if (!buf) {
                ret = -ENOMEM;
                goto out_nofree;
        }
        buf_dma = virt_to_bus(buf);
        DBGC2(tp->dev, "dma test buffer, virt: %p phys: %#08lx\n",
              buf, (unsigned long)buf_dma);

        tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                          (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

        if (tg3_flag(tp, 57765_PLUS))
                goto out;

        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
        } else if (!tg3_flag(tp, PCIX_MODE)) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
                        tp->dma_rwctrl |= 0x003f0000;
                else
                        tp->dma_rwctrl |= 0x003f000f;
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
                        u32 read_water = 0x7;

                        if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
                                read_water = 4;
                        /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |=
                                (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
                                (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
                                (1 << 23);
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
                        /* 5780 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00144000;
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                        /* 5714 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00148000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                tp->dma_rwctrl &= 0xfffffff0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                /* Remove this if it causes problems for some boards. */
                tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

                /* On 5700/5701 chips, we need to set this bit.
                 * Otherwise the chip will issue cacheline transactions
                 * to streamable DMA memory with not all the byte
                 * enables turned on. This is an error on several
                 * RISC PCI controllers, in particular sparc64.
                 *
                 * On 5703/5704 chips, this bit has been reassigned
                 * a different meaning. In particular, it is used
                 * on those chips to enable a PCI-X workaround.
                 */
                tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
        }

        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
        /* Unneeded, already done by tg3_get_invariants. */
        tg3_switch_clocks(tp);
#endif

        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                goto out;

        /* It is best to perform DMA test with maximum write burst size
         * to expose the 5700/5701 write DMA bug.
         */
        saved_dma_rwctrl = tp->dma_rwctrl;
        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

        while (1) {
                u32 *p = buf, i;

                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
                        p[i] = i;

                /* Send the buffer to the chip. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
                if (ret) {
                        DBGC(&tp->pdev->dev,
                             "%s: Buffer write failed. err = %d\n",
                             __func__, ret);
                        break;
                }

                /* validate data reached card RAM correctly. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        u32 val;
                        tg3_read_mem(tp, 0x2100 + (i*4), &val);
                        if (le32_to_cpu(val) != p[i]) {
                                DBGC(&tp->pdev->dev,
                                     "%s: Buffer corrupted on device! "
                                     "(%d != %d)\n", __func__, val, i);
                                /* ret = -ENODEV here? */
                        }
                        p[i] = 0;
                }

                /* Now read it back. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
                if (ret) {
                        DBGC(&tp->pdev->dev, "%s: Buffer read failed. "
                             "err = %d\n", __func__, ret);
                        break;
                }

                /* Verify it. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        if (p[i] == i)
                                continue;

                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
                            DMA_RWCTRL_WRITE_BNDRY_16) {
                                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                break;
                        } else {
                                DBGC(&tp->pdev->dev,
                                     "%s: Buffer corrupted on read back! "
                                     "(%d != %d)\n", __func__, p[i], i);
                                ret = -ENODEV;
                                goto out;
                        }
                }

                if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
                        /* Success. */
                        ret = 0;
                        break;
                }
        }

        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
            DMA_RWCTRL_WRITE_BNDRY_16) {
                /* DMA test passed without adjusting DMA boundary,
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
                 */
                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;

                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
        }

out:
        free_dma(buf, TEST_BUFFER_SIZE);
out_nofree:
        return ret;
}
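
/* Editor's summary: the test DMA-writes an incrementing word pattern to
 * NIC SRAM (verified via tg3_read_mem() at offset 0x2100), DMA-reads it
 * back into the host buffer, and on a read-back mismatch falls back to
 * the conservative DMA_RWCTRL_WRITE_BNDRY_16 write boundary to dodge the
 * 5700/5701 write-DMA bug.
 */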

static int tg3_init_one(struct pci_device *pdev)
{ DBGP("%s\n", __func__);

        struct net_device *dev;
        struct tg3 *tp;
        int err = 0;
        unsigned long reg_base, reg_size;

        adjust_pci_device(pdev);

        dev = alloc_etherdev(sizeof(*tp));
        if (!dev) {
                DBGC(&pdev->dev, "Failed to allocate etherdev\n");
                err = -ENOMEM;
                goto err_out_disable_pdev;
        }

        netdev_init(dev, &tg3_netdev_ops);
        pci_set_drvdata(pdev, dev);

        dev->dev = &pdev->dev;

        tp = netdev_priv(dev);
        tp->pdev = pdev;
        tp->dev = dev;
        tp->rx_mode = TG3_DEF_RX_MODE;
        tp->tx_mode = TG3_DEF_TX_MODE;

        /* Subsystem IDs are required later */
        pci_read_config_word(tp->pdev, PCI_SUBSYSTEM_VENDOR_ID, &tp->subsystem_vendor);
        pci_read_config_word(tp->pdev, PCI_SUBSYSTEM_ID, &tp->subsystem_device);

        /* The word/byte swap controls here control register access byte
         * swapping. DMA data byte swapping is controlled in the GRC_MODE
         * setting below.
         */
        tp->misc_host_ctrl =
                MISC_HOST_CTRL_MASK_PCI_INT |
                MISC_HOST_CTRL_WORD_SWAP |
                MISC_HOST_CTRL_INDIR_ACCESS |
                MISC_HOST_CTRL_PCISTATE_RW;

        /* The NONFRM (non-frame) byte/word swap controls take effect
         * on descriptor entries, anything which isn't packet data.
         *
         * The StrongARM chips on the board (one for tx, one for rx)
         * are running in big-endian mode.
         */
        tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
                        GRC_MODE_WSWAP_NONFRM_DATA);
#if __BYTE_ORDER == __BIG_ENDIAN
        tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif

        /* FIXME: how can we detect errors here? */
        reg_base = pci_bar_start(pdev, PCI_BASE_ADDRESS_0);
        reg_size = pci_bar_size(pdev, PCI_BASE_ADDRESS_0);

        tp->regs = ioremap(reg_base, reg_size);
        if (!tp->regs) {
                DBGC(&pdev->dev, "Failed to remap device registers\n");
                err = -ENOENT;
                goto err_out_disable_pdev;
        }

        err = tg3_get_invariants(tp);
        if (err) {
                DBGC(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
                goto err_out_iounmap;
        }

        tg3_init_bufmgr_config(tp);

        err = tg3_get_device_address(tp);
        if (err) {
                DBGC(&pdev->dev, "Could not obtain valid ethernet address, aborting\n");
                goto err_out_iounmap;
        }

        /*
         * Reset chip in case UNDI or EFI driver did not shut it down.
         * The DMA self test will enable WDMAC and we'll see (spurious)
         * pending DMA on the PCI bus at that point.
         */
        if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
            (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
                tg3_halt(tp);
        }

        err = tg3_test_dma(tp);
        if (err) {
                DBGC(&pdev->dev, "DMA engine test failed, aborting\n");
                goto err_out_iounmap;
        }

        tp->int_mbox = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
        tp->consmbox = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
        tp->prodmbox = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;

        tp->coal_now = HOSTCC_MODE_NOW;

        err = register_netdev(dev);
        if (err) {
                DBGC(&pdev->dev, "Cannot register net device, aborting\n");
                goto err_out_iounmap;
        }

        /* Call tg3_setup_phy() to start the autoneg process now, which
         * saves time over starting autoneg in tg3_open().
         */
        err = tg3_setup_phy(tp, 0);
        if (err) {
                DBGC(tp->dev, "tg3_setup_phy() call failed in %s\n", __func__);
                goto err_out_iounmap;
        }

        return 0;

err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
                tp->regs = NULL;
        }

        netdev_put(dev);

err_out_disable_pdev:
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void tg3_remove_one(struct pci_device *pci)
{ DBGP("%s\n", __func__);

        struct net_device *netdev = pci_get_drvdata(pci);

        unregister_netdev(netdev);
        netdev_nullify(netdev);
        netdev_put(netdev);
}

static struct pci_device_id tg3_nics[] = {
        PCI_ROM(0x14e4, 0x1644, "14e4-1644", "14e4-1644", 0),
        PCI_ROM(0x14e4, 0x1645, "14e4-1645", "14e4-1645", 0),
        PCI_ROM(0x14e4, 0x1646, "14e4-1646", "14e4-1646", 0),
        PCI_ROM(0x14e4, 0x1647, "14e4-1647", "14e4-1647", 0),
        PCI_ROM(0x14e4, 0x1648, "14e4-1648", "14e4-1648", 0),
        PCI_ROM(0x14e4, 0x164d, "14e4-164d", "14e4-164d", 0),
        PCI_ROM(0x14e4, 0x1653, "14e4-1653", "14e4-1653", 0),
        PCI_ROM(0x14e4, 0x1654, "14e4-1654", "14e4-1654", 0),
        PCI_ROM(0x14e4, 0x165d, "14e4-165d", "14e4-165d", 0),
        PCI_ROM(0x14e4, 0x165e, "14e4-165e", "14e4-165e", 0),
        PCI_ROM(0x14e4, 0x16a6, "14e4-16a6", "14e4-16a6", 0),
        PCI_ROM(0x14e4, 0x16a7, "14e4-16a7", "14e4-16a7", 0),
        PCI_ROM(0x14e4, 0x16a8, "14e4-16a8", "14e4-16a8", 0),
        PCI_ROM(0x14e4, 0x16c6, "14e4-16c6", "14e4-16c6", 0),
        PCI_ROM(0x14e4, 0x16c7, "14e4-16c7", "14e4-16c7", 0),
        PCI_ROM(0x14e4, 0x1696, "14e4-1696", "14e4-1696", 0),
        PCI_ROM(0x14e4, 0x169c, "14e4-169c", "14e4-169c", 0),
        PCI_ROM(0x14e4, 0x169d, "14e4-169d", "14e4-169d", 0),
        PCI_ROM(0x14e4, 0x170d, "14e4-170d", "14e4-170d", 0),
        PCI_ROM(0x14e4, 0x170e, "14e4-170e", "14e4-170e", 0),
        PCI_ROM(0x14e4, 0x1649, "14e4-1649", "14e4-1649", 0),
        PCI_ROM(0x14e4, 0x166e, "14e4-166e", "14e4-166e", 0),
        PCI_ROM(0x14e4, 0x1659, "14e4-1659", "14e4-1659", 0),
        PCI_ROM(0x14e4, 0x165a, "14e4-165a", "14e4-165a", 0),
        PCI_ROM(0x14e4, 0x1677, "14e4-1677", "14e4-1677", 0),
        PCI_ROM(0x14e4, 0x167d, "14e4-167d", "14e4-167d", 0),
        PCI_ROM(0x14e4, 0x167e, "14e4-167e", "14e4-167e", 0),
        PCI_ROM(0x14e4, 0x1600, "14e4-1600", "14e4-1600", 0),
        PCI_ROM(0x14e4, 0x1601, "14e4-1601", "14e4-1601", 0),
        PCI_ROM(0x14e4, 0x16f7, "14e4-16f7", "14e4-16f7", 0),
        PCI_ROM(0x14e4, 0x16fd, "14e4-16fd", "14e4-16fd", 0),
        PCI_ROM(0x14e4, 0x16fe, "14e4-16fe", "14e4-16fe", 0),
        PCI_ROM(0x14e4, 0x167a, "14e4-167a", "14e4-167a", 0),
        PCI_ROM(0x14e4, 0x1672, "14e4-1672", "14e4-1672", 0),
        PCI_ROM(0x14e4, 0x167b, "14e4-167b", "14e4-167b", 0),
        PCI_ROM(0x14e4, 0x1673, "14e4-1673", "14e4-1673", 0),
        PCI_ROM(0x14e4, 0x1674, "14e4-1674", "14e4-1674", 0),
        PCI_ROM(0x14e4, 0x169a, "14e4-169a", "14e4-169a", 0),
        PCI_ROM(0x14e4, 0x169b, "14e4-169b", "14e4-169b", 0),
        PCI_ROM(0x14e4, 0x1693, "14e4-1693", "14e4-1693", 0),
        PCI_ROM(0x14e4, 0x167f, "14e4-167f", "14e4-167f", 0),
        PCI_ROM(0x14e4, 0x1668, "14e4-1668", "14e4-1668", 0),
        PCI_ROM(0x14e4, 0x1669, "14e4-1669", "14e4-1669", 0),
        PCI_ROM(0x14e4, 0x1678, "14e4-1678", "14e4-1678", 0),
        PCI_ROM(0x14e4, 0x1679, "14e4-1679", "14e4-1679", 0),
        PCI_ROM(0x14e4, 0x166a, "14e4-166a", "14e4-166a", 0),
        PCI_ROM(0x14e4, 0x166b, "14e4-166b", "14e4-166b", 0),
        PCI_ROM(0x14e4, 0x16dd, "14e4-16dd", "14e4-16dd", 0),
        PCI_ROM(0x14e4, 0x1712, "14e4-1712", "14e4-1712", 0),
        PCI_ROM(0x14e4, 0x1713, "14e4-1713", "14e4-1713", 0),
        PCI_ROM(0x14e4, 0x1698, "14e4-1698", "14e4-1698", 0),
        PCI_ROM(0x14e4, 0x1684, "14e4-1684", "14e4-1684", 0),
        PCI_ROM(0x14e4, 0x165b, "14e4-165b", "14e4-165b", 0),
        PCI_ROM(0x14e4, 0x1681, "14e4-1681", "14e4-1681", 0),
        PCI_ROM(0x14e4, 0x1680, "14e4-1680", "14e4-1680", 0),
        PCI_ROM(0x14e4, 0x1688, "14e4-1688", "14e4-1688", 0),
        PCI_ROM(0x14e4, 0x1689, "14e4-1689", "14e4-1689", 0),
        PCI_ROM(0x14e4, 0x1699, "14e4-1699", "14e4-1699", 0),
        PCI_ROM(0x14e4, 0x16a0, "14e4-16a0", "14e4-16a0", 0),
        PCI_ROM(0x14e4, 0x1692, "14e4-1692", "14e4-1692", 0),
        PCI_ROM(0x14e4, 0x1690, "14e4-1690", "14e4-1690", 0),
        PCI_ROM(0x14e4, 0x1694, "14e4-1694", "14e4-1694", 0),
        PCI_ROM(0x14e4, 0x1691, "14e4-1691", "14e4-1691", 0),
        PCI_ROM(0x14e4, 0x1655, "14e4-1655", "14e4-1655", 0),
        PCI_ROM(0x14e4, 0x1656, "14e4-1656", "14e4-1656", 0),
        PCI_ROM(0x14e4, 0x16b1, "14e4-16b1", "14e4-16b1", 0),
        PCI_ROM(0x14e4, 0x16b5, "14e4-16b5", "14e4-16b5", 0),
        PCI_ROM(0x14e4, 0x16b0, "14e4-16b0", "14e4-16b0", 0),
        PCI_ROM(0x14e4, 0x16b4, "14e4-16b4", "14e4-16b4", 0),
        PCI_ROM(0x14e4, 0x16b2, "14e4-16b2", "14e4-16b2", 0),
        PCI_ROM(0x14e4, 0x16b6, "14e4-16b6", "14e4-16b6", 0),
        PCI_ROM(0x14e4, 0x1657, "14e4-1657", "14e4-1657", 0),
        PCI_ROM(0x14e4, 0x165f, "14e4-165f", "14e4-165f", 0),
        PCI_ROM(0x1148, 0x4400, "1148-4400", "1148-4400", 0),
        PCI_ROM(0x1148, 0x4500, "1148-4500", "1148-4500", 0),
        PCI_ROM(0x173b, 0x03e8, "173b-03e8", "173b-03e8", 0),
        PCI_ROM(0x173b, 0x03e9, "173b-03e9", "173b-03e9", 0),
        PCI_ROM(0x173b, 0x03eb, "173b-03eb", "173b-03eb", 0),
        PCI_ROM(0x173b, 0x03ea, "173b-03ea", "173b-03ea", 0),
        PCI_ROM(0x106b, 0x1645, "106b-1645", "106b-1645", 0),
};

struct pci_driver tg3_pci_driver __pci_driver = {
        .ids = tg3_nics,
        .id_count = ARRAY_SIZE(tg3_nics),
        .probe = tg3_init_one,
        .remove = tg3_remove_one,
};
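
/* Editor's note: the __pci_driver attribute places this struct in iPXE's
 * linker-generated PCI driver table; at probe time iPXE matches each
 * device's vendor:device pair against tg3_nics[] and calls tg3_init_one()
 * on a hit, and tg3_remove_one() on removal.
 */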