
[vxge] Add support for X3100 series 10GbE Server/Storage Adapter

Signed-off-by: Sivakumar Subramani <sivakumar.subramani@neterion.com>
Signed-off-by: Masroor Vettuparambil <masroor.vettuparambil@neterion.com>
Signed-off-by: Stefan Hajnoczi <stefanha@gmail.com>
Signed-off-by: Marty Connor <mdc@etherboot.org>
tags/v1.20.1
Masroor Vettuparambil, 15 years ago
parent commit f5f8ee00fc

src/Makefile (+1 -0)

@@ -62,6 +62,7 @@ SRCDIRS		+= drivers/net/e1000
 SRCDIRS		+= drivers/net/phantom
 SRCDIRS		+= drivers/net/rtl818x
 SRCDIRS		+= drivers/net/ath5k
+SRCDIRS		+= drivers/net/vxge
 SRCDIRS		+= drivers/block
 SRCDIRS		+= drivers/nvs
 SRCDIRS		+= drivers/bitbash

src/drivers/net/vxge/vxge_config.c (+1834 -0)
(file diff suppressed because it is too large)


src/drivers/net/vxge/vxge_config.h (+787 -0)

@@ -0,0 +1,787 @@
+/*
+ * vxge-config.h: gPXE driver for Neterion Inc's X3100 Series 10GbE
+ *              PCIe I/O Virtualized Server Adapter.
+ *
+ * Copyright(c) 2002-2010 Neterion Inc.
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by
+ * reference.  Drivers based on or derived from this code fall under
+ * the GPL and must retain the authorship, copyright and license
+ * notice.
+ *
+ */
+
+FILE_LICENCE(GPL2_ONLY);
+
+#ifndef VXGE_CONFIG_H
+#define VXGE_CONFIG_H
+
+#include <stdint.h>
+#include <gpxe/list.h>
+#include <gpxe/pci.h>
+
+#ifndef VXGE_CACHE_LINE_SIZE
+#define VXGE_CACHE_LINE_SIZE 4096
+#endif
+
+#define WAIT_FACTOR          1
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a)  (sizeof(a) / sizeof((a)[0]))
+#endif
+
+#define VXGE_HW_MAC_MAX_WIRE_PORTS      2
+#define VXGE_HW_MAC_MAX_AGGR_PORTS      2
+#define VXGE_HW_MAC_MAX_PORTS           3
+
+#define VXGE_HW_MIN_MTU				68
+#define VXGE_HW_MAX_MTU				9600
+#define VXGE_HW_DEFAULT_MTU			1500
+
+#ifndef __iomem
+#define __iomem
+#endif
+
+#ifndef ____cacheline_aligned
+#define ____cacheline_aligned
+#endif
+
+/**
+ * debug filtering masks
+ */
+#define	VXGE_NONE	0x00
+#define	VXGE_INFO	0x01
+#define	VXGE_INTR	0x02
+#define	VXGE_XMIT	0x04
+#define VXGE_POLL	0x08
+#define	VXGE_ERR	0x10
+#define VXGE_TRACE	0x20
+#define VXGE_ALL	(VXGE_INFO|VXGE_INTR|VXGE_XMIT\
+			|VXGE_POLL|VXGE_ERR|VXGE_TRACE)
+
+#define NULL_VPID					0xFFFFFFFF
+
+#define VXGE_HW_EVENT_BASE                      0
+#define VXGE_LL_EVENT_BASE                      100
+
+#define VXGE_HW_BASE_INF	100
+#define VXGE_HW_BASE_ERR	200
+#define VXGE_HW_BASE_BADCFG	300
+#define VXGE_HW_DEF_DEVICE_POLL_MILLIS            1000
+#define VXGE_HW_MAX_PAYLOAD_SIZE_512            2
+
+enum vxge_hw_status {
+	VXGE_HW_OK				  = 0,
+	VXGE_HW_FAIL				  = 1,
+	VXGE_HW_PENDING				  = 2,
+	VXGE_HW_COMPLETIONS_REMAIN		  = 3,
+
+	VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
+	VXGE_HW_INF_OUT_OF_DESCRIPTORS		  = VXGE_HW_BASE_INF + 2,
+	VXGE_HW_INF_SW_LRO_BEGIN		  = VXGE_HW_BASE_INF + 3,
+	VXGE_HW_INF_SW_LRO_CONT			  = VXGE_HW_BASE_INF + 4,
+	VXGE_HW_INF_SW_LRO_UNCAPABLE		  = VXGE_HW_BASE_INF + 5,
+	VXGE_HW_INF_SW_LRO_FLUSH_SESSION	  = VXGE_HW_BASE_INF + 6,
+	VXGE_HW_INF_SW_LRO_FLUSH_BOTH		  = VXGE_HW_BASE_INF + 7,
+
+	VXGE_HW_ERR_INVALID_HANDLE		  = VXGE_HW_BASE_ERR + 1,
+	VXGE_HW_ERR_OUT_OF_MEMORY		  = VXGE_HW_BASE_ERR + 2,
+	VXGE_HW_ERR_VPATH_NOT_AVAILABLE		  = VXGE_HW_BASE_ERR + 3,
+	VXGE_HW_ERR_VPATH_NOT_OPEN		  = VXGE_HW_BASE_ERR + 4,
+	VXGE_HW_ERR_WRONG_IRQ			  = VXGE_HW_BASE_ERR + 5,
+	VXGE_HW_ERR_SWAPPER_CTRL		  = VXGE_HW_BASE_ERR + 6,
+	VXGE_HW_ERR_INVALID_MTU_SIZE		  = VXGE_HW_BASE_ERR + 7,
+	VXGE_HW_ERR_INVALID_INDEX		  = VXGE_HW_BASE_ERR + 8,
+	VXGE_HW_ERR_INVALID_TYPE		  = VXGE_HW_BASE_ERR + 9,
+	VXGE_HW_ERR_INVALID_OFFSET		  = VXGE_HW_BASE_ERR + 10,
+	VXGE_HW_ERR_INVALID_DEVICE		  = VXGE_HW_BASE_ERR + 11,
+	VXGE_HW_ERR_VERSION_CONFLICT		  = VXGE_HW_BASE_ERR + 12,
+	VXGE_HW_ERR_INVALID_PCI_INFO		  = VXGE_HW_BASE_ERR + 13,
+	VXGE_HW_ERR_INVALID_TCODE		  = VXGE_HW_BASE_ERR + 14,
+	VXGE_HW_ERR_INVALID_BLOCK_SIZE		  = VXGE_HW_BASE_ERR + 15,
+	VXGE_HW_ERR_INVALID_STATE		  = VXGE_HW_BASE_ERR + 16,
+	VXGE_HW_ERR_PRIVILAGED_OPEARATION	  = VXGE_HW_BASE_ERR + 17,
+	VXGE_HW_ERR_INVALID_PORT		  = VXGE_HW_BASE_ERR + 18,
+	VXGE_HW_ERR_FIFO			  = VXGE_HW_BASE_ERR + 19,
+	VXGE_HW_ERR_VPATH			  = VXGE_HW_BASE_ERR + 20,
+	VXGE_HW_ERR_CRITICAL			  = VXGE_HW_BASE_ERR + 21,
+	VXGE_HW_ERR_SLOT_FREEZE			  = VXGE_HW_BASE_ERR + 22,
+	VXGE_HW_ERR_INVALID_MIN_BANDWIDTH	  = VXGE_HW_BASE_ERR + 25,
+	VXGE_HW_ERR_INVALID_MAX_BANDWIDTH	  = VXGE_HW_BASE_ERR + 26,
+	VXGE_HW_ERR_INVALID_TOTAL_BANDWIDTH	  = VXGE_HW_BASE_ERR + 27,
+	VXGE_HW_ERR_INVALID_BANDWIDTH_LIMIT	  = VXGE_HW_BASE_ERR + 28,
+	VXGE_HW_ERR_RESET_IN_PROGRESS		  = VXGE_HW_BASE_ERR + 29,
+	VXGE_HW_ERR_OUT_OF_SPACE		  = VXGE_HW_BASE_ERR + 30,
+	VXGE_HW_ERR_INVALID_FUNC_MODE		  = VXGE_HW_BASE_ERR + 31,
+	VXGE_HW_ERR_INVALID_DP_MODE		  = VXGE_HW_BASE_ERR + 32,
+	VXGE_HW_ERR_INVALID_FAILURE_BEHAVIOUR	  = VXGE_HW_BASE_ERR + 33,
+	VXGE_HW_ERR_INVALID_L2_SWITCH_STATE	  = VXGE_HW_BASE_ERR + 34,
+	VXGE_HW_ERR_INVALID_CATCH_BASIN_MODE	  = VXGE_HW_BASE_ERR + 35,
+
+	VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS	  = VXGE_HW_BASE_BADCFG + 1,
+	VXGE_HW_BADCFG_FIFO_BLOCKS		  = VXGE_HW_BASE_BADCFG + 2,
+	VXGE_HW_BADCFG_VPATH_MTU		  = VXGE_HW_BASE_BADCFG + 3,
+	VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG	  = VXGE_HW_BASE_BADCFG + 4,
+	VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH	  = VXGE_HW_BASE_BADCFG + 5,
+	VXGE_HW_BADCFG_VPATH_BANDWIDTH_LIMIT	  = VXGE_HW_BASE_BADCFG + 6,
+	VXGE_HW_BADCFG_INTR_MODE		  = VXGE_HW_BASE_BADCFG + 7,
+	VXGE_HW_BADCFG_RTS_MAC_EN		  = VXGE_HW_BASE_BADCFG + 8,
+	VXGE_HW_BADCFG_VPATH_AGGR_ACK		  = VXGE_HW_BASE_BADCFG + 9,
+	VXGE_HW_BADCFG_VPATH_PRIORITY		  = VXGE_HW_BASE_BADCFG + 10,
+
+	VXGE_HW_EOF_TRACE_BUF			  = -1
+};
+
+/**
+ * enum vxge_hw_device_link_state - Link state enumeration.
+ * @VXGE_HW_LINK_NONE: Invalid link state.
+ * @VXGE_HW_LINK_DOWN: Link is down.
+ * @VXGE_HW_LINK_UP: Link is up.
+ *
+ */
+enum vxge_hw_device_link_state {
+	VXGE_HW_LINK_NONE,
+	VXGE_HW_LINK_DOWN,
+	VXGE_HW_LINK_UP
+};
+
+/* forward declarations */
+struct vxge_vpath;
+struct __vxge_hw_virtualpath;
+
+/**
+ * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
+ *
+ * One buffer mode RxD for ring structure
+ */
+struct vxge_hw_ring_rxd_1 {
+	u64 host_control;
+	u64 control_0;
+#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0)		vxge_bVALn(ctrl0, 0, 7)
+
+#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER		vxge_mBIT(7)
+
+#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0)	vxge_bVALn(ctrl0, 8, 1)
+
+#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0)	vxge_bVALn(ctrl0, 9, 1)
+
+#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0)	vxge_bVALn(ctrl0, 10, 1)
+
+#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0)		vxge_bVALn(ctrl0, 12, 4)
+#define VXGE_HW_RING_RXD_T_CODE(val)			vxge_vBIT(val, 12, 4)
+
+#define VXGE_HW_RING_RXD_T_CODE_UNUSED		VXGE_HW_RING_T_CODE_UNUSED
+
+#define VXGE_HW_RING_RXD_SYN_GET(ctrl0)		vxge_bVALn(ctrl0, 16, 1)
+
+#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0)		vxge_bVALn(ctrl0, 17, 1)
+
+#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0)	vxge_bVALn(ctrl0, 18, 1)
+
+#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0)		vxge_bVALn(ctrl0, 19, 1)
+
+#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0)	vxge_bVALn(ctrl0, 20, 4)
+
+#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0)		vxge_bVALn(ctrl0, 24, 1)
+
+#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0)		vxge_bVALn(ctrl0, 25, 2)
+
+#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0)		vxge_bVALn(ctrl0, 27, 5)
+
+#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0)	vxge_bVALn(ctrl0, 32, 16)
+
+#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0)	vxge_bVALn(ctrl0, 48, 16)
+
+	u64 control_1;
+
+#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1)	vxge_bVALn(ctrl1, 2, 14)
+#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14)
+#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK		vxge_vBIT(0x3FFF, 2, 14)
+
+#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1)    vxge_bVALn(ctrl1, 16, 32)
+
+#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1)	vxge_bVALn(ctrl1, 48, 16)
+
+	u64 buffer0_ptr;
+};
+
+/**
+ * struct vxge_hw_fifo_txd - Transmit Descriptor
+ *
+ * Transmit descriptor (TxD). A fifo descriptor contains a configured
+ * number (list) of TxDs. For more details please refer to Titan User
+ * Guide, Section 5.4.2 "Transmit Descriptor (TxD) Format".
+ */
+struct vxge_hw_fifo_txd {
+	u64 control_0;
+#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER		vxge_mBIT(7)
+
+#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0)		vxge_bVALn(ctrl0, 12, 4)
+#define VXGE_HW_FIFO_TXD_T_CODE(val)			vxge_vBIT(val, 12, 4)
+#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED		VXGE_HW_FIFO_T_CODE_UNUSED
+
+#define VXGE_HW_FIFO_TXD_GATHER_CODE(val)		vxge_vBIT(val, 22, 2)
+#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST	VXGE_HW_FIFO_GATHER_CODE_FIRST
+#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST	VXGE_HW_FIFO_GATHER_CODE_LAST
+
+#define VXGE_HW_FIFO_TXD_LSO_EN				vxge_mBIT(30)
+#define VXGE_HW_FIFO_TXD_LSO_MSS(val)			vxge_vBIT(val, 34, 14)
+#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val)		vxge_vBIT(val, 48, 16)
+
+	u64 control_1;
+#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN			vxge_mBIT(5)
+#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN			vxge_mBIT(6)
+#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN			vxge_mBIT(7)
+#define VXGE_HW_FIFO_TXD_VLAN_ENABLE			vxge_mBIT(15)
+
+#define VXGE_HW_FIFO_TXD_VLAN_TAG(val)			vxge_vBIT(val, 16, 16)
+#define VXGE_HW_FIFO_TXD_NO_BW_LIMIT			vxge_mBIT(43)
+
+#define VXGE_HW_FIFO_TXD_INT_NUMBER(val)		vxge_vBIT(val, 34, 6)
+
+#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST		vxge_mBIT(46)
+#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ			vxge_mBIT(47)
+
+	u64 buffer_pointer;
+
+	u64 host_control;
+};
+
+/**
+ * struct vxge_hw_device_date - Date Format
+ * @day: Day
+ * @month: Month
+ * @year: Year
+ * @date: Date in string format
+ *
+ * Structure for returning date
+ */
+
+#define VXGE_HW_FW_STRLEN	32
+struct vxge_hw_device_date {
+	u32     day;
+	u32     month;
+	u32     year;
+	char    date[VXGE_HW_FW_STRLEN];
+};
+
+struct vxge_hw_device_version {
+	u32     major;
+	u32     minor;
+	u32     build;
+	char    version[VXGE_HW_FW_STRLEN];
+};
+
+u64 __vxge_hw_vpath_pci_func_mode_get(
+	u32 vp_id,
+	struct vxge_hw_vpath_reg __iomem *vpath_reg);
+
+/*
+ * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
+ * @control_0: Bits 0 to 7 - Doorbell type.
+ *             Bits 8 to 31 - Reserved.
+ *             Bits 32 to 39 - The highest TxD in this TxDL.
+ *             Bits 40 to 47 - Reserved.
+ *             Bits 48 to 55 - Reserved.
+ *             Bits 56 to 63 - No snoop flags.
+ * @txdl_ptr:  The starting location of the TxDL in host memory.
+ *
+ * Created by the host and written to the adapter via PIO to a Kernel Doorbell
+ * FIFO. All non-offload doorbell wrapper fields must be written by the host as
+ * part of a doorbell write. Consumed by the adapter but is not written by the
+ * adapter.
+ */
+struct __vxge_hw_non_offload_db_wrapper {
+	u64		control_0;
+#define	VXGE_HW_NODBW_GET_TYPE(ctrl0)			vxge_bVALn(ctrl0, 0, 8)
+#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8)
+#define	VXGE_HW_NODBW_TYPE_NODBW				0
+
+#define	VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0)	vxge_bVALn(ctrl0, 32, 8)
+#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8)
+
+#define	VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0)		vxge_bVALn(ctrl0, 56, 8)
+#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8)
+#define	VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE		0x2
+#define	VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ		0x1
+
+	u64		txdl_ptr;
+};
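To make the doorbell protocol described above concrete, here is a hedged sketch (not part of this commit) of how a single-TxDL doorbell might be composed from these macros. The function name is hypothetical, the write ordering is an assumption, and writeq()/wmb() are the helpers available elsewhere in this header:

	/* Sketch: compose and write a non-offload doorbell for one TxDL.
	 * All wrapper fields are written by the host, per the comment above. */
	static inline void vxge_sketch_post_txdl(
			struct __vxge_hw_non_offload_db_wrapper __iomem *db,
			u64 txdl_dma, u32 last_txd)
	{
		u64 ctrl = VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
			VXGE_HW_NODBW_LAST_TXD_NUMBER(last_txd) |
			VXGE_HW_NODBW_LIST_NO_SNOOP(
				VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ);

		writeq(ctrl, &db->control_0);
		wmb();	/* assumed: control word lands before the TxDL pointer */
		writeq(txdl_dma, &db->txdl_ptr);
	}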
+
+/*
+ * struct __vxge_hw_fifo - Fifo.
+ * @vp_id: Virtual path id
+ * @tx_intr_num: Interrupt Number associated with the TX
+ * @txdl: Start pointer of the txdl list of this fifo.
+ *        gPXE does not support TX fragmentation, so we need
+ *        only one TxD in a list
+ * @depth: total number of lists in this fifo
+ * @hw_offset: TxD index from which the adapter owns the TxD list
+ * @sw_offset: TxD index from which the driver owns the TxD list
+ *
+ */
+struct __vxge_hw_fifo {
+	struct vxge_hw_vpath_reg		*vp_reg;
+	struct __vxge_hw_non_offload_db_wrapper	*nofl_db;
+	u32					vp_id;
+	u32					tx_intr_num;
+
+	struct vxge_hw_fifo_txd		*txdl;
+#define VXGE_HW_FIFO_TXD_DEPTH 128
+	u16				depth;
+	u16				hw_offset;
+	u16				sw_offset;
+
+	struct __vxge_hw_virtualpath    *vpathh;
+};
+
+/* Structure that represents the Rx descriptor block which contains
+ * 128 Rx descriptors.
+ */
+struct __vxge_hw_ring_block {
+#define VXGE_HW_MAX_RXDS_PER_BLOCK_1            127
+	struct vxge_hw_ring_rxd_1 rxd[VXGE_HW_MAX_RXDS_PER_BLOCK_1];
+
+	u64 reserved_0;
+#define END_OF_BLOCK    0xFEFFFFFFFFFFFFFFULL
+	/* 0xFEFFFFFFFFFFFFFF to mark last RxD in this block */
+	u64 reserved_1;
+	/* Logical ptr to next */
+	u64 reserved_2_pNext_RxD_block;
+	/* Buff0_ptr. On a 32-bit arch the upper 32 bits should be 0 */
+	u64 pNext_RxD_Blk_physical;
+};
+
+/*
+ * struct __vxge_hw_ring - Ring channel.
+ *
+ * Note: The structure is cache line aligned to better utilize
+ *       CPU cache performance.
+ */
+struct __vxge_hw_ring {
+	struct vxge_hw_vpath_reg		*vp_reg;
+	struct vxge_hw_common_reg		*common_reg;
+	u32					vp_id;
+#define VXGE_HW_RING_RXD_QWORDS_MODE_1	4
+	u32					doorbell_cnt;
+	u32					total_db_cnt;
+#define VXGE_HW_RING_RXD_QWORD_LIMIT	16
+	u64					rxd_qword_limit;
+
+	struct __vxge_hw_ring_block		*rxdl;
+#define VXGE_HW_RING_BUF_PER_BLOCK	9
+	u16					buf_per_block;
+	u16					rxd_offset;
+
+#define VXGE_HW_RING_RX_POLL_WEIGHT	8
+	u16					rx_poll_weight;
+
+	struct io_buffer *iobuf[VXGE_HW_RING_BUF_PER_BLOCK + 1];
+	struct __vxge_hw_virtualpath *vpathh;
+};
+
+/*
+ * struct __vxge_hw_virtualpath - Virtual Path
+ *
+ * Virtual path structure to encapsulate the data related to a virtual path.
+ * Virtual paths are allocated by the HW upon getting configuration from the
+ * driver and inserted into the list of virtual paths.
+ */
+struct __vxge_hw_virtualpath {
+	u32				vp_id;
+
+	u32				vp_open;
+#define VXGE_HW_VP_NOT_OPEN	0
+#define	VXGE_HW_VP_OPEN		1
+
+	struct __vxge_hw_device		*hldev;
+	struct vxge_hw_vpath_reg	*vp_reg;
+	struct vxge_hw_vpmgmt_reg	*vpmgmt_reg;
+	struct __vxge_hw_non_offload_db_wrapper	*nofl_db;
+
+	u32				max_mtu;
+	u32				vsport_number;
+	u32				max_kdfc_db;
+	u32				max_nofl_db;
+
+	struct __vxge_hw_ring ringh;
+	struct __vxge_hw_fifo fifoh;
+};
+#define VXGE_HW_INFO_LEN	64
+#define VXGE_HW_PMD_INFO_LEN	16
+#define VXGE_MAX_PRINT_BUF_SIZE	128
+/**
+ * struct vxge_hw_device_hw_info - Device information
+ * @host_type: Host Type
+ * @func_id: Function Id
+ * @vpath_mask: vpath bit mask
+ * @fw_version: Firmware version
+ * @fw_date: Firmware Date
+ * @flash_version: Flash version
+ * @flash_date: Flash Date
+ * @mac_addrs: Mac addresses for each vpath
+ * @mac_addr_masks: Mac address masks for each vpath
+ *
+ * Returns the vpath mask that has the bits set for each vpath allocated
+ * for the driver and the first mac address for each vpath
+ */
+struct vxge_hw_device_hw_info {
+	u32		host_type;
+#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION			0
+#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION			1
+#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0				2
+#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION			3
+#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG			4
+#define VXGE_HW_SR_VH_FUNCTION0					5
+#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION				6
+#define VXGE_HW_VH_NORMAL_FUNCTION				7
+	u64		function_mode;
+#define VXGE_HW_FUNCTION_MODE_MIN				0
+#define VXGE_HW_FUNCTION_MODE_MAX				10
+
+#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION			0
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION			1
+#define VXGE_HW_FUNCTION_MODE_SRIOV				2
+#define VXGE_HW_FUNCTION_MODE_MRIOV				3
+#define VXGE_HW_FUNCTION_MODE_MRIOV_8				4
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17			5
+#define VXGE_HW_FUNCTION_MODE_SRIOV_8				6
+#define VXGE_HW_FUNCTION_MODE_SRIOV_4				7
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2			8
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4			9
+#define VXGE_HW_FUNCTION_MODE_MRIOV_4				10
+
+	u32		func_id;
+	u64		vpath_mask;
+	struct vxge_hw_device_version fw_version;
+	struct vxge_hw_device_date    fw_date;
+	struct vxge_hw_device_version flash_version;
+	struct vxge_hw_device_date    flash_date;
+	u8		serial_number[VXGE_HW_INFO_LEN];
+	u8		part_number[VXGE_HW_INFO_LEN];
+	u8		product_desc[VXGE_HW_INFO_LEN];
+	u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
+	u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
+};
+
+/**
+ * struct __vxge_hw_device - HAL device object
+ * @magic: Magic Number
+ * @bar0: BAR0 virtual address.
+ * @pdev: Physical device handle
+ * @config: Configuration passed by the LL driver at initialization
+ * @link_state: Link state
+ *
+ * HW device object. Represents Titan adapter
+ */
+struct __vxge_hw_device {
+	u32				magic;
+#define VXGE_HW_DEVICE_MAGIC		0x12345678
+#define VXGE_HW_DEVICE_DEAD		0xDEADDEAD
+	void __iomem			*bar0;
+	struct pci_device		*pdev;
+	struct net_device		*ndev;
+	struct vxgedev			*vdev;
+
+	enum vxge_hw_device_link_state	link_state;
+
+	u32				host_type;
+	u32				func_id;
+	u8				titan1;
+	u32				access_rights;
+#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH      0x1
+#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM     0x2
+#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM     0x4
+	struct vxge_hw_legacy_reg	*legacy_reg;
+	struct vxge_hw_toc_reg		*toc_reg;
+	struct vxge_hw_common_reg	*common_reg;
+	struct vxge_hw_mrpcim_reg	*mrpcim_reg;
+	struct vxge_hw_srpcim_reg	*srpcim_reg \
+					[VXGE_HW_TITAN_SRPCIM_REG_SPACES];
+	struct vxge_hw_vpmgmt_reg	*vpmgmt_reg \
+					[VXGE_HW_TITAN_VPMGMT_REG_SPACES];
+	struct vxge_hw_vpath_reg	*vpath_reg \
+					[VXGE_HW_TITAN_VPATH_REG_SPACES];
+	u8				*kdfc;
+	u8				*usdc;
+	struct __vxge_hw_virtualpath	virtual_path;
+	u64				vpath_assignments;
+	u64				vpaths_deployed;
+	u32				first_vp_id;
+	u64				tim_int_mask0[4];
+	u32				tim_int_mask1[4];
+
+	struct vxge_hw_device_hw_info   hw_info;
+};
+
+#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)
+
+#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) {	\
+	if (i < 16) {					\
+		m0[0] |= vxge_vBIT(0x8, (i*4), 4);	\
+		m0[1] |= vxge_vBIT(0x4, (i*4), 4);	\
+	}						\
+	else {						\
+		m1[0] = 0x80000000;			\
+		m1[1] = 0x40000000;			\
+	}						\
+}
+
+#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) {	\
+	if (i < 16) {					\
+		m0[0] &= ~vxge_vBIT(0x8, (i*4), 4);	\
+		m0[1] &= ~vxge_vBIT(0x4, (i*4), 4);	\
+	}						\
+	else {						\
+		m1[0] = 0;				\
+		m1[1] = 0;				\
+	}						\
+}
+
+/**
+ * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
+ * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
+ * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
+ * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
+ * device.
+ * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
+ * filling-in and posting later.
+ *
+ * Titan/HW descriptor states.
+ *
+ */
+enum vxge_hw_txdl_state {
+	VXGE_HW_TXDL_STATE_NONE	= 0,
+	VXGE_HW_TXDL_STATE_AVAIL	= 1,
+	VXGE_HW_TXDL_STATE_POSTED	= 2,
+	VXGE_HW_TXDL_STATE_FREED	= 3
+};
+
+/* fifo and ring circular buffer offset tracking apis */
+static inline void __vxge_hw_desc_offset_up(u16 upper_limit,
+			u16 *offset)
+{
+	if (++(*offset) >= upper_limit)
+		*offset = 0;
+}
+
+/* rxd offset handling apis */
+static inline void vxge_hw_ring_rxd_offset_up(u16 *offset)
+{
+	__vxge_hw_desc_offset_up(VXGE_HW_MAX_RXDS_PER_BLOCK_1,
+			offset);
+}
+/* txd offset handling apis */
+static inline void vxge_hw_fifo_txd_offset_up(u16 *offset)
+{
+	__vxge_hw_desc_offset_up(VXGE_HW_FIFO_TXD_DEPTH, offset);
+}
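A quick illustration of the wraparound: offsets advance modulo the depth, so with VXGE_HW_FIFO_TXD_DEPTH of 128 an offset of 127 wraps back to 0 (ring offsets wrap at VXGE_HW_MAX_RXDS_PER_BLOCK_1, i.e. 127). The demo function below is hypothetical:

	static inline void vxge_sketch_offset_demo(void)
	{
		u16 off = VXGE_HW_FIFO_TXD_DEPTH - 1;	/* 127 */

		vxge_hw_fifo_txd_offset_up(&off);	/* wraps: off == 0 */
	}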
+
+/**
+ * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
+ * @rxdp: Descriptor.
+ * @iob: I/O buffer carrying the single receive buffer this descriptor
+ * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
+ * the receive buffer should be already mapped to the device.
+ * @size: Size of the receive buffer.
+ *
+ * Prepare 1-buffer-mode Rx descriptor for posting
+ * (via vxge_hw_ring_rxd_post()).
+ *
+ * This inline helper-function does not return a value and always
+ * succeeds.
+ *
+ */
+static inline
+void vxge_hw_ring_rxd_1b_set(struct vxge_hw_ring_rxd_1 *rxdp,
+	struct io_buffer *iob, u32 size)
+{
+	rxdp->host_control = (intptr_t)(iob);
+	rxdp->buffer0_ptr = virt_to_bus(iob->data);
+	rxdp->control_1	&= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
+	rxdp->control_1	|= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
+}
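As an illustration of how the receive path might use this helper, here is a sketch (not from this commit) that attaches a freshly allocated I/O buffer to an RxD; it assumes alloc_iob() from <gpxe/iobuf.h>, and the final ownership handover is an assumption based on the OWN_ADAPTER bit defined above:

	static int vxge_sketch_refill_rxd(struct vxge_hw_ring_rxd_1 *rxdp)
	{
		struct io_buffer *iob = alloc_iob(VXGE_HW_DEFAULT_MTU);

		if (!iob)
			return -ENOBUFS;
		vxge_hw_ring_rxd_1b_set(rxdp, iob, VXGE_HW_DEFAULT_MTU);
		/* Assumed: hand the descriptor back to the adapter */
		rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
		return 0;
	}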
+
+enum vxge_hw_status vxge_hw_device_hw_info_get(
+	void __iomem *bar0,
+	struct vxge_hw_device_hw_info *hw_info);
+
+enum vxge_hw_status
+__vxge_hw_vpath_fw_ver_get(
+	struct vxge_hw_vpath_reg __iomem *vpath_reg,
+	struct vxge_hw_device_hw_info *hw_info);
+
+enum vxge_hw_status
+__vxge_hw_vpath_card_info_get(
+	struct vxge_hw_vpath_reg __iomem *vpath_reg,
+	struct vxge_hw_device_hw_info *hw_info);
+
+/**
+ * vxge_hw_device_link_state_get - Get link state.
+ * @devh: HW device handle.
+ *
+ * Get link state.
+ * Returns: link state.
+ */
+static inline
+enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
+	struct __vxge_hw_device *devh)
+{
+	return devh->link_state;
+}
+
+void vxge_hw_device_terminate(struct __vxge_hw_device *devh);
+
+enum vxge_hw_status vxge_hw_device_initialize(
+	struct __vxge_hw_device **devh,
+	void *bar0,
+	struct pci_device *pdev,
+	u8 titan1);
+
+enum vxge_hw_status
+vxge_hw_vpath_open(struct __vxge_hw_device *hldev, struct vxge_vpath *vpath);
+
+enum vxge_hw_status
+__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);
+
+enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_virtualpath *vpath);
+
+enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_virtualpath *vpath);
+
+enum vxge_hw_status
+vxge_hw_vpath_recover_from_reset(struct __vxge_hw_virtualpath *vpath);
+
+void
+vxge_hw_vpath_enable(struct __vxge_hw_virtualpath *vpath);
+
+enum vxge_hw_status
+vxge_hw_vpath_mtu_set(struct __vxge_hw_virtualpath *vpath, u32 new_mtu);
+
+void
+vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_virtualpath *vpath);
+
+void
+__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
+
+enum vxge_hw_status
+__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);
+
+enum vxge_hw_status
+__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
+	struct vxge_hw_vpath_reg __iomem *vpath_reg);
+
+enum vxge_hw_status
+__vxge_hw_device_register_poll(
+	void __iomem	*reg,
+	u64 mask, u32 max_millis);
+
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+	u64 ret = 0;
+	ret = readl(addr + 4);
+	ret <<= 32;
+	ret |= readl(addr);
+
+	return ret;
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+	writel((u32) (val), addr);
+	writel((u32) (val >> 32), (addr + 4));
+}
+#endif
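The body of __vxge_hw_device_register_poll() lives in vxge_config.c, whose diff is suppressed above. A plausible sketch, assuming gPXE's udelay() and the readq() fallback just defined, would simply spin until the masked bits clear or the timeout elapses:

	/* Sketch only: poll a 64-bit register until (value & mask) == 0,
	 * giving up after roughly max_millis milliseconds. */
	static enum vxge_hw_status vxge_sketch_register_poll(void __iomem *reg,
			u64 mask, u32 max_millis)
	{
		u32 i;

		for (i = 0; i < max_millis * 100; i++) {
			if (!(readq(reg) & mask))
				return VXGE_HW_OK;
			udelay(10);	/* 100 polls per millisecond */
		}
		return VXGE_HW_FAIL;
	}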
+
+static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
+{
+	writel(val, addr + 4);
+}
+
+static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
+{
+	writel(val, addr);
+}
+
+static inline enum vxge_hw_status
+__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
+			  u64 mask, u32 max_millis)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
+	wmb();
+	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
+	wmb();
+
+	status = __vxge_hw_device_register_poll(addr, mask, max_millis);
+	return status;
+}
+
+struct vxge_hw_toc_reg __iomem *
+__vxge_hw_device_toc_get(void __iomem *bar0);
+
+enum vxge_hw_status
+__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
+
+void
+__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+__vxge_hw_vpath_pci_read(
+	struct __vxge_hw_virtualpath	*vpath,
+	u32			phy_func_0,
+	u32			offset,
+	u32			*val);
+
+enum vxge_hw_status
+__vxge_hw_vpath_addr_get(
+	struct vxge_hw_vpath_reg __iomem *vpath_reg,
+	u8 (macaddr)[ETH_ALEN],
+	u8 (macaddr_mask)[ETH_ALEN]);
+
+u32
+__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
+
+enum vxge_hw_status
+__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
+
+enum vxge_hw_status
+vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
+
+/**
+ * vxge_debug
+ * @mask: debug mask for the message
+ * @fmt: printf-like format string
+ */
+static const u16 debug_filter = VXGE_ERR;
+#define vxge_debug(mask, fmt...)	do {		\
+		if (debug_filter & mask)	\
+			DBG(fmt);		\
+	} while (0)
+
+#define vxge_trace()	vxge_debug(VXGE_TRACE, "%s:%d\n", __func__, __LINE__)
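Usage note: the mask argument is ANDed against the compile-time debug_filter, so with the default filter of VXGE_ERR only error-class messages reach DBG(). Hypothetical call sites:

	vxge_debug(VXGE_ERR, "%s: transmit failed\n", dev->name);   /* printed */
	vxge_debug(VXGE_INFO, "%s: link is up\n", dev->name);       /* filtered out */
	vxge_trace();   /* "%s:%d" breadcrumb, visible only with VXGE_TRACE set */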
+
+enum vxge_hw_status
+vxge_hw_get_func_mode(struct __vxge_hw_device *hldev, u32 *func_mode);
+
+enum vxge_hw_status
+vxge_hw_set_fw_api(struct __vxge_hw_device *hldev,
+		u64 vp_id, u32 action,
+		u32 offset, u64 data0, u64 data1);
+void
+vxge_hw_vpath_set_zero_rx_frm_len(struct __vxge_hw_device *hldev);
+
+#endif

src/drivers/net/vxge/vxge_main.c (+724 -0)

@@ -0,0 +1,724 @@
+/*
+ * vxge-main.c: gPXE driver for Neterion Inc's X3100 Series 10GbE
+ *              PCIe I/O Virtualized Server Adapter.
+ *
+ * Copyright(c) 2002-2010 Neterion Inc.
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by
+ * reference.  Drivers based on or derived from this code fall under
+ * the GPL and must retain the authorship, copyright and license
+ * notice.
+ *
+ */
+
+FILE_LICENCE(GPL2_ONLY);
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <gpxe/io.h>
+#include <errno.h>
+#include <byteswap.h>
+#include <gpxe/pci.h>
+#include <gpxe/malloc.h>
+#include <gpxe/if_ether.h>
+#include <gpxe/ethernet.h>
+#include <gpxe/iobuf.h>
+#include <gpxe/netdevice.h>
+#include <gpxe/timer.h>
+#include <nic.h>
+
+#include "vxge_main.h"
+#include "vxge_reg.h"
+
+/* function mode strings */
+static char *vxge_func_mode_names[] = {
+	"Single Function - 1 func, 17 vpath",
+	"Multi Function 8 - 8 func, 2 vpath per func",
+	"SRIOV 17 - 17 VF, 1 vpath per VF",
+	"WLPEX/SharedIO 17 - 17 VH, 1 vpath/func/hierarchy",
+	"WLPEX/SharedIO 8 - 8 VH, 2 vpath/func/hierarchy",
+	"Multi Function 17 - 17 func, 1 vpath per func",
+	"SRIOV 8 - 1 PF, 7 VF, 2 vpath per VF",
+	"SRIOV 4 - 1 PF, 3 VF, 4 vpath per VF",
+	"Multi Function 2 - 2 func, 8 vpath per func",
+	"Multi Function 4 - 4 func, 4 vpath per func",
+	"WLPEX/SharedIO 4 - 17 func, 1 vpath per func (PCIe ARI)",
+};
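The probe routine below indexes this table directly with the function_mode value reported by the adapter (0 through 10, matching the VXGE_HW_FUNCTION_MODE_* defines in vxge_config.h). A defensive lookup, sketched here with a hypothetical helper name, would clamp out-of-range values first:

	static const char *vxge_sketch_func_mode_name(u64 function_mode)
	{
		/* ARRAY_SIZE() comes from vxge_config.h */
		if (function_mode >= ARRAY_SIZE(vxge_func_mode_names))
			return "Unknown function mode";
		return vxge_func_mode_names[function_mode];
	}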
+
+static inline int is_vxge_card_up(struct vxgedev *vdev)
+{
+	return test_bit(__VXGE_STATE_CARD_UP, vdev->state);
+}
+
+/*
+ * vxge_xmit_compl
+ *
+ * If an interrupt was raised to indicate DMA complete of the Tx packet,
+ * this function is called. It identifies the last TxD whose buffer was
+ * freed and frees all I/O buffers whose data have already been DMA'ed
+ * into the NIC's internal memory.
+ */
+enum vxge_hw_status
+vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw,
+		struct vxge_hw_fifo_txd *txdp, enum vxge_hw_fifo_tcode tcode)
+{
+	struct net_device *netdev;
+	struct io_buffer *tx_iob = NULL;
+
+	vxge_trace();
+
+	netdev = fifo_hw->vpathh->hldev->ndev;
+
+	tx_iob = (struct io_buffer *)(intptr_t)txdp->host_control;
+
+	if (tcode == VXGE_HW_FIFO_T_CODE_OK) {
+		netdev_tx_complete(netdev, tx_iob);
+	} else {
+		netdev_tx_complete_err(netdev, tx_iob, -EINVAL);
+		vxge_debug(VXGE_ERR, "%s: transmit failed, tcode %d\n",
+				netdev->name, tcode);
+	}
+
+	memset(txdp, 0, sizeof(struct vxge_hw_fifo_txd));
+
+	return VXGE_HW_OK;
+}
+
+/* reset vpaths */
+enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct __vxge_hw_virtualpath *vpath;
+
+	vxge_trace();
+
+	vpath = vdev->vpath.vpathh;
+
+	if (vpath) {
+		if ((status = vxge_hw_vpath_reset(vpath)) == VXGE_HW_OK) {
+			if (is_vxge_card_up(vdev) &&
+				(status = vxge_hw_vpath_recover_from_reset(
+					vpath))	!= VXGE_HW_OK) {
+				vxge_debug(VXGE_ERR, "vxge_hw_vpath_recover_"
+					"from_reset failed\n");
+				return status;
+			} else {
+				status = __vxge_hw_vpath_reset_check(vpath);
+				if (status != VXGE_HW_OK) {
+					vxge_debug(VXGE_ERR,
+					"__vxge_hw_vpath_reset_check error\n");
+					return status;
+				}
+			}
+		} else {
+			vxge_debug(VXGE_ERR, "vxge_hw_vpath_reset failed\n");
+			return status;
+		}
+	}
+	return status;
+}
+
+/* close vpaths */
+void vxge_close_vpaths(struct vxgedev *vdev)
+{
+	if (vdev->vpath.vpathh && vdev->vpath.is_open)
+		vxge_hw_vpath_close(vdev->vpath.vpathh);
+
+	vdev->vpath.is_open = 0;
+	vdev->vpath.vpathh = NULL;
+}
+
+/* open vpaths */
+int vxge_open_vpaths(struct vxgedev *vdev)
+{
+	enum vxge_hw_status status;
+	struct __vxge_hw_device *hldev;
+
+	hldev = (struct __vxge_hw_device  *)pci_get_drvdata(vdev->pdev);
+
+	vdev->vpath.vpathh = &hldev->virtual_path;
+	vdev->vpath.fifo.ndev = vdev->ndev;
+	vdev->vpath.fifo.pdev = vdev->pdev;
+	vdev->vpath.fifo.fifoh = &hldev->virtual_path.fifoh;
+	vdev->vpath.ring.ndev = vdev->ndev;
+	vdev->vpath.ring.pdev = vdev->pdev;
+	vdev->vpath.ring.ringh = &hldev->virtual_path.ringh;
+
+	status = vxge_hw_vpath_open(vdev->devh,	&vdev->vpath);
+	if (status == VXGE_HW_OK) {
+		vdev->vpath.is_open = 1;
+	} else {
+		vxge_debug(VXGE_ERR,
+			"%s: vpath: %d failed to open "
+			"with status: %d\n",
+			vdev->ndev->name, vdev->vpath.device_id,
+			status);
+		vxge_close_vpaths(vdev);
+		return status;
+	}
+
+	hldev->vpaths_deployed |= vxge_mBIT(vdev->vpath.vpathh->vp_id);
+
+	return VXGE_HW_OK;
+}
+
+/** Functions that implement the gPXE driver API **/
+
+/**
+ * vxge_xmit
+ * @dev : device pointer.
+ * @iobuf : the I/O buffer containing the Tx data.
+ *
+ * This function is the Tx entry point of the driver. The Neterion NIC
+ * supports certain protocol assist features on the Tx side, namely
+ * CSO, S/G and LSO.
+ */
+static int
+vxge_xmit(struct net_device *dev, struct io_buffer *iobuf)
+{
+	struct vxge_fifo *fifo = NULL;
+	struct vxgedev *vdev = NULL;
+	struct __vxge_hw_fifo *fifoh;
+	struct __vxge_hw_device  *hldev;
+	struct vxge_hw_fifo_txd *txdp;
+
+	vxge_trace();
+
+	vdev = (struct vxgedev *)netdev_priv(dev);
+	hldev = (struct __vxge_hw_device  *)pci_get_drvdata(vdev->pdev);
+
+	if (!is_vxge_card_up(vdev)) {
+		vxge_debug(VXGE_ERR,
+			"%s: vdev not initialized\n", dev->name);
+		return -EIO;
+	}
+
+	if (!netdev_link_ok(dev)) {
+		vxge_debug(VXGE_ERR,
+			"%s: Link down, transmit failed\n", dev->name);
+		return -ENETDOWN;
+	}
+
+	fifo = &vdev->vpath.fifo;
+	fifoh = fifo->fifoh;
+
+	txdp = vxge_hw_fifo_free_txdl_get(fifoh);
+	if (!txdp) {
+		vxge_debug(VXGE_ERR,
+			"%s: Out of tx descriptors\n", dev->name);
+		return -ENOBUFS;
+	}
+
+	vxge_debug(VXGE_XMIT, "%s: %s:%d fifoh offset= %d\n",
+		dev->name, __func__, __LINE__, fifoh->sw_offset);
+
+	vxge_hw_fifo_txdl_buffer_set(fifoh, txdp, iobuf);
+
+	vxge_hw_fifo_txdl_post(fifoh, txdp);
+
+	return 0;
+}
+
+/*
+ *  vxge_poll
+ *  @ndev: net device pointer
+ *
+ *  This function acks the interrupt. It polls for rx packets
+ *  and sends them to the upper layer. It also checks for tx completion
+ *  and frees I/O buffers.
+ */
+static void vxge_poll(struct net_device *ndev)
+{
+	struct __vxge_hw_device  *hldev;
+	struct vxgedev *vdev;
+
+	vxge_debug(VXGE_POLL, "%s:%d \n", __func__, __LINE__);
+
+	vdev = (struct vxgedev *)netdev_priv(ndev);
+	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
+
+	if (!is_vxge_card_up(vdev))
+		return;
+
+	/* process alarm and acknowledge the interrupts */
+	vxge_hw_device_begin_irq(hldev);
+
+	vxge_hw_vpath_poll_tx(&hldev->virtual_path.fifoh);
+
+	vxge_hw_vpath_poll_rx(&hldev->virtual_path.ringh);
+}
+
+/*
+ * vxge_irq - enable or disable interrupts
+ *
+ * @netdev   net device structure reference
+ * @action   requested interrupt action
+ */
+static void vxge_irq(struct net_device *netdev __unused, int action)
+{
+	struct __vxge_hw_device  *hldev;
+	struct vxgedev *vdev;
+
+	vxge_debug(VXGE_INFO,
+		"%s:%d action(%d)\n", __func__, __LINE__, action);
+
+	vdev = (struct vxgedev *)netdev_priv(netdev);
+	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
+
+	switch (action) {
+	case DISABLE:
+		vxge_hw_device_mask_all(hldev);
+		break;
+	default:
+		vxge_hw_device_unmask_all(hldev);
+		break;
+	}
+}
+
+/**
+ * vxge_open
+ * @dev: pointer to the device structure.
+ *
+ * This function is the open entry point of the driver. It mainly calls a
+ * function to allocate Rx buffers and inserts them into the buffer
+ * descriptors and then enables the Rx part of the NIC.
+ * Return value: '0' on success and an appropriate (-)ve integer as
+ * defined in errno.h file on failure.
+ */
+int
+vxge_open(struct net_device *dev)
+{
+	enum vxge_hw_status status;
+	struct vxgedev *vdev;
+	struct __vxge_hw_device *hldev;
+	int ret = 0;
+
+	vxge_debug(VXGE_INFO, "%s: %s:%d\n",
+			VXGE_DRIVER_NAME, __func__, __LINE__);
+
+	vdev = (struct vxgedev *)netdev_priv(dev);
+	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
+
+	/* make sure the link is off by default every time the NIC is
+	 * initialized */
+	netdev_link_down(dev);
+
+	/* Open VPATHs */
+	status = vxge_open_vpaths(vdev);
+	if (status != VXGE_HW_OK) {
+		vxge_debug(VXGE_ERR, "%s: fatal: Vpath open failed\n",
+				VXGE_DRIVER_NAME);
+		ret = -EPERM;
+		goto out0;
+	}
+
+	vdev->mtu = VXGE_HW_DEFAULT_MTU;
+	/* set initial mtu before enabling the device */
+	status = vxge_hw_vpath_mtu_set(vdev->vpath.vpathh, vdev->mtu);
+	if (status != VXGE_HW_OK) {
+		vxge_debug(VXGE_ERR,
+			"%s: fatal: cannot set new MTU\n", dev->name);
+		ret = -EPERM;
+		goto out2;
+	}
+	vxge_debug(VXGE_INFO,
+		"%s: MTU is %d\n", vdev->ndev->name, vdev->mtu);
+
+	set_bit(__VXGE_STATE_CARD_UP, vdev->state);
+
+	wmb();
+
+	if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
+		netdev_link_up(vdev->ndev);
+		vxge_debug(VXGE_INFO, "%s: Link Up\n", vdev->ndev->name);
+	}
+
+	vxge_hw_device_intr_enable(hldev);
+
+	vxge_hw_vpath_enable(vdev->vpath.vpathh);
+	wmb();
+	vxge_hw_vpath_rx_doorbell_init(vdev->vpath.vpathh);
+
+	goto out0;
+
+out2:
+	vxge_close_vpaths(vdev);
+out0:
+	vxge_debug(VXGE_INFO, "%s: %s:%d  Exiting...\n",
+				dev->name, __func__, __LINE__);
+	return ret;
+}
+
+/**
+ * vxge_close
+ * @dev: device pointer.
+ *
+ * This is the stop entry point of the driver. It needs to undo exactly
+ * whatever was done by the open entry point, thus it's usually referred to
+ * as the close function. Among other things this function mainly stops the
+ * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
+ */
+static void vxge_close(struct net_device *dev)
+{
+	struct vxgedev *vdev;
+	struct __vxge_hw_device *hldev;
+
+	vxge_debug(VXGE_INFO, "%s: %s:%d\n",
+		dev->name, __func__, __LINE__);
+
+	vdev = (struct vxgedev *)netdev_priv(dev);
+	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
+
+	if (!is_vxge_card_up(vdev))
+		return;
+
+	clear_bit(__VXGE_STATE_CARD_UP, vdev->state);
+
+	vxge_hw_vpath_set_zero_rx_frm_len(hldev);
+
+	netdev_link_down(vdev->ndev);
+	vxge_debug(VXGE_INFO, "%s: Link Down\n", vdev->ndev->name);
+
+	/* Note that at this point xmit() is stopped by upper layer */
+	vxge_hw_device_intr_disable(hldev);
+
+	/* Multi function shares INTA, hence we should
+	 * leave it in enabled state
+	 */
+	if (is_mf(hldev->hw_info.function_mode))
+		vxge_hw_device_unmask_all(hldev);
+
+	vxge_reset_all_vpaths(vdev);
+
+	vxge_close_vpaths(vdev);
+
+	vxge_debug(VXGE_INFO,
+		"%s: %s:%d  Exiting...\n", dev->name, __func__, __LINE__);
+}
+
+static struct net_device_operations vxge_operations;
+
+int vxge_device_register(struct __vxge_hw_device *hldev,
+				struct vxgedev **vdev_out)
+{
+	struct net_device *ndev;
+	struct vxgedev *vdev;
+	int ret = 0;
+
+	*vdev_out = NULL;
+
+	ndev = alloc_etherdev(sizeof(struct vxgedev));
+	if (ndev == NULL) {
+		vxge_debug(VXGE_ERR, "%s : device allocation failed\n",
+				__func__);
+		ret = -ENODEV;
+		goto _out0;
+	}
+
+	vxge_debug(VXGE_INFO, "%s:%d  netdev registering\n",
+		__func__, __LINE__);
+	vdev = netdev_priv(ndev);
+	memset(vdev, 0, sizeof(struct vxgedev));
+
+	vdev->ndev = ndev;
+	vdev->devh = hldev;
+	vdev->pdev = hldev->pdev;
+
+	ndev->dev = &vdev->pdev->dev;
+	/* Associate vxge-specific network operations with the
+	 * generic network device layer */
+	netdev_init(ndev, &vxge_operations);
+
+	memcpy(ndev->hw_addr,
+		(u8 *)hldev->hw_info.mac_addrs[hldev->first_vp_id], ETH_ALEN);
+
+	if (register_netdev(ndev)) {
+		vxge_debug(VXGE_ERR, "%s : device registration failed!\n",
+			__func__);
+		ret = -ENODEV;
+		goto _out2;
+	}
+
+	/* Mark the link state as down at this point; when the link change
+	 * interrupt arrives, the state will be updated automatically to
+	 * the right state.
+	 */
+	netdev_link_down(ndev);
+
+	vxge_debug(VXGE_INFO, "%s: Ethernet device registered\n",
+		VXGE_DRIVER_NAME);
+
+	*vdev_out = vdev;
+
+	return ret;
+_out2:
+	netdev_put(ndev);
+_out0:
+	return ret;
+}
+
+/*
+ * vxge_device_unregister
+ *
+ * This function will unregister and free the network device
+ */
+void
+vxge_device_unregister(struct __vxge_hw_device *hldev)
+{
+	struct vxgedev *vdev;
+	struct net_device *ndev;
+
+	ndev = hldev->ndev;
+	vdev = netdev_priv(ndev);
+
+	unregister_netdev(ndev);
+	netdev_nullify(ndev);
+	netdev_put(ndev);
+
+	vxge_debug(VXGE_INFO, "%s: ethernet device unregistered\n",
+				VXGE_DRIVER_NAME);
+}
+
+/**
+ * vxge_probe
+ * @pdev : structure containing the PCI related information of the device.
+ * @id: List of PCI devices supported by the driver listed in vxge_nics.
+ * Description:
+ * This function is called when a new PCI device gets detected and initializes
+ * it.
+ * Return value:
+ * returns 0 on success and negative on failure.
+ *
+ */
+static int
+vxge_probe(struct pci_device *pdev, const struct pci_device_id *id __unused)
+{
+	struct __vxge_hw_device  *hldev;
+	enum vxge_hw_status status;
+	int ret = 0;
+	u64 vpath_mask = 0;
+	struct vxgedev *vdev;
+	int i;
+	u8 revision, titan1;
+	u32 host_type;
+	u32 function_mode;
+	unsigned long mmio_start, mmio_len;
+	void *bar0;
+	struct vxge_hw_device_hw_info hw_info;
+	struct vxge_hw_device_version *fw_version;
+
+	vxge_debug(VXGE_INFO, "vxge_probe for device %02X:%02X.%X\n",
+			pdev->bus, PCI_SLOT(pdev->devfn),
+			PCI_FUNC(pdev->devfn));
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &revision);
+	titan1 = is_titan1(pdev->device, revision);
+
+	mmio_start = pci_bar_start(pdev, PCI_BASE_ADDRESS_0);
+	mmio_len   = pci_bar_size(pdev, PCI_BASE_ADDRESS_0);
+	vxge_debug(VXGE_INFO, "mmio_start: %#08lx, mmio_len: %#08lx\n",
+			mmio_start, mmio_len);
+
+	/* sets the bus master */
+	adjust_pci_device(pdev);
+
+	bar0 = ioremap(mmio_start, mmio_len);
+	if (!bar0) {
+		vxge_debug(VXGE_ERR,
+			"%s : cannot remap io memory bar0\n", __func__);
+		ret = -ENODEV;
+		goto _exit0;
+	}
+
+	status = vxge_hw_device_hw_info_get(bar0, &hw_info);
+	if (status != VXGE_HW_OK) {
+		vxge_debug(VXGE_ERR,
+			"%s: Reading of hardware info failed.\n",
+			VXGE_DRIVER_NAME);
+		ret = -EINVAL;
+		goto _exit1;
+	}
+
+	if (hw_info.func_id != 0) {
+		/* Non-zero function, so do not load the driver */
+		iounmap(bar0);
+		pci_set_drvdata(pdev, NULL);
+		return -EINVAL;
+	}
+
+	vpath_mask = hw_info.vpath_mask;
+	if (vpath_mask == 0) {
+		vxge_debug(VXGE_ERR,
+			"%s: No vpaths available in device\n",
+			VXGE_DRIVER_NAME);
+		ret = -EINVAL;
+		goto _exit1;
+	}
+	vxge_debug(VXGE_INFO,
+		"%s:%d  Vpath mask = %llx\n", __func__, __LINE__,
+		(unsigned long long)vpath_mask);
+
+	host_type = hw_info.host_type;
+	fw_version = &hw_info.fw_version;
+	/* fail the driver loading if firmware is incompatible */
+	if ((fw_version->major != VXGE_CERT_FW_VER_MAJOR) ||
+		(fw_version->minor < VXGE_CERT_FW_VER_MINOR)) {
+		printf("%s: Adapter's current firmware version: %d.%d.%d\n",
+			VXGE_DRIVER_NAME, fw_version->major,
+			fw_version->minor, fw_version->build);
+
+		printf("%s: Upgrade firmware to version %d.%d.%d\n",
+			VXGE_DRIVER_NAME, VXGE_CERT_FW_VER_MAJOR,
+			VXGE_CERT_FW_VER_MINOR,	VXGE_CERT_FW_VER_BUILD);
+
+		ret = -EACCES;
+		goto _exit1;
+	}
+
+	status = vxge_hw_device_initialize(&hldev, bar0, pdev, titan1);
+	if (status != VXGE_HW_OK) {
+		vxge_debug(VXGE_ERR,
+			"Failed to initialize device (%d)\n", status);
+		ret = -EINVAL;
+		goto _exit1;
+	}
+	memcpy(&hldev->hw_info, &hw_info,
+		sizeof(struct vxge_hw_device_hw_info));
+
+	/* find the vpath id of the first available one */
+	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
+		if (vpath_mask & vxge_mBIT(i)) {
+			hldev->first_vp_id = i;
+			break;
+		}
+	/* fail driver load if FCS stripping is not disabled in the MAC */
+	if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
+		vxge_debug(VXGE_ERR,
+			"%s: FCS stripping is not disabled in MAC,"
+			" failing driver load\n", VXGE_DRIVER_NAME);
+		ret = -EINVAL;
+		goto _exit2;
+	}
+
+	/* Read function mode */
+	status = vxge_hw_get_func_mode(hldev, &function_mode);
+	if (status != VXGE_HW_OK)
+		goto _exit2;
+
+	hldev->hw_info.function_mode = function_mode;
+
+	/* set private device info */
+	pci_set_drvdata(pdev, hldev);
+
+	if (vxge_device_register(hldev,	&vdev)) {
+		ret = -EINVAL;
+		goto _exit2;
+	}
+
+	/* set private HW device info */
+	hldev->ndev = vdev->ndev;
+	hldev->vdev = vdev;
+	hldev->pdev = pdev;
+	vdev->mtu = VXGE_HW_DEFAULT_MTU;
+	vdev->bar0 = bar0;
+	vdev->titan1 = titan1;
+	/* Virtual Path count */
+	vdev->vpath.device_id = hldev->first_vp_id;
+	vdev->vpath.vdev = vdev;
+	memcpy((u8 *)vdev->vpath.macaddr,
+			(u8 *)hldev->hw_info.mac_addrs[hldev->first_vp_id],
+			ETH_ALEN);
+
+	hldev->hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
+	hldev->hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
+	hldev->hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
+
+	vxge_debug(VXGE_INFO, "%s: Neterion %s Server Adapter\n",
+		VXGE_DRIVER_NAME, hldev->hw_info.product_desc);
+	vxge_debug(VXGE_INFO, "%s: SERIAL NUMBER: %s\n",
+		VXGE_DRIVER_NAME, hldev->hw_info.serial_number);
+	vxge_debug(VXGE_INFO, "%s: PART NUMBER: %s\n",
+		VXGE_DRIVER_NAME, hldev->hw_info.part_number);
+	vxge_debug(VXGE_INFO, "%s: MAC ADDR: %s\n",
+		VXGE_DRIVER_NAME, eth_ntoa(vdev->vpath.macaddr));
+	vxge_debug(VXGE_INFO,
+		"%s: Firmware version : %s Date : %s\n", VXGE_DRIVER_NAME,
+		hldev->hw_info.fw_version.version,
+		hldev->hw_info.fw_date.date);
+	vxge_debug(VXGE_INFO, "%s: %s Enabled\n",
+			VXGE_DRIVER_NAME, vxge_func_mode_names[function_mode]);
+
+	vxge_debug(VXGE_INFO, "%s: %s:%d  Probe Exiting...\n",
+		VXGE_DRIVER_NAME, __func__, __LINE__);
+
+	return 0;
+
+_exit2:
+	vxge_hw_device_terminate(hldev);
+_exit1:
+	iounmap(bar0);
+_exit0:
+	pci_set_drvdata(pdev, NULL);
+	printf("%s: WARNING!! Driver loading failed!!\n",
+		VXGE_DRIVER_NAME);
+
+	return ret;
+}
+
+/**
+ * vxge_remove - Free the PCI device
+ * @pdev: structure containing the PCI related information of the device.
+ * Description: This function is called by the PCI subsystem to release a
+ * PCI device and free up all resources held by the device.
+ */
+static void
+vxge_remove(struct pci_device *pdev)
+{
+	struct __vxge_hw_device  *hldev;
+	struct vxgedev *vdev = NULL;
+	struct net_device *ndev;
+
+	vxge_debug(VXGE_INFO,
+		"%s:%d\n", __func__, __LINE__);
+	hldev = (struct __vxge_hw_device  *) pci_get_drvdata(pdev);
+	if (hldev == NULL)
+		return;
+
+	ndev = hldev->ndev;
+	vdev = netdev_priv(ndev);
+
+	iounmap(vdev->bar0);
+
+	vxge_device_unregister(hldev);
+
+	vxge_debug(VXGE_INFO,
+		"%s:%d  Device unregistered\n", __func__, __LINE__);
+
+	vxge_hw_device_terminate(hldev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+/* vxge net device operations */
+static struct net_device_operations vxge_operations = {
+	.open           = vxge_open,
+	.close          = vxge_close,
+	.transmit       = vxge_xmit,
+	.poll           = vxge_poll,
+	.irq            = vxge_irq,
+};
+
+static struct pci_device_id vxge_nics[] = {
+	PCI_ROM(0x17d5, 0x5833, "vxge-x3100", "Neterion X3100 Series", 0),
+};
+
+struct pci_driver vxge_driver __pci_driver = {
+	.ids = vxge_nics,
+	.id_count = (sizeof(vxge_nics) / sizeof(vxge_nics[0])),
+	.probe = vxge_probe,
+	.remove = vxge_remove,
+};

src/drivers/net/vxge/vxge_main.h (+246 -0)

@@ -0,0 +1,246 @@
+/*
+ * vxge-main.h: gPXE driver for Neterion Inc's X3100 Series 10GbE
+ *              PCIe I/O Virtualized Server Adapter.
+ *
+ * Copyright(c) 2002-2010 Neterion Inc.
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by
+ * reference.  Drivers based on or derived from this code fall under
+ * the GPL and must retain the authorship, copyright and license
+ * notice.
+ *
+ */
+
+FILE_LICENCE(GPL2_ONLY);
+
+#ifndef VXGE_MAIN_H
+#define VXGE_MAIN_H
+
+#include <unistd.h>
+#include "vxge_traffic.h"
+#include "vxge_config.h"
+
+#define VXGE_DRIVER_NAME		"vxge"
+#define VXGE_DRIVER_VENDOR		"Neterion, Inc"
+
+#ifndef PCI_VENDOR_ID_S2IO
+#define PCI_VENDOR_ID_S2IO		0x17D5
+#endif
+
+#ifndef PCI_DEVICE_ID_TITAN_WIN
+#define PCI_DEVICE_ID_TITAN_WIN		0x5733
+#endif
+
+#ifndef PCI_DEVICE_ID_TITAN_UNI
+#define PCI_DEVICE_ID_TITAN_UNI		0x5833
+#endif
+
+#define VXGE_HW_TITAN1_PCI_REVISION	1
+#define	VXGE_HW_TITAN1A_PCI_REVISION	2
+
+#define	VXGE_HP_ISS_SUBSYS_VENDORID	0x103C
+#define	VXGE_HP_ISS_SUBSYS_DEVICEID_1	0x323B
+#define	VXGE_HP_ISS_SUBSYS_DEVICEID_2	0x323C
+
+#define	VXGE_USE_DEFAULT		0xffffffff
+#define VXGE_HW_VPATH_MSIX_ACTIVE	4
+#define VXGE_ALARM_MSIX_ID		2
+#define VXGE_HW_RXSYNC_FREQ_CNT		4
+#define VXGE_LL_RX_COPY_THRESHOLD	256
+#define VXGE_DEF_FIFO_LENGTH		84
+
+#define NO_STEERING		0
+#define PORT_STEERING		0x1
+#define RTH_TCP_UDP_STEERING	0x2
+#define RTH_IPV4_STEERING	0x3
+#define RTH_IPV6_EX_STEERING	0x4
+#define RTH_BUCKET_SIZE		8
+
+#define	TX_PRIORITY_STEERING		1
+#define	TX_VLAN_STEERING		2
+#define	TX_PORT_STEERING		3
+#define	TX_MULTIQ_STEERING		4
+
+#define VXGE_HW_PROM_MODE_ENABLE	1
+#define VXGE_HW_PROM_MODE_DISABLE	0
+
+#define VXGE_HW_FW_UPGRADE_DISABLE	0
+#define VXGE_HW_FW_UPGRADE_ALL		1
+#define VXGE_HW_FW_UPGRADE_FORCE	2
+#define VXGE_HW_FUNC_MODE_DISABLE	0
+
+#define VXGE_TTI_BTIMER_VAL 250000
+#define VXGE_T1A_TTI_LTIMER_VAL 80
+#define VXGE_T1A_TTI_RTIMER_VAL 400
+
+#define VXGE_TTI_LTIMER_VAL 1000
+#define VXGE_TTI_RTIMER_VAL 0
+#define VXGE_RTI_BTIMER_VAL 250
+#define VXGE_RTI_LTIMER_VAL 100
+#define VXGE_RTI_RTIMER_VAL 0
+#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
+#define VXGE_ISR_POLLING_CNT	8
+#define VXGE_MAX_CONFIG_DEV	0xFF
+#define VXGE_EXEC_MODE_DISABLE	0
+#define VXGE_EXEC_MODE_ENABLE	1
+#define VXGE_MAX_CONFIG_PORT	1
+#define VXGE_ALL_VID_DISABLE	0
+#define VXGE_ALL_VID_ENABLE	1
+#define VXGE_PAUSE_CTRL_DISABLE	0
+#define VXGE_PAUSE_CTRL_ENABLE	1
+
+#define TTI_TX_URANGE_A	5
+#define TTI_TX_URANGE_B	15
+#define TTI_TX_URANGE_C	40
+#define TTI_TX_UFC_A	5
+#define TTI_TX_UFC_B	40
+#define TTI_TX_UFC_C	60
+#define TTI_TX_UFC_D	100
+#define TTI_T1A_TX_UFC_A	30
+#define TTI_T1A_TX_UFC_B	80
+
+/* Slope - (max_mtu - min_mtu)/(max_mtu_ufc - min_mtu_ufc) */
+/* Slope - 93 */
+/* 60 - 9k Mtu, 140 - 1.5k mtu */
+#define TTI_T1A_TX_UFC_C(mtu)	(60 + ((VXGE_HW_MAX_MTU - mtu)/93))
+
+/* Slope - 37 */
+/* 100 - 9k Mtu, 300 - 1.5k mtu */
+#define TTI_T1A_TX_UFC_D(mtu)	(100 + ((VXGE_HW_MAX_MTU - mtu)/37))
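For example, at the default 1500-byte MTU these evaluate (with integer division) to TTI_T1A_TX_UFC_C(1500) = 60 + (9600 - 1500)/93 = 60 + 87 = 147, and TTI_T1A_TX_UFC_D(1500) = 100 + (9600 - 1500)/37 = 100 + 218 = 318, close to the approximate values of 140 and 300 noted in the comments; at the 9600-byte maximum MTU both reduce to their base values of 60 and 100.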
+
+#define RTI_RX_URANGE_A		5
+#define RTI_RX_URANGE_B		15
+#define RTI_RX_URANGE_C		40
+#define RTI_T1A_RX_URANGE_A	1
+#define RTI_T1A_RX_URANGE_B	20
+#define RTI_T1A_RX_URANGE_C	50
+#define RTI_RX_UFC_A		1
+#define RTI_RX_UFC_B		5
+#define RTI_RX_UFC_C		10
+#define RTI_RX_UFC_D		15
+#define RTI_T1A_RX_UFC_B	20
+#define RTI_T1A_RX_UFC_C	50
+#define RTI_T1A_RX_UFC_D	60
+
+/*
+ * The interrupt rate is maintained at 3k per second with the moderation
+ * parameters for most traffic, but not all. This is the maximum interrupt
+ * count allowed per function with INTA, or per vector in the case of
+ * MSI-X, in a 10 millisecond time period. Enabled only for Titan 1A.
+ */
+#define VXGE_T1A_MAX_INTERRUPT_COUNT 100
+
+#define VXGE_ENABLE_NAPI	1
+#define VXGE_DISABLE_NAPI	0
+#define VXGE_LRO_MAX_BYTES 0x4000
+#define VXGE_T1A_LRO_MAX_BYTES 0xC000
+
+#define VXGE_HW_MIN_VPATH_TX_BW_SUPPORT 0
+#define VXGE_HW_MAX_VPATH_TX_BW_SUPPORT 7
+
+/* Millisecond timer period */
+#define VXGE_TIMER_DELAY		10000
+
+#define VXGE_TIMER_COUNT	(2 * 60)
+
+#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE)
+
+#define VXGE_REG_DUMP_BUFSIZE           65000
+
+#define is_mf(function_mode) \
+	((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) ||   \
+	(function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17) || \
+	(function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2) ||  \
+	(function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4))
+
+#define is_titan1(dev_id, rev) (((dev_id == PCI_DEVICE_ID_TITAN_UNI) || \
+				(dev_id == PCI_DEVICE_ID_TITAN_WIN)) && \
+				(rev == VXGE_HW_TITAN1_PCI_REVISION))
+
+/* These flags represent the device's temporary state */
+#define __VXGE_STATE_RESET_CARD	0x01
+#define __VXGE_STATE_CARD_UP		0x02
+
+#define test_bit(bit, loc)	((bit) & (loc))
+#define set_bit(bit, loc)	do { (loc) |= (bit); } while (0)
+#define clear_bit(bit, loc)	do { (loc) &= ~(bit); } while (0)
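Note that, unlike the Linux kernel helpers of the same name, these operate on a bit mask rather than a bit index; the __VXGE_STATE_* flags above are therefore defined as masks (0x01, 0x02) and passed in directly. A hypothetical illustration:

	/* Illustration: the flag itself is the mask; no (1 << bit) shift. */
	static inline void vxge_sketch_state_demo(void)
	{
		unsigned long state = 0;

		set_bit(__VXGE_STATE_CARD_UP, state);	/* state |= 0x02 */
		if (test_bit(__VXGE_STATE_CARD_UP, state))
			clear_bit(__VXGE_STATE_CARD_UP, state);	/* state == 0 */
	}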
+
169
+#define msleep(n)       mdelay(n)
170
+
+struct vxge_fifo {
+	struct net_device	*ndev;
+	struct pci_device	*pdev;
+	struct __vxge_hw_fifo   *fifoh;
+};
+
+struct vxge_ring {
+	struct net_device	*ndev;
+	struct pci_device	*pdev;
+	struct __vxge_hw_ring	*ringh;
+};
+
+struct vxge_vpath {
+	struct vxge_fifo fifo;
+	struct vxge_ring ring;
+
+	/* Actual vpath id for this vpath in the device - 0 to 16 */
+	int device_id;
+	int is_open;
+	int vp_open;
+	u8 macaddr[ETH_ALEN];
+	u8 macmask[ETH_ALEN];
+	struct vxgedev *vdev;
+	struct __vxge_hw_virtualpath *vpathh;
+};
+
+struct vxgedev {
+	struct net_device	*ndev;
+	struct pci_device	*pdev;
+	struct __vxge_hw_device *devh;
+	u8			titan1;
+
+	unsigned long		state;
+
+	struct vxge_vpath	vpath;
+
+	void __iomem		*bar0;
+	int			mtu;
+
+	char			fw_version[VXGE_HW_FW_STRLEN];
+};
+
+static inline int is_zero_ether_addr(const u8 *addr)
+{
+	return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
+}
+
+static inline int is_multicast_ether_addr(const u8 *addr)
+{
+	return (0x01 & addr[0]);
+}
+
+/* checks whether the ethernet address @addr is a valid unicast address */
+static inline int is_valid_ether_addr(const u8 *addr)
+{
+	return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
+}
+
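The broadcast address is rejected by the multicast test above, since 0xff has the I/G (group) bit set. A quick illustration with hypothetical addresses (not from the patch):

	static const u8 bcast[ETH_ALEN] =
		{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	static const u8 ucast[ETH_ALEN] =
		{ 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };

	is_valid_ether_addr(bcast);	/* 0: group bit set */
	is_valid_ether_addr(ucast);	/* 1: valid unicast */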
+void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id);
+
+void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id);
+
+int vxge_reset(struct vxgedev *vdev);
+
+enum vxge_hw_status
+vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw,
+	struct vxge_hw_fifo_txd *txdp, enum vxge_hw_fifo_tcode tcode);
+
+void vxge_close_vpaths(struct vxgedev *vdev);
+
+int vxge_open_vpaths(struct vxgedev *vdev);
+
+enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
+
+#endif

+ 4700
- 0
src/drivers/net/vxge/vxge_reg.h
File diff suppressed because it is too large
View File


+ 742
- 0
src/drivers/net/vxge/vxge_traffic.c View File

@@ -0,0 +1,742 @@
+/*
+ * vxge-traffic.c: gPXE driver for Neterion Inc's X3100 Series 10GbE
+ *              PCIe I/O Virtualized Server Adapter.
+ *
+ * Copyright(c) 2002-2010 Neterion Inc.
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by
+ * reference.  Drivers based on or derived from this code fall under
+ * the GPL and must retain the authorship, copyright and license
+ * notice.
+ *
+ */
+
+FILE_LICENCE(GPL2_ONLY);
+
+#include <gpxe/netdevice.h>
+#include <errno.h>
+
+#include "vxge_traffic.h"
+#include "vxge_config.h"
+#include "vxge_main.h"
+
+/*
+ * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
+ * @vpath: Virtual Path handle.
+ *
+ * Enable vpath interrupts. The function is to be executed the last in
+ * the vpath initialization sequence.
+ *
+ * See also: vxge_hw_vpath_intr_disable()
+ */
+enum vxge_hw_status
+vxge_hw_vpath_intr_enable(struct __vxge_hw_virtualpath *vpath)
+{
+	u64 val64;
+	struct vxge_hw_vpath_reg *vp_reg;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+		goto exit;
+	}
+
+	vp_reg = vpath->vp_reg;
+
+	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->general_errors_reg);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->pci_config_errors_reg);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->mrpcim_to_vpath_alarm_reg);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->srpcim_to_vpath_alarm_reg);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->vpath_ppif_int_status);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->srpcim_msg_to_vpath_reg);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->vpath_pcipif_int_status);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->prc_alarm_reg);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->wrdma_alarm_status);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->asic_ntwk_vp_err_reg);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->xgmac_vp_int_status);
+
+	val64 = readq(&vp_reg->vpath_general_int_status);
+
+	/* Mask unwanted interrupts */
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->vpath_pcipif_int_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->srpcim_msg_to_vpath_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->srpcim_to_vpath_alarm_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->mrpcim_to_vpath_alarm_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->pci_config_errors_mask);
+
+	/* Unmask the individual interrupts */
+	writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
+		VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
+		VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
+		VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
+		&vp_reg->general_errors_mask);
+
+	__vxge_hw_pio_mem_write32_upper(
+		(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
+		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
+		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
+		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
+		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
+		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
+		&vp_reg->kdfcctl_errors_mask);
+
+	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
+
+	__vxge_hw_pio_mem_write32_upper(
+		(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
+		&vp_reg->prc_alarm_mask);
+
+	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
+	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
+
+	if (vpath->hldev->first_vp_id != vpath->vp_id)
+		__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->asic_ntwk_vp_err_mask);
+	else
+		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
+		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT|
+			VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK),
+			0, 32), &vp_reg->asic_ntwk_vp_err_mask);
+
+	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_general_int_mask);
+exit:
+	return status;
+}
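Two register conventions appear to recur throughout the sequence above and the rest of this file: writing 1s to a *_reg or *_status alarm register clears any latched alarm bits (write-one-to-clear), while writing 1s to the matching *_mask register suppresses delivery of that alarm and writing 0s unmasks it. A condensed sketch of the idiom (wanted_mask is a hypothetical value, not from the patch):

	/* clear stale alarms, then unmask only the ones we care about */
	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
	__vxge_hw_pio_mem_write32_upper(wanted_mask,
			&vp_reg->kdfcctl_errors_mask);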
+
+/*
+ * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
+ * @vpath: Virtual Path handle.
+ *
+ * Disable vpath interrupts. The function is to be executed the first in
+ * the vpath termination sequence.
+ *
+ * See also: vxge_hw_vpath_intr_enable()
+ */
+enum vxge_hw_status
+vxge_hw_vpath_intr_disable(struct __vxge_hw_virtualpath *vpath)
+{
+	u64 val64;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+		goto exit;
+	}
+	vp_reg = vpath->vp_reg;
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+			&vp_reg->vpath_general_int_mask);
+
+	val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
+
+	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+			&vp_reg->general_errors_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+			&vp_reg->pci_config_errors_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+			&vp_reg->mrpcim_to_vpath_alarm_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+			&vp_reg->srpcim_to_vpath_alarm_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+			&vp_reg->vpath_ppif_int_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+			&vp_reg->srpcim_msg_to_vpath_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+			&vp_reg->vpath_pcipif_int_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+			&vp_reg->wrdma_alarm_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+			&vp_reg->prc_alarm_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+			&vp_reg->xgmac_vp_int_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+			&vp_reg->asic_ntwk_vp_err_mask);
+
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_device_mask_all - Mask all device interrupts.
+ * @hldev: HW device handle.
+ *
+ * Mask all device interrupts.
+ *
+ * See also: vxge_hw_device_unmask_all()
+ */
+void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
+{
+	u64 val64;
+
+	val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
+			VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
+
+	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
+			&hldev->common_reg->titan_mask_all_int);
+
+	return;
+}
+
+/**
+ * vxge_hw_device_unmask_all - Unmask all device interrupts.
+ * @hldev: HW device handle.
+ *
+ * Unmask all device interrupts.
+ *
+ * See also: vxge_hw_device_mask_all()
+ */
+void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
+{
+	u64 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
+
+	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
+			&hldev->common_reg->titan_mask_all_int);
+
+	return;
+}
+
+/**
+ * vxge_hw_device_intr_enable - Enable interrupts.
+ * @hldev: HW device handle.
+ *
+ * Enable Titan interrupts. The function is to be executed the last in
+ * the Titan initialization sequence.
+ *
+ * See also: vxge_hw_device_intr_disable()
+ */
+void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
+{
+	u64 val64;
+	u32 val32;
+
+	vxge_hw_device_mask_all(hldev);
+
+	vxge_hw_vpath_intr_enable(&hldev->virtual_path);
+
+	val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
+			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
+
+	if (val64 != 0) {
+		writeq(val64, &hldev->common_reg->tim_int_status0);
+
+		writeq(~val64, &hldev->common_reg->tim_int_mask0);
+	}
+
+	val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
+			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
+
+	if (val32 != 0) {
+		__vxge_hw_pio_mem_write32_upper(val32,
+				&hldev->common_reg->tim_int_status1);
+
+		__vxge_hw_pio_mem_write32_upper(~val32,
+				&hldev->common_reg->tim_int_mask1);
+	}
+
+	val64 = readq(&hldev->common_reg->titan_general_int_status);
+
+	/* We have not enabled the top level interrupt yet.
+	 * This will be controlled from the vxge_irq() entry api.
+	 */
+	return;
+}
+
+/**
+ * vxge_hw_device_intr_disable - Disable Titan interrupts.
+ * @hldev: HW device handle.
+ *
+ * Disable Titan interrupts.
+ *
+ * See also: vxge_hw_device_intr_enable()
+ */
+void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
+{
+	vxge_hw_device_mask_all(hldev);
+
+	/* mask all the tim interrupts */
+	writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
+	__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
+				&hldev->common_reg->tim_int_mask1);
+
+	vxge_hw_vpath_intr_disable(&hldev->virtual_path);
+
+	return;
+}
+
+/**
+ * vxge_hw_ring_rxd_post - Post descriptor on the ring.
+ * @ring: Handle to the ring object used for receive
+ * @rxdp: Descriptor obtained via vxge_hw_ring_rxd_reserve().
+ *
+ * Post descriptor on the ring.
+ * Prior to posting, the descriptor should be filled in accordance with the
+ * Host/Titan interface specification for a given service (LL, etc.).
+ */
+void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring __unused,
+				struct vxge_hw_ring_rxd_1 *rxdp)
+{
+	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+}
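Setting the OWN bit is the ownership handoff: once control_0 carries VXGE_HW_RING_RXD_LIST_OWN_ADAPTER, the NIC may DMA into the buffer and the host must not touch the descriptor until completion. The refill path later in this file follows the fill-then-post order (rxd, iobuf and ring as used in the poll loop):

	/* typical refill, as done in vxge_hw_vpath_poll_rx() below */
	rxd->control_0 = rxd->control_1 = 0;
	vxge_hw_ring_rxd_1b_set(rxd, iobuf, VXGE_LL_MAX_FRAME_SIZE(vdev));
	vxge_hw_ring_rxd_post(ring, rxd);	/* hands the RxD to the NIC */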
+
+/**
+ * __vxge_hw_non_offload_db_post - Post non offload doorbell
+ *
+ * @fifo: fifo handle
+ * @txdl_ptr: The starting location of the TxDL in host memory
+ * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
+ *
+ * This function posts a non-offload doorbell to the doorbell FIFO.
+ */
+static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
+	u64 txdl_ptr, u32 num_txds)
+{
+	writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
+		VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds),
+		&fifo->nofl_db->control_0);
+
+	wmb();
+
+	writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
+
+	wmb();
+}
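The two wmb() barriers appear to order the doorbell words for the hardware: the control word is made globally visible before the TxDL pointer write, and the final barrier flushes the pointer write itself. Note also the num_txds encoding from the comment above: the value is the highest TxD index, so the single-descriptor TxDLs used by this driver are posted with num_txds == 0 (meaning one TxD), as vxge_hw_fifo_txdl_post() does below.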
+
+/**
+ * vxge_hw_fifo_free_txdl_get - Fetch the next available TxD in the fifo
+ *
+ * @fifo: tx channel handle
+ */
+struct vxge_hw_fifo_txd *
+	vxge_hw_fifo_free_txdl_get(struct __vxge_hw_fifo *fifo)
+{
+	struct vxge_hw_fifo_txd *txdp;
+
+	txdp = fifo->txdl + fifo->sw_offset;
+	if (txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER) {
+		vxge_debug(VXGE_ERR, "%s:%d, error: txd(%d) owned by hw\n",
+				__func__, __LINE__, fifo->sw_offset);
+		return NULL;
+	}
+
+	return txdp;
+}
+
+/**
+ * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
+ * descriptor.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @txdp: Descriptor handle.
+ * @iob: data buffer.
+ */
+void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
+			struct vxge_hw_fifo_txd *txdp,
+			struct io_buffer *iob)
+{
+	txdp->control_0 = VXGE_HW_FIFO_TXD_GATHER_CODE(
+			VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST);
+	txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(iob_len(iob));
+
+	txdp->control_1 = VXGE_HW_FIFO_TXD_INT_NUMBER(fifo->tx_intr_num);
+	txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
+
+	txdp->host_control = (intptr_t)iob;
+	txdp->buffer_pointer = virt_to_bus(iob->data);
+}
+
+/**
+ * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @txdp: Tx Descriptor
+ *
+ * Post descriptor on the 'fifo' type channel for transmission.
+ * Prior to posting, the descriptor should be filled in accordance with the
+ * Host/Titan interface specification for a given service (LL, etc.).
+ */
+void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo,
+			struct vxge_hw_fifo_txd *txdp)
+{
+	txdp->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
+
+	__vxge_hw_non_offload_db_post(fifo, (u64) virt_to_bus(txdp), 0);
+
+	vxge_hw_fifo_txd_offset_up(&fifo->sw_offset);
+}
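Taken together, the three fifo helpers form the whole transmit fast path. A minimal sketch of how a transmit routine would chain them (hypothetical caller along the lines of the driver's xmit handler, not copied from the patch):

	static int vxge_xmit_sketch(struct __vxge_hw_fifo *fifo,
				    struct io_buffer *iob)
	{
		struct vxge_hw_fifo_txd *txdp;

		txdp = vxge_hw_fifo_free_txdl_get(fifo);
		if (!txdp)
			return -ENOBUFS;	/* ring full: TxD still owned by hw */

		vxge_hw_fifo_txdl_buffer_set(fifo, txdp, iob);
		vxge_hw_fifo_txdl_post(fifo, txdp);	/* sets OWN, rings doorbell */
		return 0;
	}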
+
+/*
+ * __vxge_hw_vpath_alarm_process - Process Alarms.
+ * @vpath: Virtual Path.
+ *
+ * Process vpath alarms.
+ */
+static enum vxge_hw_status __vxge_hw_vpath_alarm_process(
+			struct __vxge_hw_virtualpath *vpath)
+{
+	u64 val64;
+	u64 alarm_status;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct __vxge_hw_device *hldev = NULL;
+	struct vxge_hw_vpath_reg *vp_reg;
+
+	hldev = vpath->hldev;
+	vp_reg = vpath->vp_reg;
+	alarm_status = readq(&vp_reg->vpath_general_int_status);
+
+	if (alarm_status == VXGE_HW_ALL_FOXES) {
+		vxge_debug(VXGE_ERR, "%s: %s:%d, slot freeze error\n",
+			hldev->ndev->name, __func__, __LINE__);
+		status = VXGE_HW_ERR_SLOT_FREEZE;
+		goto out;
+	}
+
+	if (alarm_status & ~(
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
+		vxge_debug(VXGE_ERR, "%s: %s:%d, Unknown vpath alarm\n",
+			hldev->ndev->name, __func__, __LINE__);
+		status = VXGE_HW_FAIL;
+		goto out;
+	}
+
+	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
+
+		val64 = readq(&vp_reg->xgmac_vp_int_status);
+
+		if (val64 &
+		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
+
+			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
+
+			if (((val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
+			    (!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
+			    ((val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
+				&& (!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
+			))) {
+				writeq(VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
+					&vp_reg->asic_ntwk_vp_err_mask);
+
+				netdev_link_down(hldev->ndev);
+				vxge_debug(VXGE_INTR, "%s: %s:%d link down\n",
+					hldev->ndev->name, __func__, __LINE__);
+			}
+
+			if (((val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
+			    (!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
+			    ((val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
+				&& (!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
+			))) {
+				writeq(VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
+					&vp_reg->asic_ntwk_vp_err_mask);
+
+				netdev_link_up(hldev->ndev);
+				vxge_debug(VXGE_INTR, "%s: %s:%d link up\n",
+					hldev->ndev->name, __func__, __LINE__);
+			}
+
+			writeq(VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->asic_ntwk_vp_err_reg);
+		}
+	} else {
+		vxge_debug(VXGE_INFO, "%s: %s:%d unhandled alarm %llx\n",
+				hldev->ndev->name, __func__, __LINE__,
+				alarm_status);
+	}
+out:
+	return status;
+}
+
+/**
+ * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
+ * condition that has caused the Tx and Rx interrupt.
+ * @hldev: HW device.
+ *
+ * Acknowledge (that is, clear) the condition that has caused
+ * the Tx and Rx interrupt.
+ * See also: vxge_hw_device_begin_irq(),
+ * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
+ */
+void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
+{
+	if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
+			(hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
+		writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
+			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
+			&hldev->common_reg->tim_int_status0);
+	}
+
+	if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
+			(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
+		__vxge_hw_pio_mem_write32_upper(
+			(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
+			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
+			&hldev->common_reg->tim_int_status1);
+	}
+
+	return;
+}
+
+/**
+ * vxge_hw_device_begin_irq - Begin IRQ processing.
+ * @hldev: HW device handle.
+ *
+ * The function performs two actions: it first checks whether the
+ * interrupt was raised by the device (relevant for shared IRQs), and it
+ * then acknowledges and processes any pending traffic and alarm
+ * interrupts.
+ *
+ * Note:
+ * vxge_hw_device_begin_irq() does not flush MMIO writes through the
+ * bridge. Therefore, two back-to-back interrupts are potentially possible.
+ *
+ * Returns: VXGE_HW_ERR_WRONG_IRQ if the interrupt is not "ours" (note
+ * that in this case the device interrupts remain enabled); otherwise,
+ * the status of the alarm processing, or VXGE_HW_OK.
+ */
+enum vxge_hw_status
+vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev)
+{
+	u64 val64;
+	u64 adapter_status;
+	u64 vpath_mask;
+	enum vxge_hw_status ret = VXGE_HW_OK;
+
+	val64 = readq(&hldev->common_reg->titan_general_int_status);
+
+	if (!val64) {
+		ret = VXGE_HW_ERR_WRONG_IRQ;
+		goto exit;
+	}
+
+	if (val64 == VXGE_HW_ALL_FOXES) {
+
+		adapter_status = readq(&hldev->common_reg->adapter_status);
+
+		if (adapter_status == VXGE_HW_ALL_FOXES) {
+			vxge_debug(VXGE_ERR, "%s: %s:%d critical error "
+				"occurred\n", hldev->ndev->name,
+				__func__, __LINE__);
+			ret = VXGE_HW_ERR_SLOT_FREEZE;
+			goto exit;
+		}
+	}
+
+	vpath_mask = hldev->vpaths_deployed >>
+				(64 - VXGE_HW_MAX_VIRTUAL_PATHS);
+	if (val64 & VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
+				vpath_mask))
+		vxge_hw_device_clear_tx_rx(hldev);
+
+	if (val64 & VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)
+		ret = __vxge_hw_vpath_alarm_process(&hldev->virtual_path);
+
+exit:
+	return ret;
+}
+
+/**
+ * vxge_hw_vpath_doorbell_rx - Indicates to hw the qwords of receive
+ * descriptors posted.
+ * @ring: Handle to the ring object used for receive
+ *
+ * The function writes the number of qwords of rxds posted during
+ * replenishment. Since the function is called frequently, a flush is not
+ * required to post the write transaction. At the very least, the previous
+ * write will be flushed once the subsequent write is made.
+ *
+ * Returns: None.
+ */
+void vxge_hw_vpath_doorbell_rx(struct __vxge_hw_ring *ring)
+{
+	u32 rxds_qw_per_block = VXGE_HW_MAX_RXDS_PER_BLOCK_1 *
+		VXGE_HW_RING_RXD_QWORDS_MODE_1;
+
+	ring->doorbell_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;
+
+	ring->total_db_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;
+
+	if (ring->total_db_cnt >= rxds_qw_per_block) {
+		/* For each block add 4 more qwords */
+		ring->doorbell_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;
+
+		/* Reset total count */
+		ring->total_db_cnt -= rxds_qw_per_block;
+	}
+
+	if (ring->doorbell_cnt >= ring->rxd_qword_limit) {
+		wmb();
+		writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(
+			ring->doorbell_cnt),
+			&ring->vp_reg->prc_rxd_doorbell);
+		ring->doorbell_cnt = 0;
+	}
+}
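A worked pass through the accounting, assuming the mode-1 ring layout (4 qwords per RxD, with the last descriptor slot of each 4k block used as a link RxD, which would explain the extra credit per block): each posted RxD adds 4 to both counters, and once total_db_cnt crosses a whole block the code credits 4 additional qwords for the block's link RxD before resetting the per-block count. The doorbell itself is only written once doorbell_cnt reaches rxd_qword_limit, batching PIO writes; the wmb() ensures the refilled descriptors are visible in memory before the NIC is told about them.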
+
+/**
+ * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
+ * descriptors and process the same.
+ * @ring: Handle to the ring object used for receive
+ *
+ * The function polls the Rx for the completed descriptors.
+ */
+#define ETH_FCS_LEN	4
+enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
+{
+	struct __vxge_hw_device *hldev;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxge_hw_ring_rxd_1 *rxd;
+	unsigned int len;
+	enum vxge_hw_ring_tcode tcode;
+	struct io_buffer *rx_iob, *iobuf = NULL;
+	u16 poll_count = 0;
+
+	hldev = ring->vpathh->hldev;
+
+	do {
+		rxd = &ring->rxdl->rxd[ring->rxd_offset];
+		tcode = VXGE_HW_RING_RXD_T_CODE_GET(rxd->control_0);
+
+		/* if tcode is VXGE_HW_RING_T_CODE_FRM_DROP, it is
+		 * possible that the ownership bit is still set to
+		 * the adapter
+		 */
+		if ((rxd->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)
+			&& (tcode == VXGE_HW_RING_T_CODE_OK)) {
+			status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+			goto err0;
+		}
+
+		vxge_debug(VXGE_INFO, "%s: rx frame received at offset %d\n",
+			hldev->ndev->name, ring->rxd_offset);
+
+		if (tcode != VXGE_HW_RING_T_CODE_OK) {
+			netdev_rx_err(hldev->ndev, NULL, -EINVAL);
+			vxge_debug(VXGE_ERR, "%s:%d, rx error tcode %d\n",
+				__func__, __LINE__, tcode);
+			status = VXGE_HW_FAIL;
+			goto err1;
+		}
+
+		iobuf = (struct io_buffer *)(intptr_t)rxd->host_control;
+
+		len = VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxd->control_1);
+		len -= ETH_FCS_LEN;
+
+		rx_iob = alloc_iob(len);
+		if (!rx_iob) {
+			netdev_rx_err(hldev->ndev, NULL, -ENOMEM);
+			vxge_debug(VXGE_ERR, "%s:%d, alloc_iob error\n",
+				__func__, __LINE__);
+			status = VXGE_HW_ERR_OUT_OF_MEMORY;
+			goto err1;
+		}
+
+		memcpy(iob_put(rx_iob, len), iobuf->data, len);
+		/* Add this packet to the receive queue. */
+		netdev_rx(hldev->ndev, rx_iob);
+
+err1:
+		/* repost the rxd */
+		rxd->control_0 = rxd->control_1 = 0;
+		vxge_hw_ring_rxd_1b_set(rxd, iobuf,
+				VXGE_LL_MAX_FRAME_SIZE(hldev->vdev));
+		vxge_hw_ring_rxd_post(ring, rxd);
+
+		/* repost the qword count for doorbell */
+		vxge_hw_vpath_doorbell_rx(ring);
+
+		/* increment the descriptor offset */
+		vxge_hw_ring_rxd_offset_up(&ring->rxd_offset);
+
+	} while (++poll_count < ring->rx_poll_weight);
+err0:
+	return status;
+}
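Note the copy-and-repost receive strategy: each completed frame is copied into a freshly allocated I/O buffer for the network stack, and the original DMA buffer is zeroed, refilled and immediately handed back to the adapter. This trades one memcpy per packet for a ring whose buffers never change address, which keeps the refill path trivial and is a reasonable fit for a boot-time driver where throughput is not the priority.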
+
+/**
+ * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
+ * the same.
+ * @fifo: Handle to the fifo object used for non offload send
+ *
+ * The function polls the Tx for the completed descriptors and completes
+ * them via vxge_xmit_compl().
+ */
+enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxge_hw_fifo_txd *txdp;
+
+	txdp = fifo->txdl + fifo->hw_offset;
+	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)
+		&& (txdp->host_control)) {
+
+		vxge_xmit_compl(fifo, txdp,
+			VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0));
+
+		vxge_hw_fifo_txd_offset_up(&fifo->hw_offset);
+	}
+
+	return status;
+}
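Putting the polling entry points together, a gPXE net-device poll routine would typically look like the following sketch (hypothetical glue code, assuming a vxgedev laid out as in vxge_main.h; the real vxge_main.c may differ in detail):

	static void vxge_poll_sketch(struct net_device *netdev)
	{
		struct vxgedev *vdev = netdev_priv(netdev);

		/* check for and acknowledge any pending interrupt state */
		vxge_hw_device_begin_irq(vdev->devh);

		/* reap tx completions, then harvest rx frames */
		vxge_hw_vpath_poll_tx(vdev->vpath.fifo.fifoh);
		vxge_hw_vpath_poll_rx(vdev->vpath.ring.ringh);
	}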

+ 309
- 0
src/drivers/net/vxge/vxge_traffic.h View File

@@ -0,0 +1,309 @@
+/*
+ * vxge-traffic.h: gPXE driver for Neterion Inc's X3100 Series 10GbE
+ *              PCIe I/O Virtualized Server Adapter.
+ *
+ * Copyright(c) 2002-2010 Neterion Inc.
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by
+ * reference.  Drivers based on or derived from this code fall under
+ * the GPL and must retain the authorship, copyright and license
+ * notice.
+ *
+ */
+
+FILE_LICENCE(GPL2_ONLY);
+
+#ifndef VXGE_TRAFFIC_H
+#define VXGE_TRAFFIC_H
+
+#include <stdint.h>
+#include <gpxe/if_ether.h>
+#include <gpxe/iobuf.h>
+
+#include "vxge_reg.h"
+#include "vxge_version.h"
+
+#define VXGE_HW_DTR_MAX_T_CODE		16
+#define VXGE_HW_ALL_FOXES		0xFFFFFFFFFFFFFFFFULL
+#define VXGE_HW_INTR_MASK_ALL		0xFFFFFFFFFFFFFFFFULL
+#define	VXGE_HW_MAX_VIRTUAL_PATHS	17
+
+#define VXGE_HW_MAX_VIRTUAL_FUNCTIONS	8
+
+#define VXGE_HW_MAC_MAX_MAC_PORT_ID	3
+
+#define VXGE_HW_DEFAULT_32		0xffffffff
+
+/* frame sizes */
+#define VXGE_HW_HEADER_802_2_SIZE	3
+#define VXGE_HW_HEADER_SNAP_SIZE	5
+#define VXGE_HW_HEADER_VLAN_SIZE	4
+#define VXGE_HW_MAC_HEADER_MAX_SIZE \
+			(ETH_HLEN + \
+			VXGE_HW_HEADER_802_2_SIZE + \
+			VXGE_HW_HEADER_VLAN_SIZE + \
+			VXGE_HW_HEADER_SNAP_SIZE)
+
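For reference, with gPXE's 14-byte ETH_HLEN this worst-case header works out to 14 + 3 + 4 + 5 = 26 bytes, so VXGE_LL_MAX_FRAME_SIZE() in vxge_main.h reserves mtu + 26 bytes per receive buffer (1526 at the default 1500-byte MTU).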
+/* 32bit alignments */
+
+/* A receive data corruption can occur, resulting in either a single-bit
+or double-bit ECC error being flagged in the ASIC, if the starting offset
+of a buffer in single buffer mode is 0x2 to 0xa. The single-bit ECC error
+will not lock up the card but can hide the data corruption, while the
+double-bit ECC error will lock up the card. Limiting the starting offset
+of the buffers to 0x0, 0x1, or to a value greater than 0xF will work
+around this issue. A VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN of 2 causes
+the starting offset of a buffer to be 0x2, 0x12 and so on, to keep the
+start of the ip header dword aligned; a start of buffer of 0x2 would
+trigger this problem. To avoid the problem in all cases, add 0x10 to 0x2,
+so that the start of buffer lies outside the problem-causing offsets.
+*/
+#define VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN		0x12
+#define VXGE_HW_HEADER_802_2_SNAP_ALIGN			2
+#define VXGE_HW_HEADER_802_2_ALIGN			3
+#define VXGE_HW_HEADER_SNAP_ALIGN			1
+
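The arithmetic behind the chosen constant: 0x12 = 0x2 + 0x10, so 0x12 % 4 == 2 still leaves the 14-byte Ethernet II header followed by an IP header on a 4-byte boundary (0x12 + 14 = 0x24), while 0x12 > 0xF keeps the buffer start clear of the 0x2-0xa window that triggers the ECC erratum described above.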
+#define VXGE_HW_L3_CKSUM_OK				0xFFFF
+#define VXGE_HW_L4_CKSUM_OK				0xFFFF
+
+/* Forward declarations */
+struct __vxge_hw_device;
+struct __vxge_hw_virtualpath;
+struct __vxge_hw_fifo;
+struct __vxge_hw_ring;
+struct vxge_hw_ring_rxd_1;
+struct vxge_hw_fifo_txd;
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+/* VXGE_HW_STATUS_H */
+#define VXGE_HW_EVENT_BASE                      0
+#define VXGE_LL_EVENT_BASE                      100
+
+/**
+ * enum vxge_hw_event - Enumerates slow-path HW events.
+ * @VXGE_HW_EVENT_UNKNOWN: Unknown (and invalid) event.
+ * @VXGE_HW_EVENT_SERR: Serious vpath hardware error event.
+ * @VXGE_HW_EVENT_ECCERR: vpath ECC error event.
+ * @VXGE_HW_EVENT_VPATH_ERR: Error local to the respective vpath
+ * @VXGE_HW_EVENT_FIFO_ERR: FIFO Doorbell fifo error.
+ * @VXGE_HW_EVENT_SRPCIM_SERR: srpcim hardware error event.
+ * @VXGE_HW_EVENT_MRPCIM_SERR: mrpcim hardware error event.
+ * @VXGE_HW_EVENT_MRPCIM_ECCERR: mrpcim ecc error event.
+ * @VXGE_HW_EVENT_RESET_START: Privileged entity is starting device reset
+ * @VXGE_HW_EVENT_RESET_COMPLETE: Device reset has been completed
+ * @VXGE_HW_EVENT_SLOT_FREEZE: Slot-freeze event. The driver tries to
+ * distinguish slot-freeze from the other critical events (e.g. ECC)
+ * when it is impossible to PIO read "through" the bus, i.e. when
+ * getting all-foxes.
+ *
+ * enum vxge_hw_event enumerates slow-path HW events.
+ *
+ * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
+ * vxge_uld_link_down_f{}.
+ */
+enum vxge_hw_event {
+	VXGE_HW_EVENT_UNKNOWN           = 0,
+	/* HW events */
+	VXGE_HW_EVENT_RESET_START       = VXGE_HW_EVENT_BASE + 1,
+	VXGE_HW_EVENT_RESET_COMPLETE    = VXGE_HW_EVENT_BASE + 2,
+	VXGE_HW_EVENT_LINK_DOWN         = VXGE_HW_EVENT_BASE + 3,
+	VXGE_HW_EVENT_LINK_UP           = VXGE_HW_EVENT_BASE + 4,
+	VXGE_HW_EVENT_ALARM_CLEARED     = VXGE_HW_EVENT_BASE + 5,
+	VXGE_HW_EVENT_ECCERR            = VXGE_HW_EVENT_BASE + 6,
+	VXGE_HW_EVENT_MRPCIM_ECCERR     = VXGE_HW_EVENT_BASE + 7,
+	VXGE_HW_EVENT_FIFO_ERR          = VXGE_HW_EVENT_BASE + 8,
+	VXGE_HW_EVENT_VPATH_ERR         = VXGE_HW_EVENT_BASE + 9,
+	VXGE_HW_EVENT_CRITICAL_ERR      = VXGE_HW_EVENT_BASE + 10,
+	VXGE_HW_EVENT_SERR              = VXGE_HW_EVENT_BASE + 11,
+	VXGE_HW_EVENT_SRPCIM_SERR       = VXGE_HW_EVENT_BASE + 12,
+	VXGE_HW_EVENT_MRPCIM_SERR       = VXGE_HW_EVENT_BASE + 13,
+	VXGE_HW_EVENT_SLOT_FREEZE       = VXGE_HW_EVENT_BASE + 14,
+};
+
+#define VXGE_HW_MAX_INTR_PER_VP        4
+#define VXGE_HW_VPATH_INTR_TX          0
+#define VXGE_HW_VPATH_INTR_RX          1
+#define VXGE_HW_VPATH_INTR_EINTA       2
+#define VXGE_HW_VPATH_INTR_BMAP        3
+
+#define VXGE_HW_BLOCK_SIZE             4096
+
+#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL         17
+#define VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL         18
+#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_RX_AVE_NET_UTIL  19
+#define VXGE_HW_TIM_UTIL_SEL_PER_VPATH                  63
+
+/**
+ * enum vxge_hw_ring_tcode - Transfer codes returned by adapter
+ * @VXGE_HW_RING_T_CODE_OK: Transfer ok.
+ * @VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH: Layer 3 checksum presentation
+ *		configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH: Layer 4 checksum presentation
+ *		configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH: Layer 3 and Layer 4 checksum
+ *		presentation configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L3_PKT_ERR: Layer 3 error: unparseable packet,
+ *		such as unknown IPv6 header.
+ * @VXGE_HW_RING_T_CODE_L2_FRM_ERR: Layer 2 error: frame integrity
+ *		error, such as FCS or ECC.
+ * @VXGE_HW_RING_T_CODE_BUF_SIZE_ERR: Buffer size error: the RxD
+ *		buffer(s) were not appropriately sized and data loss
+ *		occurred.
+ * @VXGE_HW_RING_T_CODE_INT_ECC_ERR: Internal ECC error: RxD corrupted.
+ * @VXGE_HW_RING_T_CODE_BENIGN_OVFLOW: Benign overflow: the contents of
+ *		Segment1 exceeded the capacity of Buffer1 and the remainder
+ *		was placed in Buffer2. Segment2 now starts in Buffer3.
+ *		No data loss or errors occurred.
+ * @VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF: Buffer size 0: one of the RxD's
+ *		assigned buffers has a size of 0 bytes.
+ * @VXGE_HW_RING_T_CODE_FRM_DROP: Frame dropped, either due to
+ *		VPath Reset or because of a VPIN mismatch.
+ * @VXGE_HW_RING_T_CODE_UNUSED: Unused
+ * @VXGE_HW_RING_T_CODE_MULTI_ERR: Multiple errors: more than one
+ *		transfer code condition occurred.
+ *
+ * Transfer codes returned by adapter.
+ */
+enum vxge_hw_ring_tcode {
+	VXGE_HW_RING_T_CODE_OK				= 0x0,
+	VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH		= 0x1,
+	VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH		= 0x2,
+	VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH	= 0x3,
+	VXGE_HW_RING_T_CODE_L3_PKT_ERR			= 0x5,
+	VXGE_HW_RING_T_CODE_L2_FRM_ERR			= 0x6,
+	VXGE_HW_RING_T_CODE_BUF_SIZE_ERR		= 0x7,
+	VXGE_HW_RING_T_CODE_INT_ECC_ERR			= 0x8,
+	VXGE_HW_RING_T_CODE_BENIGN_OVFLOW		= 0x9,
+	VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF		= 0xA,
+	VXGE_HW_RING_T_CODE_FRM_DROP			= 0xC,
+	VXGE_HW_RING_T_CODE_UNUSED			= 0xE,
+	VXGE_HW_RING_T_CODE_MULTI_ERR			= 0xF
+};
+
+/**
+ * enum vxge_hw_fifo_gather_code - Gather codes used in fifo TxD
+ * @VXGE_HW_FIFO_GATHER_CODE_FIRST: First TxDL
+ * @VXGE_HW_FIFO_GATHER_CODE_MIDDLE: Middle TxDL
+ * @VXGE_HW_FIFO_GATHER_CODE_LAST: Last TxDL
+ * @VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST: First and Last TxDL.
+ *
+ * These gather codes are used to indicate the position of a TxD in a TxD
+ * list.
+ */
+enum vxge_hw_fifo_gather_code {
+	VXGE_HW_FIFO_GATHER_CODE_FIRST		= 0x2,
+	VXGE_HW_FIFO_GATHER_CODE_MIDDLE		= 0x0,
+	VXGE_HW_FIFO_GATHER_CODE_LAST		= 0x1,
+	VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST	= 0x3
+};
+
+/**
+ * enum vxge_hw_fifo_tcode - tcodes used in fifo
+ * @VXGE_HW_FIFO_T_CODE_OK: Transfer OK
+ * @VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT: PCI read transaction (either TxD
+ *             or frame data) returned with corrupt data.
+ * @VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL: PCI read transaction was returned
+ *             with no data.
+ * @VXGE_HW_FIFO_T_CODE_INVALID_MSS: The host attempted to send either a
+ *             frame or LSO MSS that was too long (>9800B).
+ * @VXGE_HW_FIFO_T_CODE_LSO_ERROR: Error detected during TCP/UDP Large Send
+ *             Offload operation, due to improper header template,
+ *             unsupported protocol, etc.
+ * @VXGE_HW_FIFO_T_CODE_UNUSED: Unused
+ * @VXGE_HW_FIFO_T_CODE_MULTI_ERROR: Set to 1 by the adapter if multiple
+ *             data buffer transfer errors are encountered (see below).
+ *             Otherwise it is set to 0.
+ *
+ * These tcodes are returned in various APIs for TxD status
+ */
+enum vxge_hw_fifo_tcode {
+	VXGE_HW_FIFO_T_CODE_OK			= 0x0,
+	VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT	= 0x1,
+	VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL	= 0x2,
+	VXGE_HW_FIFO_T_CODE_INVALID_MSS		= 0x3,
+	VXGE_HW_FIFO_T_CODE_LSO_ERROR		= 0x4,
+	VXGE_HW_FIFO_T_CODE_UNUSED		= 0x7,
+	VXGE_HW_FIFO_T_CODE_MULTI_ERROR		= 0x8
+};
+
+enum vxge_hw_status
+vxge_hw_ring_replenish(struct __vxge_hw_ring *ring);
+
+void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring_handle,
+		struct vxge_hw_ring_rxd_1 *rxdp);
+
+void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
+		struct vxge_hw_fifo_txd *txdp,
+		struct io_buffer *iob);
+
+void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo,
+		struct vxge_hw_fifo_txd *txdp);
+
+enum vxge_hw_status __vxge_hw_ring_create(
+	struct __vxge_hw_virtualpath *vpath,
+	struct __vxge_hw_ring *ring);
+
+enum vxge_hw_status __vxge_hw_ring_delete(
+	struct __vxge_hw_ring *ringh);
+
+enum vxge_hw_status __vxge_hw_fifo_create(
+	struct __vxge_hw_virtualpath *vpath,
+	struct __vxge_hw_fifo *fifo);
+
+enum vxge_hw_status
+__vxge_hw_fifo_delete(struct __vxge_hw_fifo *fifo);
+
+enum vxge_hw_status __vxge_hw_vpath_reset(
+	struct __vxge_hw_device *devh, u32 vp_id);
+
+enum vxge_hw_status
+__vxge_hw_vpath_enable(struct __vxge_hw_device *devh, u32 vp_id);
+
+void
+__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *devh, u32 vp_id);
+
+enum vxge_hw_status
+__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh);
+
+enum vxge_hw_status
+__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *devh, u32 vp_id);
+
+enum vxge_hw_status
+__vxge_hw_vpath_initialize(struct __vxge_hw_device *devh, u32 vp_id);
+
+enum vxge_hw_status __vxge_hw_vp_initialize(
+	struct __vxge_hw_device *hldev, u32 vp_id,
+	struct __vxge_hw_virtualpath *vpath);
+
+void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev,
+			struct __vxge_hw_virtualpath *vpath);
+
+enum vxge_hw_status
+vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev);
+
+void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev);
+
+void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev);
+
+void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev);
+
+void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev);
+
+void vxge_hw_vpath_doorbell_rx(struct __vxge_hw_ring *ringh);
+
+enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ringh);
+
+enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo);
+
+struct vxge_hw_fifo_txd *
+vxge_hw_fifo_free_txdl_get(struct __vxge_hw_fifo *fifo);
+
+#endif

+ 40
- 0
src/drivers/net/vxge/vxge_version.h View File

@@ -0,0 +1,40 @@
+/*
+ * vxge-version.h: gPXE driver for Neterion Inc's X3100 Series 10GbE
+ *              PCIe I/O Virtualized Server Adapter.
+ *
+ * Copyright(c) 2002-2010 Neterion Inc.
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by
+ * reference.  Drivers based on or derived from this code fall under
+ * the GPL and must retain the authorship, copyright and license
+ * notice.
+ *
+ */
+
+FILE_LICENCE(GPL2_ONLY);
+
+#ifndef VXGE_VERSION_H
+#define VXGE_VERSION_H
+
+/* gpxe vxge driver version fields.
+ * Note: Each field must be a nibble in size.
+ */
+#define VXGE_VERSION_MAJOR	3
+#define VXGE_VERSION_MINOR	1
+#define VXGE_VERSION_FIX	0
+#define VXGE_VERSION_BUILD	0
+
+#define VXGE_FW_VER(major, minor, build) \
+	(((major) << 16) + ((minor) << 8) + (build))
+
+/* Certified FW version. */
+#define VXGE_CERT_FW_VER_MAJOR	1
+#define VXGE_CERT_FW_VER_MINOR	6
+#define VXGE_CERT_FW_VER_BUILD	0
+
+#define VXGE_CERT_FW_VER	VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR,	\
+				VXGE_CERT_FW_VER_MINOR,	VXGE_CERT_FW_VER_BUILD)
+
+#endif
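As a quick worked example of the packing macro above, the certified firmware version encodes as VXGE_FW_VER(1, 6, 0) = (1 << 16) + (6 << 8) + 0 = 0x10600, which a driver can compare numerically against the version read from the adapter.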

+ 3
- 0
src/include/gpxe/errfile.h View File

@@ -117,6 +117,9 @@ FILE_LICENCE ( GPL2_OR_LATER );
 #define ERRFILE_sis190		     ( ERRFILE_DRIVER | 0x00520000 )
 #define ERRFILE_myri10ge	     ( ERRFILE_DRIVER | 0x00530000 )
 #define ERRFILE_skge		     ( ERRFILE_DRIVER | 0x00540000 )
+#define ERRFILE_vxge_main	     ( ERRFILE_DRIVER | 0x00550000 )
+#define ERRFILE_vxge_config	     ( ERRFILE_DRIVER | 0x00560000 )
+#define ERRFILE_vxge_traffic	     ( ERRFILE_DRIVER | 0x00570000 )
 
 #define ERRFILE_scsi		     ( ERRFILE_DRIVER | 0x00700000 )
 #define ERRFILE_arbel		     ( ERRFILE_DRIVER | 0x00710000 )
