vxge_config.h
/*
 * vxge-config.h: iPXE driver for Neterion Inc's X3100 Series 10GbE
 * PCIe I/O Virtualized Server Adapter.
 *
 * Copyright(c) 2002-2010 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by
 * reference. Drivers based on or derived from this code fall under
 * the GPL and must retain the authorship, copyright and license
 * notice.
 *
 */
FILE_LICENCE(GPL2_ONLY);
#ifndef VXGE_CONFIG_H
#define VXGE_CONFIG_H

#include <stdint.h>
#include <ipxe/list.h>
#include <ipxe/pci.h>

#ifndef VXGE_CACHE_LINE_SIZE
#define VXGE_CACHE_LINE_SIZE 4096
#endif

#define WAIT_FACTOR 1

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#endif

#define VXGE_HW_MAC_MAX_WIRE_PORTS 2
#define VXGE_HW_MAC_MAX_AGGR_PORTS 2
#define VXGE_HW_MAC_MAX_PORTS      3

#define VXGE_HW_MIN_MTU     68
#define VXGE_HW_MAX_MTU     9600
#define VXGE_HW_DEFAULT_MTU 1500

#ifndef __iomem
#define __iomem
#endif

#ifndef ____cacheline_aligned
#define ____cacheline_aligned
#endif
/**
 * debug filtering masks
 */
#define VXGE_NONE  0x00
#define VXGE_INFO  0x01
#define VXGE_INTR  0x02
#define VXGE_XMIT  0x04
#define VXGE_POLL  0x08
#define VXGE_ERR   0x10
#define VXGE_TRACE 0x20
#define VXGE_ALL   (VXGE_INFO|VXGE_INTR|VXGE_XMIT \
                   |VXGE_POLL|VXGE_ERR|VXGE_TRACE)

#define NULL_VPID 0xFFFFFFFF

#define VXGE_HW_EVENT_BASE 0
#define VXGE_LL_EVENT_BASE 100

#define VXGE_HW_BASE_INF    100
#define VXGE_HW_BASE_ERR    200
#define VXGE_HW_BASE_BADCFG 300

#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000
#define VXGE_HW_MAX_PAYLOAD_SIZE_512   2
enum vxge_hw_status {
        VXGE_HW_OK = 0,
        VXGE_HW_FAIL = 1,
        VXGE_HW_PENDING = 2,
        VXGE_HW_COMPLETIONS_REMAIN = 3,
        VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
        VXGE_HW_INF_OUT_OF_DESCRIPTORS = VXGE_HW_BASE_INF + 2,
        VXGE_HW_INF_SW_LRO_BEGIN = VXGE_HW_BASE_INF + 3,
        VXGE_HW_INF_SW_LRO_CONT = VXGE_HW_BASE_INF + 4,
        VXGE_HW_INF_SW_LRO_UNCAPABLE = VXGE_HW_BASE_INF + 5,
        VXGE_HW_INF_SW_LRO_FLUSH_SESSION = VXGE_HW_BASE_INF + 6,
        VXGE_HW_INF_SW_LRO_FLUSH_BOTH = VXGE_HW_BASE_INF + 7,
        VXGE_HW_ERR_INVALID_HANDLE = VXGE_HW_BASE_ERR + 1,
        VXGE_HW_ERR_OUT_OF_MEMORY = VXGE_HW_BASE_ERR + 2,
        VXGE_HW_ERR_VPATH_NOT_AVAILABLE = VXGE_HW_BASE_ERR + 3,
        VXGE_HW_ERR_VPATH_NOT_OPEN = VXGE_HW_BASE_ERR + 4,
        VXGE_HW_ERR_WRONG_IRQ = VXGE_HW_BASE_ERR + 5,
        VXGE_HW_ERR_SWAPPER_CTRL = VXGE_HW_BASE_ERR + 6,
        VXGE_HW_ERR_INVALID_MTU_SIZE = VXGE_HW_BASE_ERR + 7,
        VXGE_HW_ERR_INVALID_INDEX = VXGE_HW_BASE_ERR + 8,
        VXGE_HW_ERR_INVALID_TYPE = VXGE_HW_BASE_ERR + 9,
        VXGE_HW_ERR_INVALID_OFFSET = VXGE_HW_BASE_ERR + 10,
        VXGE_HW_ERR_INVALID_DEVICE = VXGE_HW_BASE_ERR + 11,
        VXGE_HW_ERR_VERSION_CONFLICT = VXGE_HW_BASE_ERR + 12,
        VXGE_HW_ERR_INVALID_PCI_INFO = VXGE_HW_BASE_ERR + 13,
        VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14,
        VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15,
        VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16,
        VXGE_HW_ERR_PRIVILAGED_OPEARATION = VXGE_HW_BASE_ERR + 17,
        VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18,
        VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19,
        VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20,
        VXGE_HW_ERR_CRITICAL = VXGE_HW_BASE_ERR + 21,
        VXGE_HW_ERR_SLOT_FREEZE = VXGE_HW_BASE_ERR + 22,
        VXGE_HW_ERR_INVALID_MIN_BANDWIDTH = VXGE_HW_BASE_ERR + 25,
        VXGE_HW_ERR_INVALID_MAX_BANDWIDTH = VXGE_HW_BASE_ERR + 26,
        VXGE_HW_ERR_INVALID_TOTAL_BANDWIDTH = VXGE_HW_BASE_ERR + 27,
        VXGE_HW_ERR_INVALID_BANDWIDTH_LIMIT = VXGE_HW_BASE_ERR + 28,
        VXGE_HW_ERR_RESET_IN_PROGRESS = VXGE_HW_BASE_ERR + 29,
        VXGE_HW_ERR_OUT_OF_SPACE = VXGE_HW_BASE_ERR + 30,
        VXGE_HW_ERR_INVALID_FUNC_MODE = VXGE_HW_BASE_ERR + 31,
        VXGE_HW_ERR_INVALID_DP_MODE = VXGE_HW_BASE_ERR + 32,
        VXGE_HW_ERR_INVALID_FAILURE_BEHAVIOUR = VXGE_HW_BASE_ERR + 33,
        VXGE_HW_ERR_INVALID_L2_SWITCH_STATE = VXGE_HW_BASE_ERR + 34,
        VXGE_HW_ERR_INVALID_CATCH_BASIN_MODE = VXGE_HW_BASE_ERR + 35,
        VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS = VXGE_HW_BASE_BADCFG + 1,
        VXGE_HW_BADCFG_FIFO_BLOCKS = VXGE_HW_BASE_BADCFG + 2,
        VXGE_HW_BADCFG_VPATH_MTU = VXGE_HW_BASE_BADCFG + 3,
        VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG = VXGE_HW_BASE_BADCFG + 4,
        VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH = VXGE_HW_BASE_BADCFG + 5,
        VXGE_HW_BADCFG_VPATH_BANDWIDTH_LIMIT = VXGE_HW_BASE_BADCFG + 6,
        VXGE_HW_BADCFG_INTR_MODE = VXGE_HW_BASE_BADCFG + 7,
        VXGE_HW_BADCFG_RTS_MAC_EN = VXGE_HW_BASE_BADCFG + 8,
        VXGE_HW_BADCFG_VPATH_AGGR_ACK = VXGE_HW_BASE_BADCFG + 9,
        VXGE_HW_BADCFG_VPATH_PRIORITY = VXGE_HW_BASE_BADCFG + 10,
        VXGE_HW_EOF_TRACE_BUF = -1
};
/**
 * enum vxge_hw_device_link_state - Link state enumeration.
 * @VXGE_HW_LINK_NONE: Invalid link state.
 * @VXGE_HW_LINK_DOWN: Link is down.
 * @VXGE_HW_LINK_UP: Link is up.
 */
enum vxge_hw_device_link_state {
        VXGE_HW_LINK_NONE,
        VXGE_HW_LINK_DOWN,
        VXGE_HW_LINK_UP
};
/* forward declarations */
struct vxge_vpath;
struct __vxge_hw_virtualpath;

/**
 * struct vxge_hw_ring_rxd_1 - One-buffer mode RxD for ring
 *
 * One-buffer mode receive descriptor (RxD) for the ring structure.
 */
struct vxge_hw_ring_rxd_1 {
        u64 host_control;
        u64 control_0;
#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0) vxge_bVALn(ctrl0, 0, 7)
#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER vxge_mBIT(7)
#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0) vxge_bVALn(ctrl0, 8, 1)
#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 9, 1)
#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 10, 1)
#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_RING_RXD_T_CODE(val) vxge_vBIT(val, 12, 4)
#define VXGE_HW_RING_RXD_T_CODE_UNUSED VXGE_HW_RING_T_CODE_UNUSED
#define VXGE_HW_RING_RXD_SYN_GET(ctrl0) vxge_bVALn(ctrl0, 16, 1)
#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0) vxge_bVALn(ctrl0, 17, 1)
#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 18, 1)
#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 19, 1)
#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0) vxge_bVALn(ctrl0, 20, 4)
#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0) vxge_bVALn(ctrl0, 24, 1)
#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0) vxge_bVALn(ctrl0, 25, 2)
#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0) vxge_bVALn(ctrl0, 27, 5)
#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 32, 16)
#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 48, 16)
        u64 control_1;
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1) vxge_bVALn(ctrl1, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK vxge_vBIT(0x3FFF, 2, 14)
#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1) vxge_bVALn(ctrl1, 16, 32)
#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1) vxge_bVALn(ctrl1, 48, 16)
        u64 buffer0_ptr;
};
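/*
 * Editor's illustrative sketch (not part of the original driver): how the
 * bit-field accessors above are typically used to check a completed RxD.
 * Assumes vxge_bVALn() and the T-code values from the companion register/
 * traffic headers; a T-code of 0 is taken to mean "no error".
 */
static inline int vxge_example_rxd_ok(struct vxge_hw_ring_rxd_1 *rxdp)
{
        u64 ctrl0 = rxdp->control_0;

        /* Still owned by the adapter: completion not yet posted */
        if (ctrl0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)
                return 0;

        /* Non-zero transfer code indicates a receive error */
        if (VXGE_HW_RING_RXD_T_CODE_GET(ctrl0) != 0)
                return 0;

        return 1;
}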
/**
 * struct vxge_hw_fifo_txd - Transmit Descriptor
 *
 * Transmit descriptor (TxD). A fifo descriptor contains a configured
 * number (list) of TxDs. For more details please refer to Titan User
 * Guide, Section 5.4.2 "Transmit Descriptor (TxD) Format".
 */
struct vxge_hw_fifo_txd {
        u64 control_0;
#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER vxge_mBIT(7)
#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE(val) vxge_vBIT(val, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED VXGE_HW_FIFO_T_CODE_UNUSED
#define VXGE_HW_FIFO_TXD_GATHER_CODE(val) vxge_vBIT(val, 22, 2)
#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST VXGE_HW_FIFO_GATHER_CODE_FIRST
#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST VXGE_HW_FIFO_GATHER_CODE_LAST
#define VXGE_HW_FIFO_TXD_LSO_EN vxge_mBIT(30)
#define VXGE_HW_FIFO_TXD_LSO_MSS(val) vxge_vBIT(val, 34, 14)
#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val) vxge_vBIT(val, 48, 16)
        u64 control_1;
#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN vxge_mBIT(5)
#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN vxge_mBIT(6)
#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN vxge_mBIT(7)
#define VXGE_HW_FIFO_TXD_VLAN_ENABLE vxge_mBIT(15)
#define VXGE_HW_FIFO_TXD_VLAN_TAG(val) vxge_vBIT(val, 16, 16)
#define VXGE_HW_FIFO_TXD_NO_BW_LIMIT vxge_mBIT(43)
#define VXGE_HW_FIFO_TXD_INT_NUMBER(val) vxge_vBIT(val, 34, 6)
#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST vxge_mBIT(46)
#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ vxge_mBIT(47)
        u64 buffer_pointer;
        u64 host_control;
};
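/*
 * Editor's illustrative sketch (not part of the original driver): filling a
 * single-buffer TxD. iPXE posts one buffer per TxDL, so the descriptor is
 * both FIRST and LAST in its gather list. Assumes vxge_vBIT()/vxge_mBIT()
 * and the raw VXGE_HW_FIFO_GATHER_CODE_* values from the companion headers.
 */
static inline void vxge_example_txd_fill(struct vxge_hw_fifo_txd *txdp,
                                         u64 dma_addr, u32 len)
{
        txdp->buffer_pointer = dma_addr;
        txdp->control_1 = 0;
        txdp->control_0 = VXGE_HW_FIFO_TXD_BUFFER_SIZE(len) |
                          VXGE_HW_FIFO_TXD_GATHER_CODE(
                                VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST |
                                VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
        /* Hand ownership to the adapter last */
        txdp->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
}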
/**
 * struct vxge_hw_device_date - Date Format
 * @day: Day
 * @month: Month
 * @year: Year
 * @date: Date in string format
 *
 * Structure for returning date
 */
#define VXGE_HW_FW_STRLEN 32
struct vxge_hw_device_date {
        u32 day;
        u32 month;
        u32 year;
        char date[VXGE_HW_FW_STRLEN];
};

struct vxge_hw_device_version {
        u32 major;
        u32 minor;
        u32 build;
        char version[VXGE_HW_FW_STRLEN];
};

u64 __vxge_hw_vpath_pci_func_mode_get(
        u32 vp_id,
        struct vxge_hw_vpath_reg __iomem *vpath_reg);
/*
 * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
 * @control_0: Bits 0 to 7 - Doorbell type.
 *             Bits 8 to 31 - Reserved.
 *             Bits 32 to 39 - The highest TxD in this TxDL.
 *             Bits 40 to 47 - Reserved.
 *             Bits 48 to 55 - Reserved.
 *             Bits 56 to 63 - No snoop flags.
 * @txdl_ptr: The starting location of the TxDL in host memory.
 *
 * Created by the host and written to the adapter via PIO to a Kernel Doorbell
 * FIFO. All non-offload doorbell wrapper fields must be written by the host as
 * part of a doorbell write. Consumed by the adapter but is not written by the
 * adapter.
 */
struct __vxge_hw_non_offload_db_wrapper {
        u64 control_0;
#define VXGE_HW_NODBW_GET_TYPE(ctrl0) vxge_bVALn(ctrl0, 0, 8)
#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8)
#define VXGE_HW_NODBW_TYPE_NODBW 0
#define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0) vxge_bVALn(ctrl0, 32, 8)
#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8)
#define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0) vxge_bVALn(ctrl0, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE 0x2
#define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ 0x1
        u64 txdl_ptr;
};
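/*
 * Editor's illustrative sketch (not part of the original driver): composing
 * the wrapper for a TxDL that holds a single TxD (highest TxD number 0) with
 * no-snoop disabled. In the driver proper the two words are then written to
 * the vpath's nofl_db PIO region rather than kept in host memory.
 */
static inline void vxge_example_db_fill(
                struct __vxge_hw_non_offload_db_wrapper *db, u64 txdl_dma)
{
        db->control_0 = VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
                        VXGE_HW_NODBW_LAST_TXD_NUMBER(0) |
                        VXGE_HW_NODBW_LIST_NO_SNOOP(0);
        db->txdl_ptr = txdl_dma;
}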
/*
 * struct __vxge_hw_fifo - Fifo.
 * @vp_id: Virtual path id
 * @tx_intr_num: Interrupt number associated with the TX
 * @txdl: Start pointer of the txdl list of this fifo.
 *        iPXE does not support tx fragmentation, so we need
 *        only one txd in a list
 * @depth: total number of lists in this fifo
 * @hw_offset: txd index from where the adapter owns the txd list
 * @sw_offset: txd index from where the driver owns the txd list
 */
struct __vxge_hw_fifo {
        struct vxge_hw_vpath_reg *vp_reg;
        struct __vxge_hw_non_offload_db_wrapper *nofl_db;
        u32 vp_id;
        u32 tx_intr_num;
        struct vxge_hw_fifo_txd *txdl;
#define VXGE_HW_FIFO_TXD_DEPTH 128
        u16 depth;
        u16 hw_offset;
        u16 sw_offset;
        struct __vxge_hw_virtualpath *vpathh;
};
/* Structure that represents the Rx descriptor block, which occupies
 * 128 descriptor slots: 127 one-buffer RxDs plus one final slot of
 * reserved/link words used to chain to the next block.
 */
struct __vxge_hw_ring_block {
#define VXGE_HW_MAX_RXDS_PER_BLOCK_1 127
        struct vxge_hw_ring_rxd_1 rxd[VXGE_HW_MAX_RXDS_PER_BLOCK_1];
        u64 reserved_0;
#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
        /* 0xFEFFFFFFFFFFFFFF to mark the last RxD in this block */
        u64 reserved_1;
        /* Logical pointer to the next block */
        u64 reserved_2_pNext_RxD_block;
        /* Physical (DMA) pointer to the next block, stored in the
         * Buff0_ptr slot; in a 32-bit arch the upper 32 bits should be 0 */
        u64 pNext_RxD_Blk_physical;
};
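/*
 * Editor's illustrative sketch (not part of the original driver): chaining
 * two RxD blocks as the field comments above describe. virt_to_bus() is
 * iPXE's virtual-to-DMA translation (also used by vxge_hw_ring_rxd_1b_set
 * below); the exact link layout is inferred from the comments here.
 */
static inline void vxge_example_link_blocks(struct __vxge_hw_ring_block *blk,
                                            struct __vxge_hw_ring_block *next)
{
        blk->reserved_1 = END_OF_BLOCK;
        blk->reserved_2_pNext_RxD_block = (u64)(intptr_t)next;
        blk->pNext_RxD_Blk_physical = virt_to_bus(next);
}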
/*
 * struct __vxge_hw_ring - Ring channel.
 *
 * Note: The structure is cache line aligned to better utilize
 * CPU cache performance.
 */
struct __vxge_hw_ring {
        struct vxge_hw_vpath_reg *vp_reg;
        struct vxge_hw_common_reg *common_reg;
        u32 vp_id;
#define VXGE_HW_RING_RXD_QWORDS_MODE_1 4
        u32 doorbell_cnt;
        u32 total_db_cnt;
#define VXGE_HW_RING_RXD_QWORD_LIMIT 16
        u64 rxd_qword_limit;
        struct __vxge_hw_ring_block *rxdl;
#define VXGE_HW_RING_BUF_PER_BLOCK 9
        u16 buf_per_block;
        u16 rxd_offset;
#define VXGE_HW_RING_RX_POLL_WEIGHT 8
        u16 rx_poll_weight;
        struct io_buffer *iobuf[VXGE_HW_RING_BUF_PER_BLOCK + 1];
        struct __vxge_hw_virtualpath *vpathh;
};
/*
 * struct __vxge_hw_virtualpath - Virtual Path
 *
 * Virtual path structure to encapsulate the data related to a virtual path.
 * Virtual paths are allocated by the HW upon getting configuration from the
 * driver and inserted into the list of virtual paths.
 */
struct __vxge_hw_virtualpath {
        u32 vp_id;
        u32 vp_open;
#define VXGE_HW_VP_NOT_OPEN 0
#define VXGE_HW_VP_OPEN     1
        struct __vxge_hw_device *hldev;
        struct vxge_hw_vpath_reg *vp_reg;
        struct vxge_hw_vpmgmt_reg *vpmgmt_reg;
        struct __vxge_hw_non_offload_db_wrapper *nofl_db;
        u32 max_mtu;
        u32 vsport_number;
        u32 max_kdfc_db;
        u32 max_nofl_db;
        struct __vxge_hw_ring ringh;
        struct __vxge_hw_fifo fifoh;
};
#define VXGE_HW_INFO_LEN 64
#define VXGE_HW_PMD_INFO_LEN 16
#define VXGE_MAX_PRINT_BUF_SIZE 128
/**
 * struct vxge_hw_device_hw_info - Device information
 * @host_type: Host Type
 * @func_id: Function Id
 * @vpath_mask: vpath bit mask
 * @fw_version: Firmware version
 * @fw_date: Firmware date
 * @flash_version: Flash version
 * @flash_date: Flash date
 * @mac_addrs: MAC addresses for each vpath
 * @mac_addr_masks: MAC address masks for each vpath
 *
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, and the first MAC address for each vpath.
 */
struct vxge_hw_device_hw_info {
        u32 host_type;
#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION   0
#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION    1
#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0        2
#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION 3
#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG      4
#define VXGE_HW_SR_VH_FUNCTION0               5
#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION        6
#define VXGE_HW_VH_NORMAL_FUNCTION            7
        u64 function_mode;
#define VXGE_HW_FUNCTION_MODE_MIN 0
#define VXGE_HW_FUNCTION_MODE_MAX 11
#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION          0
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION           1
#define VXGE_HW_FUNCTION_MODE_SRIOV                    2
#define VXGE_HW_FUNCTION_MODE_MRIOV                    3
#define VXGE_HW_FUNCTION_MODE_MRIOV_8                  4
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17        5
#define VXGE_HW_FUNCTION_MODE_SRIOV_8                  6
#define VXGE_HW_FUNCTION_MODE_SRIOV_4                  7
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2         8
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4         9
#define VXGE_HW_FUNCTION_MODE_MRIOV_4                  10
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_DIRECT_IO 11
        u32 func_id;
        u64 vpath_mask;
        struct vxge_hw_device_version fw_version;
        struct vxge_hw_device_date fw_date;
        struct vxge_hw_device_version flash_version;
        struct vxge_hw_device_date flash_date;
        u8 serial_number[VXGE_HW_INFO_LEN];
        u8 part_number[VXGE_HW_INFO_LEN];
        u8 product_desc[VXGE_HW_INFO_LEN];
        u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
        u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
};
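/*
 * Editor's illustrative sketch (not part of the original driver): finding
 * the first vpath assigned to this function and copying its MAC address out
 * of the hw_info filled in by vxge_hw_device_hw_info_get(). Assumes
 * vxge_mBIT() (Titan numbers bit 0 as the MSB) and memcpy() from <string.h>.
 */
static inline int vxge_example_first_mac(
                struct vxge_hw_device_hw_info *hw_info,
                u8 macaddr[ETH_ALEN])
{
        u32 i;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                if (!(hw_info->vpath_mask & vxge_mBIT(i)))
                        continue;
                memcpy(macaddr, hw_info->mac_addrs[i], ETH_ALEN);
                return 0;
        }
        return -1;
}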
/**
 * struct __vxge_hw_device - HAL device object
 * @magic: Magic Number
 * @bar0: BAR0 virtual address.
 * @pdev: Physical device handle
 * @config: Configuration passed by the LL driver at initialization
 * @link_state: Link state
 *
 * HW device object. Represents Titan adapter
 */
struct __vxge_hw_device {
        u32 magic;
#define VXGE_HW_DEVICE_MAGIC 0x12345678
#define VXGE_HW_DEVICE_DEAD  0xDEADDEAD
        void __iomem *bar0;
        struct pci_device *pdev;
        struct net_device *ndev;
        struct vxgedev *vdev;
        enum vxge_hw_device_link_state link_state;
        u32 host_type;
        u32 func_id;
        u8 titan1;
        u32 access_rights;
#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH  0x1
#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM 0x2
#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM 0x4
        struct vxge_hw_legacy_reg *legacy_reg;
        struct vxge_hw_toc_reg *toc_reg;
        struct vxge_hw_common_reg *common_reg;
        struct vxge_hw_mrpcim_reg *mrpcim_reg;
        struct vxge_hw_srpcim_reg *srpcim_reg[VXGE_HW_TITAN_SRPCIM_REG_SPACES];
        struct vxge_hw_vpmgmt_reg *vpmgmt_reg[VXGE_HW_TITAN_VPMGMT_REG_SPACES];
        struct vxge_hw_vpath_reg *vpath_reg[VXGE_HW_TITAN_VPATH_REG_SPACES];
        u8 *kdfc;
        u8 *usdc;
        struct __vxge_hw_virtualpath virtual_path;
        u64 vpath_assignments;
        u64 vpaths_deployed;
        u32 first_vp_id;
        u64 tim_int_mask0[4];
        u32 tim_int_mask1[4];
        struct vxge_hw_device_hw_info hw_info;
};
#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)

#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) {    \
        if (i < 16) {                                   \
                m0[0] |= vxge_vBIT(0x8, (i*4), 4);      \
                m0[1] |= vxge_vBIT(0x4, (i*4), 4);      \
        } else {                                        \
                m1[0] = 0x80000000;                     \
                m1[1] = 0x40000000;                     \
        }                                               \
}

#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) {  \
        if (i < 16) {                                   \
                m0[0] &= ~vxge_vBIT(0x8, (i*4), 4);     \
                m0[1] &= ~vxge_vBIT(0x4, (i*4), 4);     \
        } else {                                        \
                m1[0] = 0;                              \
                m1[1] = 0;                              \
        }                                               \
}
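/*
 * Editor's illustrative sketch (not part of the original driver): the mask
 * helpers above maintain the device's TIM interrupt shadow masks; vpaths
 * 0-15 live in the two u64 words of tim_int_mask0, higher vpaths in
 * tim_int_mask1.
 */
static inline void vxge_example_tim_masks(struct __vxge_hw_device *hldev,
                                          u32 vp_id)
{
        /* Set the TX (0x8) and RX (0x4) TIM bits for this vpath ... */
        VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
                                        hldev->tim_int_mask1, vp_id);
        /* ... and clear them again */
        VXGE_HW_DEVICE_TIM_INT_MASK_RESET(hldev->tim_int_mask0,
                                          hldev->tim_int_mask1, vp_id);
}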
/**
 * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
 * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
 * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
 * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
 * device.
 * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
 * filling-in and posting later.
 *
 * Titan/HW descriptor states.
 */
enum vxge_hw_txdl_state {
        VXGE_HW_TXDL_STATE_NONE = 0,
        VXGE_HW_TXDL_STATE_AVAIL = 1,
        VXGE_HW_TXDL_STATE_POSTED = 2,
        VXGE_HW_TXDL_STATE_FREED = 3
};
/* fifo and ring circular buffer offset tracking apis */
static inline void __vxge_hw_desc_offset_up(u16 upper_limit, u16 *offset)
{
        if (++(*offset) >= upper_limit)
                *offset = 0;
}

/* rxd offset handling apis */
static inline void vxge_hw_ring_rxd_offset_up(u16 *offset)
{
        __vxge_hw_desc_offset_up(VXGE_HW_MAX_RXDS_PER_BLOCK_1, offset);
}

/* txd offset handling apis */
static inline void vxge_hw_fifo_txd_offset_up(u16 *offset)
{
        __vxge_hw_desc_offset_up(VXGE_HW_FIFO_TXD_DEPTH, offset);
}
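/*
 * Editor's illustrative sketch (not part of the original driver): the
 * helpers above implement a simple modular counter, so advancing the last
 * valid TxD offset wraps back to the start of the list.
 */
static inline u16 vxge_example_txd_wrap(void)
{
        u16 offset = VXGE_HW_FIFO_TXD_DEPTH - 1;

        vxge_hw_fifo_txd_offset_up(&offset);    /* 127 wraps to 0 */
        return offset;                          /* 0 */
}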
/**
 * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
 * @rxdp: Descriptor pointer.
 * @iob: I/O buffer carrying the single receive buffer this descriptor
 * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
 * the receive buffer should already be mapped to the device.
 * @size: Size of the receive buffer.
 *
 * Prepare 1-buffer-mode Rx descriptor for posting
 * (via vxge_hw_ring_rxd_post()).
 *
 * This inline helper-function does not return any parameters and always
 * succeeds.
 */
static inline
void vxge_hw_ring_rxd_1b_set(struct vxge_hw_ring_rxd_1 *rxdp,
                             struct io_buffer *iob, u32 size)
{
        rxdp->host_control = (intptr_t)(iob);
        rxdp->buffer0_ptr = virt_to_bus(iob->data);
        rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
        rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
}
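/*
 * Editor's illustrative sketch (not part of the original driver): refilling
 * a ring descriptor with a fresh I/O buffer. alloc_iob() and struct
 * io_buffer are iPXE's buffer API (<ipxe/iobuf.h>); the buffer size here is
 * an arbitrary example value.
 */
static inline enum vxge_hw_status
vxge_example_rxd_refill(struct vxge_hw_ring_rxd_1 *rxdp)
{
        struct io_buffer *iob;

        iob = alloc_iob(VXGE_HW_DEFAULT_MTU);
        if (!iob)
                return VXGE_HW_ERR_OUT_OF_MEMORY;

        vxge_hw_ring_rxd_1b_set(rxdp, iob, VXGE_HW_DEFAULT_MTU);
        return VXGE_HW_OK;
}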
enum vxge_hw_status vxge_hw_device_hw_info_get(
        struct pci_device *pdev,
        void __iomem *bar0,
        struct vxge_hw_device_hw_info *hw_info);

enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(
        struct vxge_hw_vpath_reg __iomem *vpath_reg,
        struct vxge_hw_device_hw_info *hw_info);

enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
        struct vxge_hw_vpath_reg __iomem *vpath_reg,
        struct vxge_hw_device_hw_info *hw_info);

/**
 * vxge_hw_device_link_state_get - Get link state.
 * @devh: HW device handle.
 *
 * Get link state.
 * Returns: link state.
 */
static inline
enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
        struct __vxge_hw_device *devh)
{
        return devh->link_state;
}
void vxge_hw_device_terminate(struct __vxge_hw_device *devh);

enum vxge_hw_status vxge_hw_device_initialize(
        struct __vxge_hw_device **devh,
        void *bar0,
        struct pci_device *pdev,
        u8 titan1);

enum vxge_hw_status
vxge_hw_vpath_open(struct __vxge_hw_device *hldev, struct vxge_vpath *vpath);

enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);

enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_virtualpath *vpath);

enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_virtualpath *vpath);

enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(struct __vxge_hw_virtualpath *vpath);

void
vxge_hw_vpath_enable(struct __vxge_hw_virtualpath *vpath);

enum vxge_hw_status
vxge_hw_vpath_mtu_set(struct __vxge_hw_virtualpath *vpath, u32 new_mtu);

void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_virtualpath *vpath);

void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);

enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);

enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);

enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
        struct vxge_hw_vpath_reg __iomem *vpath_reg);

enum vxge_hw_status
__vxge_hw_device_register_poll(
        void __iomem *reg,
        u64 mask, u32 max_millis);
#ifndef readq
static inline u64 readq(void __iomem *addr)
{
        u64 ret = 0;

        /* Read the two 32-bit halves; the upper half lives at addr + 4 */
        ret = readl(addr + 4);
        ret <<= 32;
        ret |= readl(addr);
        return ret;
}
#endif

#ifndef writeq
static inline void writeq(u64 val, void __iomem *addr)
{
        writel((u32) (val), addr);
        writel((u32) (val >> 32), (addr + 4));
}
#endif

static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
{
        writel(val, addr + 4);
}

static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
{
        writel(val, addr);
}

static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
                          u64 mask, u32 max_millis)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        /* Titan numbers bit 0 as the MSB, so bits 32..63 of val64 are the
         * numerically lower 32 bits: write them first, then the upper half,
         * then poll until the masked bits clear or the timeout expires. */
        __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
        wmb();
        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
        wmb();
        status = __vxge_hw_device_register_poll(addr, mask, max_millis);
        return status;
}
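/*
 * Editor's illustrative sketch (not part of the original driver): a typical
 * caller writes a self-clearing command word and polls until the command
 * bits drop, reusing the default poll timeout. The register pointer and
 * command value are hypothetical.
 */
static inline enum vxge_hw_status
vxge_example_issue_cmd(void __iomem *cmd_reg, u64 cmd)
{
        return __vxge_hw_pio_mem_write64(cmd, cmd_reg, cmd,
                                         VXGE_HW_DEF_DEVICE_POLL_MILLIS);
}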
void
__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);

enum vxge_hw_status
__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);

enum vxge_hw_status
__vxge_hw_vpath_pci_read(
        struct __vxge_hw_virtualpath *vpath,
        u32 phy_func_0,
        u32 offset,
        u32 *val);

enum vxge_hw_status
__vxge_hw_vpath_addr_get(
        struct vxge_hw_vpath_reg __iomem *vpath_reg,
        u8 (macaddr)[ETH_ALEN],
        u8 (macaddr_mask)[ETH_ALEN]);

u32
__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);

enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);

enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
/**
 * vxge_debug
 * @mask: mask for the debug
 * @fmt: printf like format string
 */
static const u16 debug_filter = VXGE_ERR;
#define vxge_debug(mask, fmt...) do {   \
        if (debug_filter & mask)        \
                DBG(fmt);               \
} while (0)

#define vxge_trace() vxge_debug(VXGE_TRACE, "%s:%d\n", __func__, __LINE__)
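/*
 * Editor's illustrative usage (not part of the original driver): with the
 * compile-time filter above set to VXGE_ERR, only the second message is
 * emitted via iPXE's DBG().
 */
static inline void vxge_example_debug(void)
{
        vxge_debug(VXGE_INFO, "device initialized\n");  /* filtered out */
        vxge_debug(VXGE_ERR, "reset failed\n");         /* printed */
        vxge_trace();                                   /* filtered out */
}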
enum vxge_hw_status
vxge_hw_get_func_mode(struct __vxge_hw_device *hldev, u32 *func_mode);

enum vxge_hw_status
vxge_hw_set_fw_api(struct __vxge_hw_device *hldev,
        u64 vp_id, u32 action,
        u32 offset, u64 data0, u64 data1);

void
vxge_hw_vpath_set_zero_rx_frm_len(struct __vxge_hw_device *hldev);

#endif /* VXGE_CONFIG_H */