vxge_traffic.c

/*
 * vxge-traffic.c: iPXE driver for Neterion Inc's X3100 Series 10GbE
 * PCIe I/O Virtualized Server Adapter.
 *
 * Copyright(c) 2002-2010 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by
 * reference. Drivers based on or derived from this code fall under
 * the GPL and must retain the authorship, copyright and license
 * notice.
 *
 */

FILE_LICENCE(GPL2_ONLY);

#include <ipxe/netdevice.h>
#include <errno.h>

#include "vxge_traffic.h"
#include "vxge_config.h"
#include "vxge_main.h"

/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vpath: Virtual Path handle.
 *
 * Enable vpath interrupts. The function is to be executed last in the
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_disable()
 */
enum vxge_hw_status
vxge_hw_vpath_intr_enable(struct __vxge_hw_virtualpath *vpath)
{
        struct vxge_hw_vpath_reg *vp_reg;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }

        vp_reg = vpath->vp_reg;
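
        /* The writes of VXGE_HW_INTR_MASK_ALL to the *_reg / *_status
         * registers below appear to clear any latched error and alarm
         * state (write-1-to-clear) before the corresponding masks are
         * programmed. */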
        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->general_errors_reg);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_reg);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_reg);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_reg);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_ppif_int_status);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_reg);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_status);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->prc_alarm_reg);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->wrdma_alarm_status);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_reg);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->xgmac_vp_int_status);
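
        /* The readq below appears to act only as a flush of the preceding
         * posted PIO writes; its return value is deliberately discarded. */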
        readq(&vp_reg->vpath_general_int_status);

        /* Mask unwanted interrupts */
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_mask);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_mask);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_mask);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_mask);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_mask);
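
        /* Mask register convention apparently used throughout this file:
         * VXGE_HW_INTR_MASK_ALL masks every source in a group, a write of 0
         * unmasks the whole group, and the vxge_bVALn(...) values below
         * unmask only the listed bits. */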

        /* Unmask the individual interrupts */
        writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
                &vp_reg->general_errors_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
                &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
                &vp_reg->prc_alarm_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

        if (vpath->hldev->first_vp_id != vpath->vp_id)
                __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_mask);
        else
                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
                        VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT|
                        VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK),
                        0, 32), &vp_reg->asic_ntwk_vp_err_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_general_int_mask);
exit:
        return status;
}

/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vpath: Virtual Path handle.
 *
 * Disable vpath interrupts.
 *
 * See also: vxge_hw_vpath_intr_enable()
 */
enum vxge_hw_status
vxge_hw_vpath_intr_disable(struct __vxge_hw_virtualpath *vpath)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }

        vp_reg = vpath->vp_reg;

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_general_int_mask);

        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->general_errors_mask);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_mask);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_mask);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_mask);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_ppif_int_mask);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_mask);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_mask);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->wrdma_alarm_mask);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->prc_alarm_mask);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->xgmac_vp_int_mask);
        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_mask);

exit:
        return status;
}

/**
 * vxge_hw_device_mask_all - Mask all device interrupts.
 * @hldev: HW device handle.
 *
 * Mask all device interrupts.
 *
 * See also: vxge_hw_device_unmask_all()
 */
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
        u64 val64;

        val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
                VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                        &hldev->common_reg->titan_mask_all_int);

        return;
}

/**
 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 * @hldev: HW device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: vxge_hw_device_mask_all()
 */
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
        u64 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                        &hldev->common_reg->titan_mask_all_int);

        return;
}

/**
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 *
 * Enable Titan interrupts. The function is to be executed last in the
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 val32;

        vxge_hw_device_mask_all(hldev);

        vxge_hw_vpath_intr_enable(&hldev->virtual_path);

        val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

        if (val64 != 0) {
                writeq(val64, &hldev->common_reg->tim_int_status0);
                writeq(~val64, &hldev->common_reg->tim_int_mask0);
        }

        val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

        if (val32 != 0) {
                __vxge_hw_pio_mem_write32_upper(val32,
                                &hldev->common_reg->tim_int_status1);
                __vxge_hw_pio_mem_write32_upper(~val32,
                                &hldev->common_reg->tim_int_mask1);
        }

        val64 = readq(&hldev->common_reg->titan_general_int_status);

        /* We have not enabled the top level interrupt yet.
         * This will be controlled from vxge_irq() entry api.
         */
        return;
}

/**
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
        vxge_hw_device_mask_all(hldev);

        /* mask all the tim interrupts */
        writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
        __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
                        &hldev->common_reg->tim_int_mask1);

        vxge_hw_vpath_intr_disable(&hldev->virtual_path);

        return;
}

/**
 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
 * @ring: Handle to the ring object used for receive
 * @rxdp: Descriptor obtained via vxge_hw_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting, the descriptor should be filled in accordance with the
 * Host/Titan interface specification for a given service (LL, etc.).
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring __unused,
                struct vxge_hw_ring_rxd_1 *rxdp)
{
        rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
}

/**
 * __vxge_hw_non_offload_db_post - Post non offload doorbell
 *
 * @fifo: fifo handle
 * @txdl_ptr: The starting location of the TxDL in host memory
 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
 *
 * This function posts a non-offload doorbell to the doorbell FIFO.
 *
 */
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
                u64 txdl_ptr, u32 num_txds)
{
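        /* The doorbell is written in two pieces: the control word first,
         * then the TxDL pointer.  The intervening wmb() appears to be there
         * to keep the adapter from sampling the pointer before the control
         * word is visible; the trailing wmb() pushes the pointer write out
         * before the caller proceeds. */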
        writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
                VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds),
                &fifo->nofl_db->control_0);

        wmb();

        writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);

        wmb();
}

/**
 * vxge_hw_fifo_free_txdl_get: fetch next available txd in the fifo
 *
 * @fifo: tx channel handle
 */
struct vxge_hw_fifo_txd *
vxge_hw_fifo_free_txdl_get(struct __vxge_hw_fifo *fifo)
{
        struct vxge_hw_fifo_txd *txdp;

        txdp = fifo->txdl + fifo->sw_offset;
        if (txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER) {
                vxge_debug(VXGE_ERR, "%s:%d, error: txd(%d) owned by hw\n",
                                __func__, __LINE__, fifo->sw_offset);
                return NULL;
        }

        return txdp;
}

/**
 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdp: Descriptor handle.
 * @iob: data buffer.
 */
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
                struct vxge_hw_fifo_txd *txdp,
                struct io_buffer *iob)
{
        txdp->control_0 = VXGE_HW_FIFO_TXD_GATHER_CODE(
                        VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST);
        txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(iob_len(iob));

        txdp->control_1 = VXGE_HW_FIFO_TXD_INT_NUMBER(fifo->tx_intr_num);
        txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;

        txdp->host_control = (intptr_t)iob;
        txdp->buffer_pointer = virt_to_bus(iob->data);
}

/**
 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdp: Tx Descriptor
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting, the descriptor should be filled in accordance with the
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo,
                struct vxge_hw_fifo_txd *txdp)
{
        txdp->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

        __vxge_hw_non_offload_db_post(fifo, (u64) virt_to_bus(txdp), 0);

        vxge_hw_fifo_txd_offset_up(&fifo->sw_offset);
}
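
/*
 * Taken together, the three routines above form the transmit path.  A
 * minimal caller sketch (illustrative only; the real caller presumably
 * lives in vxge_main.c and may differ) would be:
 *
 *	txdp = vxge_hw_fifo_free_txdl_get(fifo);
 *	if (!txdp)
 *		return -ENOBUFS;
 *	vxge_hw_fifo_txdl_buffer_set(fifo, txdp, iob);
 *	vxge_hw_fifo_txdl_post(fifo, txdp);
 *
 * where iob is the io_buffer handed down by the iPXE network stack.
 */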

/*
 * __vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 *
 * Process vpath alarms.
 *
 */
static enum vxge_hw_status __vxge_hw_vpath_alarm_process(
                struct __vxge_hw_virtualpath *vpath)
{
        u64 val64;
        u64 alarm_status;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_device *hldev = NULL;
        struct vxge_hw_vpath_reg *vp_reg;

        hldev = vpath->hldev;
        vp_reg = vpath->vp_reg;

        alarm_status = readq(&vp_reg->vpath_general_int_status);
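
        /* VXGE_HW_ALL_FOXES is presumably the all-ones pattern
         * (0xFFFF...FFFF); reading it back is the usual sign that the PCIe
         * device has stopped responding ("slot freeze"), so no further
         * register access is attempted. */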
        if (alarm_status == VXGE_HW_ALL_FOXES) {
                vxge_debug(VXGE_ERR, "%s: %s:%d, slot freeze error\n",
                        hldev->ndev->name, __func__, __LINE__);
                status = VXGE_HW_ERR_SLOT_FREEZE;
                goto out;
        }

        if (alarm_status & ~(
                VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
                VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
                VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
                VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
                vxge_debug(VXGE_ERR, "%s: %s:%d, Unknown vpath alarm\n",
                        hldev->ndev->name, __func__, __LINE__);
                status = VXGE_HW_FAIL;
                goto out;
        }

        if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

                val64 = readq(&vp_reg->xgmac_vp_int_status);

                if (val64 &
                    VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

                        val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

                        if (((val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
                            (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
                            ((val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
                                && (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
                                ))) {
                                writeq(VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
                                        &vp_reg->asic_ntwk_vp_err_mask);

                                netdev_link_down(hldev->ndev);
                                vxge_debug(VXGE_INTR, "%s: %s:%d link down\n",
                                        hldev->ndev->name, __func__, __LINE__);
                        }

                        if (((val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
                            (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
                            ((val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
                                && (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
                                ))) {
                                writeq(VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
                                        &vp_reg->asic_ntwk_vp_err_mask);

                                netdev_link_up(hldev->ndev);
                                vxge_debug(VXGE_INTR, "%s: %s:%d link up\n",
                                        hldev->ndev->name, __func__, __LINE__);
                        }

                        writeq(VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->asic_ntwk_vp_err_reg);
                }
        } else {
                vxge_debug(VXGE_INFO, "%s: %s:%d unhandled alarm %llx\n",
                        hldev->ndev->name, __func__, __LINE__,
                        alarm_status);
        }
out:
        return status;
}

/**
 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 * condition that has caused the Tx and Rx interrupt.
 * @hldev: HW device.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx and Rx interrupt.
 * See also: vxge_hw_device_begin_irq(),
 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
 */
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{
        if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
                (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
                writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
                        &hldev->common_reg->tim_int_status0);
        }

        if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
                (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
                __vxge_hw_pio_mem_write32_upper(
                        (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
                        &hldev->common_reg->tim_int_status1);
        }

        return;
}

/**
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 *
 * The function performs two actions. It first checks whether the interrupt
 * was raised by the device (the IRQ may be shared). Next, it masks the
 * device interrupts.
 *
 * Note:
 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 *
 * Returns: 0, if the interrupt is not "ours" (note that in this case the
 * device remains enabled).
 * Otherwise, vxge_hw_device_begin_irq() returns a 64-bit general adapter
 * status.
 */
enum vxge_hw_status
vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u64 adapter_status;
        u64 vpath_mask;
        enum vxge_hw_status ret = VXGE_HW_OK;

        val64 = readq(&hldev->common_reg->titan_general_int_status);

        if (!val64) {
                ret = VXGE_HW_ERR_WRONG_IRQ;
                goto exit;
        }

        if (val64 == VXGE_HW_ALL_FOXES) {
                adapter_status = readq(&hldev->common_reg->adapter_status);

                if (adapter_status == VXGE_HW_ALL_FOXES) {
                        vxge_debug(VXGE_ERR, "%s: %s:%d critical error "
                                "occurred\n", hldev->ndev->name,
                                __func__, __LINE__);
                        ret = VXGE_HW_ERR_SLOT_FREEZE;
                        goto exit;
                }
        }

        vpath_mask = hldev->vpaths_deployed >>
                                (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
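
        /* vpaths_deployed appears to keep one bit per virtual path in the
         * top VXGE_HW_MAX_VIRTUAL_PATHS bits of a 64-bit word; the shift
         * lines those bits up with the per-vpath traffic-interrupt field
         * tested below. */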
        if (val64 & VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
                                vpath_mask))
                vxge_hw_device_clear_tx_rx(hldev);

        if (val64 & VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)
                ret = __vxge_hw_vpath_alarm_process(&hldev->virtual_path);

exit:
        return ret;
}
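
/*
 * A rough sketch of how the interrupt-side helpers above fit together
 * (illustrative only; the real entry points presumably live in vxge_main.c
 * and may differ):
 *
 *	status = vxge_hw_device_begin_irq(hldev);
 *	if (status == VXGE_HW_OK) {
 *		vxge_hw_vpath_poll_rx(ring);
 *		vxge_hw_vpath_poll_tx(fifo);
 *	}
 *
 * begin_irq() itself already acknowledges traffic interrupts via
 * vxge_hw_device_clear_tx_rx() and runs alarm processing.
 */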

/**
 * vxge_hw_vpath_doorbell_rx - Indicates to hw the qwords of receive
 * descriptors posted.
 * @ring: Handle to the ring object used for receive
 *
 * The function writes the number of qwords of rxds posted during replenishment.
 * Since the function is called frequently, a flush is not required to post the
 * write transaction. At the very least, the previous write will be flushed
 * once the subsequent write is made.
 *
 * Returns: None.
 */
void vxge_hw_vpath_doorbell_rx(struct __vxge_hw_ring *ring)
{
        u32 rxds_qw_per_block = VXGE_HW_MAX_RXDS_PER_BLOCK_1 *
                VXGE_HW_RING_RXD_QWORDS_MODE_1;

        ring->doorbell_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;
        ring->total_db_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;

        if (ring->total_db_cnt >= rxds_qw_per_block) {
                /* For each block add 4 more qwords */
                ring->doorbell_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;

                /* Reset total count */
                ring->total_db_cnt -= rxds_qw_per_block;
        }

        if (ring->doorbell_cnt >= ring->rxd_qword_limit) {
                wmb();
                writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(
                        ring->doorbell_cnt),
                        &ring->vp_reg->prc_rxd_doorbell);
                ring->doorbell_cnt = 0;
        }
}

/**
 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
 * descriptors and process the same.
 * @ring: Handle to the ring object used for receive
 *
 * The function polls the Rx for the completed descriptors.
 */
#define ETH_FCS_LEN 4
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
        struct __vxge_hw_device *hldev;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_ring_rxd_1 *rxd;
        unsigned int len;
        enum vxge_hw_ring_tcode tcode;
        struct io_buffer *rx_iob, *iobuf = NULL;
        u16 poll_count = 0;

        hldev = ring->vpathh->hldev;

        do {
                rxd = &ring->rxdl->rxd[ring->rxd_offset];
                tcode = VXGE_HW_RING_RXD_T_CODE_GET(rxd->control_0);

                /* if tcode is VXGE_HW_RING_T_CODE_FRM_DROP, it is
                 * possible the ownership bit still set to adapter */
                if ((rxd->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)
                        && (tcode == VXGE_HW_RING_T_CODE_OK)) {
                        status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
                        goto err0;
                }

                vxge_debug(VXGE_INFO, "%s: rx frame received at offset %d\n",
                                hldev->ndev->name, ring->rxd_offset);

                iobuf = (struct io_buffer *)(intptr_t)rxd->host_control;

                if (tcode != VXGE_HW_RING_T_CODE_OK) {
                        netdev_rx_err(hldev->ndev, NULL, -EINVAL);
                        vxge_debug(VXGE_ERR, "%s:%d, rx error tcode %d\n",
                                __func__, __LINE__, tcode);
                        status = VXGE_HW_FAIL;
                        goto err1;
                }
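
                /* The reported buffer 0 size appears to include the 4-byte
                 * Ethernet FCS, so it is trimmed before the frame is passed
                 * up the stack. */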
                len = VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxd->control_1);
                len -= ETH_FCS_LEN;

                rx_iob = alloc_iob(len);
                if (!rx_iob) {
                        netdev_rx_err(hldev->ndev, NULL, -ENOMEM);
                        vxge_debug(VXGE_ERR, "%s:%d, alloc_iob error\n",
                                __func__, __LINE__);
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
                        goto err1;
                }
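
                /* The frame is copied into a freshly allocated io_buffer so
                 * that the original DMA buffer (iobuf) can be left in place
                 * and simply reposted to the ring below. */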
                memcpy(iob_put(rx_iob, len), iobuf->data, len);

                /* Add this packet to the receive queue. */
                netdev_rx(hldev->ndev, rx_iob);

err1:
                /* repost the rxd */
                rxd->control_0 = rxd->control_1 = 0;
                vxge_hw_ring_rxd_1b_set(rxd, iobuf,
                                VXGE_LL_MAX_FRAME_SIZE(hldev->vdev));
                vxge_hw_ring_rxd_post(ring, rxd);

                /* repost the qword count for doorbell */
                vxge_hw_vpath_doorbell_rx(ring);

                /* increment the descriptor offset */
                vxge_hw_ring_rxd_offset_up(&ring->rxd_offset);

        } while (++poll_count < ring->rx_poll_weight);
err0:
        return status;
}

/**
 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
 * the same.
 * @fifo: Handle to the fifo object used for non offload send
 *
 * The function polls the Tx for the completed descriptors and calls
 * the driver via the supplied completion callback.
 */
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_fifo_txd *txdp;

        txdp = fifo->txdl + fifo->hw_offset;
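
        /* A descriptor has completed once the adapter has cleared the
         * ownership bit; a non-zero host_control (set in
         * vxge_hw_fifo_txdl_buffer_set()) distinguishes descriptors that
         * were actually posted from ones that were never used. */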
        if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)
                && (txdp->host_control)) {
                vxge_xmit_compl(fifo, txdp,
                        VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0));

                vxge_hw_fifo_txd_offset_up(&fifo->hw_offset);
        }

        return status;
}