
/**************************************************************************
 *
 * Driver datapath for Solarflare network cards
 *
 * Written by Shradha Shah <sshah@solarflare.com>
 *
 * Copyright 2012-2017 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 *
 ***************************************************************************/

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <byteswap.h>
#include <ipxe/io.h>
#include <ipxe/pci.h>
#include <ipxe/malloc.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>
#include "efx_hunt.h"
#include "efx_bitfield.h"
#include "ef10_regs.h"

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );

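/**
 * Free a DMA-capable special buffer
 *
 * @v buf               Buffer address
 * @v bytes             Buffer length in bytes
 */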
void efx_hunt_free_special_buffer(void *buf, int bytes)
{
        free_dma(buf, bytes);
}

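/**
 * Allocate a special buffer for a TX, RX or event queue
 *
 * @v bytes             Buffer length in bytes
 * @v entry             Special buffer descriptor to fill in
 * @ret buffer          Buffer address, or NULL on allocation failure
 */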
static void *efx_hunt_alloc_special_buffer(int bytes,
                                           struct efx_special_buffer *entry)
{
        void *buffer;
        dma_addr_t dma_addr;

        /* Allocate the buffer, aligned on a buffer address boundary.  This
         * buffer will be passed into an MC_CMD_INIT_*Q command to setup the
         * appropriate type of queue via MCDI.
         */
        buffer = malloc_dma(bytes, EFX_BUF_ALIGN);
        if (!buffer)
                return NULL;

        entry->dma_addr = dma_addr = virt_to_bus(buffer);
        assert((dma_addr & (EFX_BUF_ALIGN - 1)) == 0);

        /* Buffer table entries aren't allocated, so set id to zero */
        entry->id = 0;
        DBGP("Allocated 0x%x bytes at %p\n", bytes, buffer);

        return buffer;
}

/*******************************************************************************
 *
 *
 * TX
 *
 *
 ******************************************************************************/

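/**
 * Build a TX descriptor for an I/O buffer
 *
 * @v txd               TX descriptor to fill in
 * @v iob               I/O buffer containing the frame to transmit
 */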
static void
efx_hunt_build_tx_desc(efx_tx_desc_t *txd, struct io_buffer *iob)
{
        dma_addr_t dma_addr;

        dma_addr = virt_to_bus(iob->data);

        EFX_POPULATE_QWORD_4(*txd,
                             ESF_DZ_TX_KER_TYPE, 0,
                             ESF_DZ_TX_KER_CONT, 0,
                             ESF_DZ_TX_KER_BYTE_CNT, iob_len(iob),
                             ESF_DZ_TX_KER_BUF_ADDR, dma_addr);
}

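/**
 * Push the TX queue write pointer to hardware
 *
 * @v efx               EFX NIC
 */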
static void
efx_hunt_notify_tx_desc(struct efx_nic *efx)
{
        struct efx_tx_queue *txq = &efx->txq;
        int ptr = txq->write_ptr & EFX_TXD_MASK;
        efx_dword_t reg;

        EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, ptr);
        efx_writel_page(efx, &reg, 0, ER_DZ_TX_DESC_UPD_DWORD);
}

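/**
 * Transmit a packet
 *
 * @v netdev            Network device
 * @v iob               I/O buffer
 * @ret rc              Return status code
 */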
int
efx_hunt_transmit(struct net_device *netdev, struct io_buffer *iob)
{
        struct efx_nic *efx = netdev_priv(netdev);
        struct efx_tx_queue *txq = &efx->txq;
        int fill_level, space;
        efx_tx_desc_t *txd;
        int buf_id;

        fill_level = txq->write_ptr - txq->read_ptr;
        space = EFX_TXD_SIZE - fill_level - 1;
        if (space < 1)
                return -ENOBUFS;

        /* Save the iobuffer for later completion */
        buf_id = txq->write_ptr & EFX_TXD_MASK;
        assert(txq->buf[buf_id] == NULL);
        txq->buf[buf_id] = iob;

        DBGCIO(efx, "tx_buf[%d] for iob %p data %p len %zd\n",
               buf_id, iob, iob->data, iob_len(iob));

        /* Form the descriptor, and push it to hardware */
        txd = txq->ring + buf_id;
        efx_hunt_build_tx_desc(txd, iob);
        ++txq->write_ptr;
        efx_hunt_notify_tx_desc(efx);

        return 0;
}

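/**
 * Complete transmitted buffers
 *
 * @v efx               EFX NIC
 * @v id                Index of the last completed TX descriptor
 */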
static void
efx_hunt_transmit_done(struct efx_nic *efx, int id)
{
        struct efx_tx_queue *txq = &efx->txq;
        unsigned int read_ptr, stop;

        /* Complete all buffers from read_ptr up to and including id */
        read_ptr = txq->read_ptr & EFX_TXD_MASK;
        stop = (id + 1) & EFX_TXD_MASK;

        while (read_ptr != stop) {
                struct io_buffer *iob = txq->buf[read_ptr];

                assert(iob);
                /* Complete the tx buffer */
                if (iob)
                        netdev_tx_complete(efx->netdev, iob);
                DBGCIO(efx, "tx_buf[%d] for iob %p done\n", read_ptr, iob);
                txq->buf[read_ptr] = NULL;

                ++txq->read_ptr;
                read_ptr = txq->read_ptr & EFX_TXD_MASK;
        }
}

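/**
 * Allocate and initialise the TX queue
 *
 * @v netdev            Network device
 * @v dma_addr          Filled with the DMA address of the TX descriptor ring
 * @ret rc              Return status code
 */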
int efx_hunt_tx_init(struct net_device *netdev, dma_addr_t *dma_addr)
{
        struct efx_nic *efx = netdev_priv(netdev);
        struct efx_tx_queue *txq = &efx->txq;
        size_t bytes;

        /* Allocate hardware transmit queue */
        bytes = sizeof(efx_tx_desc_t) * EFX_TXD_SIZE;
        txq->ring = efx_hunt_alloc_special_buffer(bytes, &txq->entry);
        if (!txq->ring)
                return -ENOMEM;

        txq->read_ptr = txq->write_ptr = 0;
        *dma_addr = txq->entry.dma_addr;

        return 0;
}

/*******************************************************************************
 *
 *
 * RX
 *
 *
 ******************************************************************************/

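/**
 * Build an RX descriptor for an I/O buffer
 *
 * @v rxd               RX descriptor to fill in
 * @v iob               I/O buffer to receive into
 */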
static void
efx_hunt_build_rx_desc(efx_rx_desc_t *rxd, struct io_buffer *iob)
{
        dma_addr_t dma_addr = virt_to_bus(iob->data);

        EFX_POPULATE_QWORD_2(*rxd,
                             ESF_DZ_RX_KER_BYTE_CNT, EFX_RX_BUF_SIZE,
                             ESF_DZ_RX_KER_BUF_ADDR, dma_addr);
}

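/**
 * Push the RX queue write pointer to hardware
 *
 * @v efx               EFX NIC
 */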
static void
efx_hunt_notify_rx_desc(struct efx_nic *efx)
{
        struct efx_rx_queue *rxq = &efx->rxq;
        int ptr = rxq->write_ptr & EFX_RXD_MASK;
        efx_dword_t reg;

        EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, ptr);
        efx_writel_page(efx, &reg, 0, ER_DZ_RX_DESC_UPD);
}

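/**
 * Refill the RX queue with freshly allocated I/O buffers
 *
 * @v efx               EFX NIC
 */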
static void
efx_hunt_rxq_fill(struct efx_nic *efx)
{
        struct efx_rx_queue *rxq = &efx->rxq;
        int fill_level = rxq->write_ptr - rxq->read_ptr;
        int space = EFX_NUM_RX_DESC - fill_level - 1;
        int pushed = 0;

        while (space) {
                int buf_id = rxq->write_ptr & (EFX_NUM_RX_DESC - 1);
                int desc_id = rxq->write_ptr & EFX_RXD_MASK;
                struct io_buffer *iob;
                efx_rx_desc_t *rxd;

                assert(rxq->buf[buf_id] == NULL);
                iob = alloc_iob(EFX_RX_BUF_SIZE);
                if (!iob)
                        break;

                DBGCP(efx, "pushing rx_buf[%d] iob %p data %p\n",
                      buf_id, iob, iob->data);

                rxq->buf[buf_id] = iob;
                rxd = rxq->ring + desc_id;
                efx_hunt_build_rx_desc(rxd, iob);
                ++rxq->write_ptr;
                ++pushed;
                --space;
        }

        /* Push the ptr to hardware */
        if (pushed > 0) {
                efx_hunt_notify_rx_desc(efx);

                DBGCP(efx, "pushed %d rx buffers to fill level %d\n",
                      pushed, rxq->write_ptr - rxq->read_ptr);
        }
}

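/**
 * Receive a completed packet
 *
 * @v efx               EFX NIC
 * @v id                Lower bits of the RX descriptor index from the event
 * @v len               Received length in bytes (zero if the frame was aborted)
 * @v drop              Non-zero if the packet should be dropped
 */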
static void
efx_hunt_receive(struct efx_nic *efx, unsigned int id, int len, int drop)
{
        struct efx_rx_queue *rxq = &efx->rxq;
        unsigned int read_ptr = rxq->read_ptr & EFX_RXD_MASK;
        unsigned int buf_ptr = rxq->read_ptr & EFX_NUM_RX_DESC_MASK;
        struct io_buffer *iob;

        /* id is the lower 4 bits of the desc index + 1 on Huntington,
         * hence the anding with 15
         */
        assert((id & 15) == ((read_ptr + (len != 0)) & 15));

        /* Pop this rx buffer out of the software ring */
        iob = rxq->buf[buf_ptr];
        rxq->buf[buf_ptr] = NULL;

        DBGCIO(efx, "popping rx_buf[%d] iob %p data %p with %d bytes %s %x\n",
               read_ptr, iob, iob->data, len, drop ? "bad" : "ok", drop);

        /* Pass the packet up if required */
        if (drop)
                netdev_rx_err(efx->netdev, iob, EBADMSG);
        else {
                iob_put(iob, len);
                iob_pull(iob, efx->rx_prefix_size);
                netdev_rx(efx->netdev, iob);
        }

        ++rxq->read_ptr;
}

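/**
 * Allocate and initialise the RX queue
 *
 * @v netdev            Network device
 * @v dma_addr          Filled with the DMA address of the RX descriptor ring
 * @ret rc              Return status code
 */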
int efx_hunt_rx_init(struct net_device *netdev, dma_addr_t *dma_addr)
{
        struct efx_nic *efx = netdev_priv(netdev);
        struct efx_rx_queue *rxq = &efx->rxq;
        size_t bytes;

        /* Allocate hardware receive queue */
        bytes = sizeof(efx_rx_desc_t) * EFX_RXD_SIZE;
        rxq->ring = efx_hunt_alloc_special_buffer(bytes, &rxq->entry);
        if (rxq->ring == NULL)
                return -ENOMEM;

        rxq->read_ptr = rxq->write_ptr = 0;
        *dma_addr = rxq->entry.dma_addr;

        return 0;
}

/*******************************************************************************
 *
 *
 * Event queues and interrupts
 *
 *
 ******************************************************************************/

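/**
 * Allocate and initialise the event queue
 *
 * @v netdev            Network device
 * @v dma_addr          Filled with the DMA address of the event ring
 * @ret rc              Return status code
 */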
int efx_hunt_ev_init(struct net_device *netdev, dma_addr_t *dma_addr)
{
        struct efx_nic *efx = netdev_priv(netdev);
        struct efx_ev_queue *evq = &efx->evq;
        size_t bytes;

        /* Allocate the hardware event queue */
        bytes = sizeof(efx_event_t) * EFX_EVQ_SIZE;
        evq->ring = efx_hunt_alloc_special_buffer(bytes, &evq->entry);
        if (evq->ring == NULL)
                return -ENOMEM;

        memset(evq->ring, 0xff, bytes);
        evq->read_ptr = 0;
        *dma_addr = evq->entry.dma_addr;

        return 0;
}

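/**
 * Clear any pending interrupts by reading the interrupt status register
 *
 * @v efx               EFX NIC
 */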
static void
efx_hunt_clear_interrupts(struct efx_nic *efx)
{
        efx_dword_t reg;

        /* read the ISR */
        efx_readl(efx, &reg, ER_DZ_BIU_INT_ISR);
}

/**
 * See if an event is present
 *
 * @v event             EFX event structure
 * @ret True            An event is pending
 * @ret False           No event is pending
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int
efx_hunt_event_present(efx_event_t *event)
{
        return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
                  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
}

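/**
 * Acknowledge processed events by writing the event queue read pointer
 *
 * @v efx               EFX NIC
 *
 * When workaround 35388 is required, the read pointer is written in two
 * halves via the indirect doorbell register instead of the RPTR table.
 */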
static void
efx_hunt_evq_read_ack(struct efx_nic *efx)
{
        struct efx_ev_queue *evq = &efx->evq;
        efx_dword_t reg;

        if (efx->workaround_35388) {
                EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS,
                                     EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
                                     ERF_DD_EVQ_IND_RPTR,
                                     evq->read_ptr >> ERF_DD_EVQ_IND_RPTR_WIDTH);
                efx_writel_page(efx, &reg, 0, ER_DD_EVQ_INDIRECT);
                EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS,
                                     EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
                                     ERF_DD_EVQ_IND_RPTR, evq->read_ptr &
                                     ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
                efx_writel_page(efx, &reg, 0, ER_DD_EVQ_INDIRECT);
        } else {
                EFX_POPULATE_DWORD_1(reg, ERF_DZ_EVQ_RPTR, evq->read_ptr);
                efx_writel_table(efx, &reg, 0, ER_DZ_EVQ_RPTR);
        }
}

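/**
 * Handle a single event
 *
 * @v efx               EFX NIC
 * @v evt               Event to handle
 * @ret budget_used     Amount of poll budget consumed (1 for an RX event,
 *                      0 otherwise)
 */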
static unsigned int
efx_hunt_handle_event(struct efx_nic *efx, efx_event_t *evt)
{
        struct efx_rx_queue *rxq = &efx->rxq;
        int ev_code, desc_ptr, len;
        int next_ptr_lbits, packet_drop;
        int rx_cont;

        /* Decode event */
        ev_code = EFX_QWORD_FIELD(*evt, ESF_DZ_EV_CODE);

        switch (ev_code) {
        case ESE_DZ_EV_CODE_TX_EV:
                desc_ptr = EFX_QWORD_FIELD(*evt, ESF_DZ_TX_DESCR_INDX);
                efx_hunt_transmit_done(efx, desc_ptr);
                break;

        case ESE_DZ_EV_CODE_RX_EV:
                len = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_BYTES);
                next_ptr_lbits = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_DSC_PTR_LBITS);
                rx_cont = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_CONT);

                /* We don't expect to receive scattered packets, so drop the
                 * packet if RX_CONT is set on the current or previous event,
                 * or if len is zero.
                 */
                packet_drop = (len == 0) | (rx_cont << 1) |
                              (rxq->rx_cont_prev << 2);
                efx_hunt_receive(efx, next_ptr_lbits, len, packet_drop);
                rxq->rx_cont_prev = rx_cont;
                return 1;

        default:
                DBGCP(efx, "Unknown event type %d\n", ev_code);
                break;
        }

        return 0;
}

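/**
 * Poll for completed events
 *
 * @v netdev            Network device
 */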
void efx_hunt_poll(struct net_device *netdev)
{
        struct efx_nic *efx = netdev_priv(netdev);
        struct efx_ev_queue *evq = &efx->evq;
        efx_event_t *evt;
        int budget = 10;

        /* Read the event queue by directly looking for events
         * (we don't even bother to read the eventq write ptr)
         */
        evt = evq->ring + evq->read_ptr;
        while (efx_hunt_event_present(evt) && (budget > 0)) {
                DBGCP(efx, "Event at index 0x%x address %p is "
                      EFX_QWORD_FMT "\n", evq->read_ptr,
                      evt, EFX_QWORD_VAL(*evt));

                budget -= efx_hunt_handle_event(efx, evt);

                /* Clear the event */
                EFX_SET_QWORD(*evt);

                /* Move to the next event.  We don't ack the event
                 * queue until the end
                 */
                evq->read_ptr = ((evq->read_ptr + 1) & EFX_EVQ_MASK);
                evt = evq->ring + evq->read_ptr;
        }

        /* Push more rx buffers if needed */
        efx_hunt_rxq_fill(efx);

        /* Clear any pending interrupts */
        efx_hunt_clear_interrupts(efx);

        /* Ack the event queue if interrupts are enabled */
        if (efx->int_en)
                efx_hunt_evq_read_ack(efx);
}

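/**
 * Enable or disable interrupts
 *
 * @v netdev            Network device
 * @v enable            Non-zero to enable interrupts, zero to disable
 */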
void efx_hunt_irq(struct net_device *netdev, int enable)
{
        struct efx_nic *efx = netdev_priv(netdev);

        efx->int_en = enable;

        /* If interrupts are enabled, prime the event queue.  Otherwise ack
         * any pending interrupts
         */
        if (enable)
                efx_hunt_evq_read_ack(efx);
        else if (efx->netdev->state & NETDEV_OPEN)
                efx_hunt_clear_interrupts(efx);
}

/*******************************************************************************
 *
 *
 * Initialization and Close
 *
 *
 ******************************************************************************/

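/**
 * Open the datapath
 *
 * @v netdev            Network device
 * @ret rc              Return status code
 */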
int efx_hunt_open(struct net_device *netdev)
{
        struct efx_nic *efx = netdev_priv(netdev);
        efx_dword_t cmd;

        /* Set interrupt moderation to 0 */
        EFX_POPULATE_DWORD_2(cmd,
                             ERF_DZ_TC_TIMER_MODE, 0,
                             ERF_DZ_TC_TIMER_VAL, 0);
        efx_writel_page(efx, &cmd, 0, ER_DZ_EVQ_TMR);

        /* Ack the eventq */
        if (efx->int_en)
                efx_hunt_evq_read_ack(efx);

        /* Push receive buffers */
        efx_hunt_rxq_fill(efx);

        return 0;
}

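/**
 * Close the datapath and release outstanding buffers
 *
 * @v netdev            Network device
 */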
void efx_hunt_close(struct net_device *netdev)
{
        struct efx_nic *efx = netdev_priv(netdev);
        struct efx_rx_queue *rxq = &efx->rxq;
        struct efx_tx_queue *txq = &efx->txq;
        int i;

        /* Complete outstanding descriptors */
        for (i = 0; i < EFX_NUM_RX_DESC; i++) {
                if (rxq->buf[i]) {
                        free_iob(rxq->buf[i]);
                        rxq->buf[i] = NULL;
                }
        }

        for (i = 0; i < EFX_TXD_SIZE; i++) {
                if (txq->buf[i]) {
                        netdev_tx_complete(efx->netdev, txq->buf[i]);
                        txq->buf[i] = NULL;
                }
        }

        /* Clear interrupts */
        efx_hunt_clear_interrupts(efx);
}