tg3.c

FILE_LICENCE ( GPL2_ONLY );

#include <mii.h>
#include <stdio.h>
#include <errno.h>
#include <assert.h>	/* assert(), used in tg3_rx_complete() */
#include <unistd.h>
#include <byteswap.h>
#include <ipxe/pci.h>
#include <ipxe/iobuf.h>
#include <ipxe/timer.h>
#include <ipxe/malloc.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include <ipxe/netdevice.h>

#include "tg3.h"

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0

static void tg3_refill_prod_ring(struct tg3 *tp);

/* Do not place these n-ring-entries constants into the tp struct
 * itself; we really want to expose them to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)

/* FIXME: does TG3_RX_RET_MAX_SIZE_5705 work for all cards? */
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (TG3_RX_RET_MAX_SIZE_5705))

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_MAX_SIZE_5700)

void tg3_rx_prodring_fini(struct tg3_rx_prodring_set *tpr)
{	DBGP("%s\n", __func__);

	if (tpr->rx_std) {
		free_dma(tpr->rx_std, TG3_RX_STD_RING_BYTES(tp));
		tpr->rx_std = NULL;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{	DBGP("%s\n", __func__);

	if (tp->tx_ring) {
		free_dma(tp->tx_ring, TG3_TX_RING_BYTES);
		tp->tx_ring = NULL;
	}

	free(tp->tx_buffers);
	tp->tx_buffers = NULL;

	if (tp->rx_rcb) {
		free_dma(tp->rx_rcb, TG3_RX_RCB_RING_BYTES(tp));
		tp->rx_rcb_mapping = 0;
		tp->rx_rcb = NULL;
	}

	tg3_rx_prodring_fini(&tp->prodring);

	if (tp->hw_status) {
		free_dma(tp->hw_status, TG3_HW_STATUS_SIZE);
		tp->status_mapping = 0;
		tp->hw_status = NULL;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down. Can sleep.
 */
int tg3_alloc_consistent(struct tg3 *tp)
{	DBGP("%s\n", __func__);

	struct tg3_hw_status *sblk;
	struct tg3_rx_prodring_set *tpr = &tp->prodring;

	tp->hw_status = malloc_dma(TG3_HW_STATUS_SIZE, TG3_DMA_ALIGNMENT);
	if (!tp->hw_status) {
		DBGC(tp->dev, "hw_status alloc failed\n");
		goto err_out;
	}
	tp->status_mapping = virt_to_bus(tp->hw_status);

	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	sblk = tp->hw_status;

	tpr->rx_std = malloc_dma(TG3_RX_STD_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
	if (!tpr->rx_std) {
		DBGC(tp->dev, "rx prodring alloc failed\n");
		goto err_out;
	}
	tpr->rx_std_mapping = virt_to_bus(tpr->rx_std);
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	tp->tx_buffers = zalloc(sizeof(struct ring_info) * TG3_TX_RING_SIZE);
	if (!tp->tx_buffers)
		goto err_out;

	tp->tx_ring = malloc_dma(TG3_TX_RING_BYTES, TG3_DMA_ALIGNMENT);
	if (!tp->tx_ring)
		goto err_out;
	tp->tx_desc_mapping = virt_to_bus(tp->tx_ring);

	/*
	 * When RSS is enabled, the status block format changes
	 * slightly. The "rx_jumbo_consumer", "reserved",
	 * and "rx_mini_consumer" members get mapped to the
	 * other three rx return ring producer indexes.
	 */
	tp->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;

	tp->rx_rcb = malloc_dma(TG3_RX_RCB_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
	if (!tp->rx_rcb)
		goto err_out;
	tp->rx_rcb_mapping = virt_to_bus(tp->rx_rcb);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}

#define TG3_RX_STD_BUFF_RING_BYTES(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_MAX_SIZE_5700)

/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking stack, so no interrupts or new tx packets will
 * end up in the driver.
 */
static int tg3_rx_prodring_alloc(struct tg3 __unused *tp,
				 struct tg3_rx_prodring_set *tpr)
{	DBGP("%s\n", __func__);

	u32 i;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;

	/* Initialize invariants of the rings; we only set this
	 * stuff once. This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	/* FIXME: does TG3_RX_STD_MAX_SIZE_5700 work on all cards? */
	for (i = 0; i < TG3_RX_STD_MAX_SIZE_5700; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = (TG3_RX_STD_DMA_SZ - 64 - 2) << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	return 0;
}

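/* Free the I/O buffer at index i of the posted-buffer table, if any */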
static void tg3_rx_iob_free(struct io_buffer *iobs[], int i)
{	DBGP("%s\n", __func__);

	if (iobs[i] == NULL)
		return;

	free_iob(iobs[i]);
	iobs[i] = NULL;
}

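/* Release all I/O buffers still posted to the producer ring */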
static void tg3_rx_prodring_free(struct tg3_rx_prodring_set *tpr)
{	DBGP("%s\n", __func__);

	unsigned int i;

	for (i = 0; i < TG3_DEF_RX_RING_PENDING; i++)
		tg3_rx_iob_free(tpr->rx_iobufs, i);
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking stack, so no interrupts or new tx packets will
 * end up in the driver.
 */
int tg3_init_rings(struct tg3 *tp)
{	DBGP("%s\n", __func__);

	/* Free up all the SKBs. */
	/// tg3_free_rings(tp);

	tp->last_tag = 0;
	tp->last_irq_tag = 0;

	tp->hw_status->status = 0;
	tp->hw_status->status_tag = 0;
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);

	tp->tx_prod = 0;
	tp->tx_cons = 0;
	if (tp->tx_ring)
		memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	tp->rx_rcb_ptr = 0;
	if (tp->rx_rcb)
		memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

	if (tg3_rx_prodring_alloc(tp, &tp->prodring)) {
		DBGC(tp->dev, "tg3_rx_prodring_alloc() failed\n");
		tg3_rx_prodring_free(&tp->prodring);
		return -ENOMEM;
	}

	return 0;
}

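/* Bring the interface up: set the power state, program the MAC
 * address, allocate the DMA-consistent rings, initialise the hardware
 * and post the initial receive buffers.
 */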
static int tg3_open(struct net_device *dev)
{	DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);
	struct tg3_rx_prodring_set *tpr = &tp->prodring;
	int err = 0;

	tg3_set_power_state_0(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, 0);

	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	tpr->rx_std_iob_cnt = 0;

	err = tg3_init_hw(tp, 1);
	if (err != 0)
		DBGC(tp->dev, "tg3_init_hw failed: %s\n", strerror(err));
	else
		tg3_refill_prod_ring(tp);

	return err;
}

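/* Number of free TX descriptors: the ring budget minus the masked
 * distance between the producer and consumer indices.
 */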
static inline u32 tg3_tx_avail(struct tg3 *tp)
{	DBGP("%s\n", __func__);

	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return TG3_DEF_TX_RING_PENDING -
	       ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1));
}

#if 0
/**
 * Prints all registers that could cause a set ERR bit in
 * hw_status->status
 */
static void tg3_dump_err_reg(struct tg3 *tp)
{	DBGP("%s\n", __func__);

	printf("FLOW_ATTN: %#08x\n", tr32(HOSTCC_FLOW_ATTN));
	printf("MAC ATTN: %#08x\n", tr32(MAC_STATUS));
	printf("MSI STATUS: %#08x\n", tr32(MSGINT_STATUS));
	printf("DMA RD: %#08x\n", tr32(RDMAC_STATUS));
	printf("DMA WR: %#08x\n", tr32(WDMAC_STATUS));
	printf("TX CPU STATE: %#08x\n", tr32(TX_CPU_STATE));
	printf("RX CPU STATE: %#08x\n", tr32(RX_CPU_STATE));
}

static void __unused tw32_mailbox2(struct tg3 *tp, uint32_t reg, uint32_t val)
{	DBGP("%s\n", __func__);

	tw32_mailbox(reg, val);
	tr32(reg);
}
#endif

#define NEXT_TX(N)	(((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static int tg3_transmit(struct net_device *dev, struct io_buffer *iob)
{	DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry;
	dma_addr_t mapping;

	if (tg3_tx_avail(tp) < 1) {
		DBGC(dev, "Transmit ring full\n");
		return -ENOBUFS;
	}

	entry = tp->tx_prod;

	iob_pad(iob, ETH_ZLEN);
	mapping = virt_to_bus(iob->data);
	len = iob_len(iob);

	tp->tx_buffers[entry].iob = iob;

	tg3_set_txd(tp, entry, mapping, len, TXD_FLAG_END);

	entry = NEXT_TX(entry);

	/* Packet is ready; update the Tx producer index locally and on
	 * the card. */
	tw32_tx_mbox(tp->prodmbox, entry);

	tp->tx_prod = entry;
	mb();

	return 0;
}

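/* Reap TX descriptors between our consumer index and the hardware's,
 * reporting each completed buffer to the network stack.
 */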
static void tg3_tx_complete(struct net_device *dev)
{	DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct io_buffer *iob = tp->tx_buffers[sw_idx].iob;

		DBGC2(dev, "Transmitted packet: %zd bytes\n", iob_len(iob));
		netdev_tx_complete(dev, iob);
		sw_idx = NEXT_TX(sw_idx);
	}

	tp->tx_cons = sw_idx;
}

/* Returns 0 or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses. For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address). Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_iob(struct tg3_rx_prodring_set *tpr, u32 dest_idx_unmasked)
{	DBGP("%s\n", __func__);

	struct tg3_rx_buffer_desc *desc;
	struct io_buffer *iob;
	dma_addr_t mapping;
	int dest_idx, iob_idx;

	dest_idx = dest_idx_unmasked & (TG3_RX_STD_MAX_SIZE_5700 - 1);
	desc = &tpr->rx_std[dest_idx];

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	iob = alloc_iob(TG3_RX_STD_DMA_SZ);
	if (iob == NULL)
		return -ENOMEM;

	iob_idx = dest_idx % TG3_DEF_RX_RING_PENDING;
	tpr->rx_iobufs[iob_idx] = iob;

	mapping = virt_to_bus(iob->data);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return 0;
}

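/* Top up the standard producer ring until TG3_DEF_RX_RING_PENDING
 * buffers are posted, then publish the new producer index to the card.
 */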
static void tg3_refill_prod_ring(struct tg3 *tp)
{	DBGP("%s\n", __func__);

	struct tg3_rx_prodring_set *tpr = &tp->prodring;
	int idx = tpr->rx_std_prod_idx;

	while (tpr->rx_std_iob_cnt < TG3_DEF_RX_RING_PENDING) {
		if (tpr->rx_iobufs[idx % TG3_DEF_RX_RING_PENDING] == NULL) {
			if (tg3_alloc_rx_iob(tpr, idx) < 0) {
				DBGC(tp->dev, "alloc_iob() failed for descriptor %d\n", idx);
				break;
			}
			DBGC2(tp->dev, "allocated iob_buffer for descriptor %d\n", idx);
		}

		idx = (idx + 1) % TG3_RX_STD_MAX_SIZE_5700;
		tpr->rx_std_iob_cnt++;
	}

	if ((u32)idx != tpr->rx_std_prod_idx) {
		tpr->rx_std_prod_idx = idx;
		tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx);
	}
}

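/* Drain the RX return ring: hand good packets to the network stack,
 * report corrupted ones, advance the consumer mailbox, and refill the
 * producer ring.
 */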
static void tg3_rx_complete(struct net_device *dev)
{	DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);

	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	struct tg3_rx_prodring_set *tpr = &tp->prodring;

	hw_idx = *(tp->rx_rcb_prod_idx);

	while (sw_idx != hw_idx) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		u32 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		int iob_idx = desc_idx % TG3_DEF_RX_RING_PENDING;
		struct io_buffer *iob = tpr->rx_iobufs[iob_idx];
		unsigned int len;

		DBGC2(dev, "RX - desc_idx: %d sw_idx: %d hw_idx: %d\n",
		      desc_idx, sw_idx, hw_idx);

		assert(iob != NULL);

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
			/* drop packet */
			DBGC(dev, "Corrupted packet received\n");
			netdev_rx_err(dev, iob, -EINVAL);
		} else {
			len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
			      ETH_FCS_LEN;
			iob_put(iob, len);
			netdev_rx(dev, iob);

			DBGC2(dev, "Received packet: %d bytes %d %d\n",
			      len, sw_idx, hw_idx);
		}

		sw_idx++;
		sw_idx &= TG3_RX_RET_MAX_SIZE_5705 - 1;

		tpr->rx_iobufs[iob_idx] = NULL;
		tpr->rx_std_iob_cnt--;
	}

	if (tp->rx_rcb_ptr != sw_idx) {
		tw32_rx_mbox(tp->consmbox, sw_idx);
		tp->rx_rcb_ptr = sw_idx;
	}

	tg3_refill_prod_ring(tp);
}

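/* Poll hook: acknowledge the status block update and service the link
 * state and both completion rings.
 */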
static void tg3_poll(struct net_device *dev)
{	DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);

	/* ACK interrupts */
	/*
	 * tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00);
	 */
	tp->hw_status->status &= ~SD_STATUS_UPDATED;

	tg3_poll_link(tp);
	tg3_tx_complete(dev);
	tg3_rx_complete(dev);
}

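/* Shut down the hardware and release all ring resources */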
static void tg3_close(struct net_device *dev)
{	DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);

	tg3_halt(tp);
	tg3_rx_prodring_free(&tp->prodring);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_free_consistent(tp);
}

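/* Enable or disable interrupt generation by the card */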
static void tg3_irq(struct net_device *dev, int enable)
{	DBGP("%s: %d\n", __func__, enable);

	struct tg3 *tp = netdev_priv(dev);

	if (enable)
		tg3_enable_ints(tp);
	else
		tg3_disable_ints(tp);
}

static struct net_device_operations tg3_netdev_ops = {
	.open = tg3_open,
	.close = tg3_close,
	.poll = tg3_poll,
	.transmit = tg3_transmit,
	.irq = tg3_irq,
};

#define TEST_BUFFER_SIZE	0x2000

int tg3_do_test_dma(struct tg3 *tp, u32 __unused *buf, dma_addr_t buf_dma,
		    int size, int to_device);
void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val);

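/* Probe for DMA quirks: program chip-specific read/write watermarks
 * into DMA_RW_CTRL and, on 5700/5701, run a write/read-back test
 * against card memory to detect the write DMA bug, tightening the
 * write boundary to 16 bytes if needed.
 */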
static int tg3_test_dma(struct tg3 *tp)
{	DBGP("%s\n", __func__);

	dma_addr_t buf_dma;
	u32 *buf;
	int ret = 0;

	buf = malloc_dma(TEST_BUFFER_SIZE, TG3_DMA_ALIGNMENT);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}
	buf_dma = virt_to_bus(buf);

	DBGC2(tp->dev, "dma test buffer, virt: %p phys: %#08lx\n",
	      buf, (unsigned long)buf_dma);

	if (tg3_flag(tp, 57765_PLUS)) {
		tp->dma_rwctrl = DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on. This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning. In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			DBGC(&tp->pdev->dev,
			     "%s: Buffer write failed. err = %d\n",
			     __func__, ret);
			break;
		}

		/* Validate that data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				DBGC(&tp->pdev->dev,
				     "%s: Buffer corrupted on device! "
				     "(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			DBGC(&tp->pdev->dev, "%s: Buffer read failed. "
			     "err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				DBGC(&tp->pdev->dev,
				     "%s: Buffer corrupted on read back! "
				     "(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
		tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	free_dma(buf, TEST_BUFFER_SIZE);
out_nofree:
	return ret;
}

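/* PCI probe routine: map the device registers, read the chip
 * invariants, reset any DMA state left by a previous driver, size the
 * DMA engine and register the network device.
 */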
static int tg3_init_one(struct pci_device *pdev)
{	DBGP("%s\n", __func__);

	struct net_device *dev;
	struct tg3 *tp;
	int err = 0;
	unsigned long reg_base, reg_size;

	adjust_pci_device(pdev);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		DBGC(&pdev->dev, "Failed to allocate etherdev\n");
		err = -ENOMEM;
		goto err_out_disable_pdev;
	}

	netdev_init(dev, &tg3_netdev_ops);
	pci_set_drvdata(pdev, dev);

	dev->dev = &pdev->dev;

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	/* Subsystem IDs are required later */
	pci_read_config_word(tp->pdev, PCI_SUBSYSTEM_VENDOR_ID,
			     &tp->subsystem_vendor);
	pci_read_config_word(tp->pdev, PCI_SUBSYSTEM_ID,
			     &tp->subsystem_device);

	/* The word/byte swap controls here control register access byte
	 * swapping. DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#if __BYTE_ORDER == __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif

	/* FIXME: how can we detect errors here? */
	reg_base = pci_bar_start(pdev, PCI_BASE_ADDRESS_0);
	reg_size = pci_bar_size(pdev, PCI_BASE_ADDRESS_0);

	tp->regs = ioremap(reg_base, reg_size);
	if (!tp->regs) {
		DBGC(&pdev->dev, "Failed to remap device registers\n");
		err = -ENOENT;
		goto err_out_disable_pdev;
	}

	err = tg3_get_invariants(tp);
	if (err) {
		DBGC(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
		goto err_out_iounmap;
	}

	tg3_init_bufmgr_config(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		DBGC(&pdev->dev, "Could not obtain valid ethernet address, aborting\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset the chip in case a UNDI or EFI driver did not shut it
	 * down: the DMA self test will enable WDMAC and we'd see
	 * (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp);
	}

	err = tg3_test_dma(tp);
	if (err) {
		DBGC(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_iounmap;
	}

	tp->int_mbox = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	tp->consmbox = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	tp->prodmbox = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;

	tp->coal_now = HOSTCC_MODE_NOW;

	err = register_netdev(dev);
	if (err) {
		DBGC(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_iounmap;
	}

	/* Call tg3_setup_phy() to start the autoneg process, which saves
	 * time over starting autoneg in tg3_open().
	 */
	err = tg3_setup_phy(tp, 0);
	if (err) {
		DBGC(tp->dev, "tg3_setup_phy() call failed in %s\n", __func__);
		goto err_out_unregister;
	}

	return 0;

err_out_unregister:
	unregister_netdev(dev);

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

	netdev_put(dev);

err_out_disable_pdev:
	pci_set_drvdata(pdev, NULL);
	return err;
}

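/* PCI remove routine: unregister and release the network device */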
static void tg3_remove_one(struct pci_device *pci)
{	DBGP("%s\n", __func__);

	struct net_device *netdev = pci_get_drvdata(pci);

	unregister_netdev(netdev);
	netdev_nullify(netdev);
	netdev_put(netdev);
}

static struct pci_device_id tg3_nics[] = {
	PCI_ROM(0x14e4, 0x1644, "14e4-1644", "14e4-1644", 0),
	PCI_ROM(0x14e4, 0x1645, "14e4-1645", "14e4-1645", 0),
	PCI_ROM(0x14e4, 0x1646, "14e4-1646", "14e4-1646", 0),
	PCI_ROM(0x14e4, 0x1647, "14e4-1647", "14e4-1647", 0),
	PCI_ROM(0x14e4, 0x1648, "14e4-1648", "14e4-1648", 0),
	PCI_ROM(0x14e4, 0x164d, "14e4-164d", "14e4-164d", 0),
	PCI_ROM(0x14e4, 0x1653, "14e4-1653", "14e4-1653", 0),
	PCI_ROM(0x14e4, 0x1654, "14e4-1654", "14e4-1654", 0),
	PCI_ROM(0x14e4, 0x165d, "14e4-165d", "14e4-165d", 0),
	PCI_ROM(0x14e4, 0x165e, "14e4-165e", "14e4-165e", 0),
	PCI_ROM(0x14e4, 0x16a6, "14e4-16a6", "14e4-16a6", 0),
	PCI_ROM(0x14e4, 0x16a7, "14e4-16a7", "14e4-16a7", 0),
	PCI_ROM(0x14e4, 0x16a8, "14e4-16a8", "14e4-16a8", 0),
	PCI_ROM(0x14e4, 0x16c6, "14e4-16c6", "14e4-16c6", 0),
	PCI_ROM(0x14e4, 0x16c7, "14e4-16c7", "14e4-16c7", 0),
	PCI_ROM(0x14e4, 0x1696, "14e4-1696", "14e4-1696", 0),
	PCI_ROM(0x14e4, 0x169c, "14e4-169c", "14e4-169c", 0),
	PCI_ROM(0x14e4, 0x169d, "14e4-169d", "14e4-169d", 0),
	PCI_ROM(0x14e4, 0x170d, "14e4-170d", "14e4-170d", 0),
	PCI_ROM(0x14e4, 0x170e, "14e4-170e", "14e4-170e", 0),
	PCI_ROM(0x14e4, 0x1649, "14e4-1649", "14e4-1649", 0),
	PCI_ROM(0x14e4, 0x166e, "14e4-166e", "14e4-166e", 0),
	PCI_ROM(0x14e4, 0x1659, "14e4-1659", "14e4-1659", 0),
	PCI_ROM(0x14e4, 0x165a, "14e4-165a", "14e4-165a", 0),
	PCI_ROM(0x14e4, 0x1677, "14e4-1677", "14e4-1677", 0),
	PCI_ROM(0x14e4, 0x167d, "14e4-167d", "14e4-167d", 0),
	PCI_ROM(0x14e4, 0x167e, "14e4-167e", "14e4-167e", 0),
	PCI_ROM(0x14e4, 0x1600, "14e4-1600", "14e4-1600", 0),
	PCI_ROM(0x14e4, 0x1601, "14e4-1601", "14e4-1601", 0),
	PCI_ROM(0x14e4, 0x16f7, "14e4-16f7", "14e4-16f7", 0),
	PCI_ROM(0x14e4, 0x16fd, "14e4-16fd", "14e4-16fd", 0),
	PCI_ROM(0x14e4, 0x16fe, "14e4-16fe", "14e4-16fe", 0),
	PCI_ROM(0x14e4, 0x167a, "14e4-167a", "14e4-167a", 0),
	PCI_ROM(0x14e4, 0x1672, "14e4-1672", "14e4-1672", 0),
	PCI_ROM(0x14e4, 0x167b, "14e4-167b", "14e4-167b", 0),
	PCI_ROM(0x14e4, 0x1673, "14e4-1673", "14e4-1673", 0),
	PCI_ROM(0x14e4, 0x1674, "14e4-1674", "14e4-1674", 0),
	PCI_ROM(0x14e4, 0x169a, "14e4-169a", "14e4-169a", 0),
	PCI_ROM(0x14e4, 0x169b, "14e4-169b", "14e4-169b", 0),
	PCI_ROM(0x14e4, 0x1693, "14e4-1693", "14e4-1693", 0),
	PCI_ROM(0x14e4, 0x167f, "14e4-167f", "14e4-167f", 0),
	PCI_ROM(0x14e4, 0x1668, "14e4-1668", "14e4-1668", 0),
	PCI_ROM(0x14e4, 0x1669, "14e4-1669", "14e4-1669", 0),
	PCI_ROM(0x14e4, 0x1678, "14e4-1678", "14e4-1678", 0),
	PCI_ROM(0x14e4, 0x1679, "14e4-1679", "14e4-1679", 0),
	PCI_ROM(0x14e4, 0x166a, "14e4-166a", "14e4-166a", 0),
	PCI_ROM(0x14e4, 0x166b, "14e4-166b", "14e4-166b", 0),
	PCI_ROM(0x14e4, 0x16dd, "14e4-16dd", "14e4-16dd", 0),
	PCI_ROM(0x14e4, 0x1712, "14e4-1712", "14e4-1712", 0),
	PCI_ROM(0x14e4, 0x1713, "14e4-1713", "14e4-1713", 0),
	PCI_ROM(0x14e4, 0x1698, "14e4-1698", "14e4-1698", 0),
	PCI_ROM(0x14e4, 0x1684, "14e4-1684", "14e4-1684", 0),
	PCI_ROM(0x14e4, 0x165b, "14e4-165b", "14e4-165b", 0),
	PCI_ROM(0x14e4, 0x1681, "14e4-1681", "14e4-1681", 0),
	PCI_ROM(0x14e4, 0x1682, "14e4-1682", "14e4-1682", 0),
	PCI_ROM(0x14e4, 0x1680, "14e4-1680", "14e4-1680", 0),
	PCI_ROM(0x14e4, 0x1688, "14e4-1688", "14e4-1688", 0),
	PCI_ROM(0x14e4, 0x1689, "14e4-1689", "14e4-1689", 0),
	PCI_ROM(0x14e4, 0x1699, "14e4-1699", "14e4-1699", 0),
	PCI_ROM(0x14e4, 0x16a0, "14e4-16a0", "14e4-16a0", 0),
	PCI_ROM(0x14e4, 0x1692, "14e4-1692", "14e4-1692", 0),
	PCI_ROM(0x14e4, 0x1690, "14e4-1690", "14e4-1690", 0),
	PCI_ROM(0x14e4, 0x1694, "14e4-1694", "14e4-1694", 0),
	PCI_ROM(0x14e4, 0x1691, "14e4-1691", "14e4-1691", 0),
	PCI_ROM(0x14e4, 0x1655, "14e4-1655", "14e4-1655", 0),
	PCI_ROM(0x14e4, 0x1656, "14e4-1656", "14e4-1656", 0),
	PCI_ROM(0x14e4, 0x16b1, "14e4-16b1", "14e4-16b1", 0),
	PCI_ROM(0x14e4, 0x16b5, "14e4-16b5", "14e4-16b5", 0),
	PCI_ROM(0x14e4, 0x16b0, "14e4-16b0", "14e4-16b0", 0),
	PCI_ROM(0x14e4, 0x16b4, "14e4-16b4", "14e4-16b4", 0),
	PCI_ROM(0x14e4, 0x16b2, "14e4-16b2", "14e4-16b2", 0),
	PCI_ROM(0x14e4, 0x16b6, "14e4-16b6", "14e4-16b6", 0),
	PCI_ROM(0x14e4, 0x1657, "14e4-1657", "14e4-1657", 0),
	PCI_ROM(0x14e4, 0x165f, "14e4-165f", "14e4-165f", 0),
	PCI_ROM(0x1148, 0x4400, "1148-4400", "1148-4400", 0),
	PCI_ROM(0x1148, 0x4500, "1148-4500", "1148-4500", 0),
	PCI_ROM(0x173b, 0x03e8, "173b-03e8", "173b-03e8", 0),
	PCI_ROM(0x173b, 0x03e9, "173b-03e9", "173b-03e9", 0),
	PCI_ROM(0x173b, 0x03eb, "173b-03eb", "173b-03eb", 0),
	PCI_ROM(0x173b, 0x03ea, "173b-03ea", "173b-03ea", 0),
	PCI_ROM(0x106b, 0x1645, "106b-1645", "106b-1645", 0),
};

struct pci_driver tg3_pci_driver __pci_driver = {
	.ids = tg3_nics,
	.id_count = ARRAY_SIZE(tg3_nics),
	.probe = tg3_init_one,
	.remove = tg3_remove_one,
};