jme.c 27KB

/*
 * JMicron JMC2x0 series PCIe Ethernet gPXE Device Driver
 *
 * Copyright 2010 Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 */

FILE_LICENCE ( GPL2_OR_LATER );

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <ipxe/io.h>
#include <errno.h>
#include <unistd.h>
#include <byteswap.h>
#include <ipxe/pci.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>
#include <ipxe/malloc.h>
#include <mii.h>
#include "jme.h"
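
/* Read a PHY register via the SMI interface.  MII_BMSR is read twice
 * because its latched link-status bits only reflect the current state
 * on the second read. */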
static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
	struct jme_adapter *jme = netdev->priv;
	int i, val, again = (reg == MII_BMSR) ? 1 : 0;

read_again:
	jwrite32(jme, JME_SMI, SMI_OP_REQ |
			       smi_phy_addr(phy) |
			       smi_reg_addr(reg));

	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
		udelay(20);
		val = jread32(jme, JME_SMI);
		if ((val & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0) {
		DBG("phy(%d) read timeout : %d\n", phy, reg);
		return 0;
	}

	if (again--)
		goto read_again;

	return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
}

static void
jme_mdio_write(struct net_device *netdev,
	       int phy, int reg, int val)
{
	struct jme_adapter *jme = netdev->priv;
	int i;

	jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
		((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
		smi_phy_addr(phy) | smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
		udelay(20);
		if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0)
		DBG("phy(%d) write timeout : %d\n", phy, reg);

	return;
}

static void
jme_reset_phy_processor(struct jme_adapter *jme)
{
	u32 val;

	jme_mdio_write(jme->mii_if.dev,
			jme->mii_if.phy_id,
			MII_ADVERTISE, ADVERTISE_ALL |
			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
		jme_mdio_write(jme->mii_if.dev,
				jme->mii_if.phy_id,
				MII_CTRL1000,
				ADVERTISE_1000FULL | ADVERTISE_1000HALF);

	val = jme_mdio_read(jme->mii_if.dev,
				jme->mii_if.phy_id,
				MII_BMCR);

	jme_mdio_write(jme->mii_if.dev,
			jme->mii_if.phy_id,
			MII_BMCR, val | BMCR_RESET);

	return;
}
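
/* MII registers 26 and 27 are in the vendor-specific range.  Register
 * 27 selects the PHY FIFO configuration (see jme_set_phyfifoa and
 * jme_set_phyfifob below); register 26 carries a JMicron-specific
 * initialisation bit. */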
static void
jme_phy_init(struct jme_adapter *jme)
{
	u16 reg26;

	reg26 = jme_mdio_read(jme->mii_if.dev, jme->mii_if.phy_id, 26);
	jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
}

static void
jme_set_phyfifoa(struct jme_adapter *jme)
{
	jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, 27, 0x0004);
}

static void
jme_set_phyfifob(struct jme_adapter *jme)
{
	jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, 27, 0x0000);
}

static void
jme_phy_off(struct jme_adapter *jme)
{
	jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
}

static void
jme_restart_an(struct jme_adapter *jme)
{
	uint32_t bmcr;

	bmcr = jme_mdio_read(jme->mii_if.dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
}

static void
jme_reset_ghc_speed(struct jme_adapter *jme)
{
	jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX);
	jwrite32(jme, JME_GHC, jme->reg_ghc);
}

static void
jme_start_irq(struct jme_adapter *jme)
{
	/*
	 * Enable Interrupts
	 */
	jwrite32(jme, JME_IENS, INTR_ENABLE);
}

static void
jme_stop_irq(struct jme_adapter *jme)
{
	/*
	 * Disable Interrupts
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);
}
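
/* Program one wakeup-frame pattern through the WFOI/WFODP indirect
 * register pair: first the pattern's CRC, then its dword mask. */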
static void
jme_setup_wakeup_frame(struct jme_adapter *jme,
		       u32 *mask, u32 crc, int fnr)
{
	int i;

	/*
	 * Setup CRC pattern
	 */
	jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
	wmb();
	jwrite32(jme, JME_WFODP, crc);
	wmb();

	/*
	 * Setup Mask
	 */
	for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
		jwrite32(jme, JME_WFOI,
				((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
				(fnr & WFOI_FRAME_SEL));
		wmb();
		jwrite32(jme, JME_WFODP, mask[i]);
		wmb();
	}
}

static void
jme_reset_mac_processor(struct jme_adapter *jme)
{
	u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
	u32 crc = 0xCDCDCDCD;
	int i;

	jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
	udelay(2);
	jwrite32(jme, JME_GHC, jme->reg_ghc);

	jwrite32(jme, JME_RXDBA_LO, 0x00000000);
	jwrite32(jme, JME_RXDBA_HI, 0x00000000);
	jwrite32(jme, JME_RXQDC, 0x00000000);
	jwrite32(jme, JME_RXNDA, 0x00000000);
	jwrite32(jme, JME_TXDBA_LO, 0x00000000);
	jwrite32(jme, JME_TXDBA_HI, 0x00000000);
	jwrite32(jme, JME_TXQDC, 0x00000000);
	jwrite32(jme, JME_TXNDA, 0x00000000);

	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
	for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
		jme_setup_wakeup_frame(jme, mask, crc, i);

	jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT);
	jwrite32(jme, JME_GPREG1, GPREG1_DEFAULT);
}
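
/* Complete all outstanding transmit buffers with -ENOLINK and clear
 * their ring slots; used when the ring is torn down or reinitialised. */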
static void
jme_free_tx_buffers(struct jme_adapter *jme)
{
	struct jme_ring *txring = &jme->txring;
	struct io_buffer *txbi;
	unsigned int i;

	for (i = 0; i < jme->tx_ring_size; ++i) {
		txbi = txring->bufinf[i];
		if (txbi) {
			netdev_tx_complete_err(jme->mii_if.dev,
					txbi, -ENOLINK);
			txring->bufinf[i] = NULL;
		}
	}
}

static void
jme_free_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &jme->txring;

	if (txring->desc) {
		if (txring->bufinf) {
			memset(txring->bufinf, 0,
				sizeof(struct io_buffer *) * jme->tx_ring_size);
			free(txring->bufinf);
		}
		free_dma(txring->desc, jme->tx_ring_size * TX_DESC_SIZE);
		txring->desc = NULL;
		txring->dma = 0;
		txring->bufinf = NULL;
	}
	txring->next_to_use = 0;
	txring->next_to_clean = 0;
	txring->nr_free = 0;
}

static int
jme_alloc_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &jme->txring;

	txring->desc = malloc_dma(jme->tx_ring_size * TX_DESC_SIZE,
			RING_DESC_ALIGN);
	if (!txring->desc) {
		DBG("Cannot allocate transmit ring descriptors.\n");
		goto err_out;
	}

	/*
	 * 16-byte aligned
	 */
	txring->dma = virt_to_bus(txring->desc);
	txring->bufinf = malloc(sizeof(struct io_buffer *) *
			jme->tx_ring_size);
	if (!(txring->bufinf)) {
		DBG("Cannot allocate transmit buffer info.\n");
		goto err_out;
	}

	/*
	 * Initialize Transmit Buffer Pointers
	 */
	memset(txring->bufinf, 0,
		sizeof(struct io_buffer *) * jme->tx_ring_size);

	return 0;

err_out:
	jme_free_tx_resources(jme);
	return -ENOMEM;
}

static void
jme_init_tx_ring(struct jme_adapter *jme)
{
	struct jme_ring *txring = &jme->txring;

	txring->next_to_clean = 0;
	txring->next_to_use = 0;
	txring->nr_free = jme->tx_ring_size;

	/*
	 * Initialize Transmit Descriptors
	 */
	memset(txring->desc, 0, jme->tx_ring_size * TX_DESC_SIZE);
	jme_free_tx_buffers(jme);
}

static void
jme_enable_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
	wmb();

	/*
	 * Setup TX Queue 0 DMA Base Address
	 */
	jwrite32(jme, JME_TXDBA_LO, (uint64_t)jme->txring.dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_TXDBA_HI, (uint64_t)(jme->txring.dma) >> 32);
	jwrite32(jme, JME_TXNDA, (uint64_t)jme->txring.dma & 0xFFFFFFFFUL);

	/*
	 * Setup TX Descriptor Count
	 */
	jwrite32(jme, JME_TXQDC, jme->tx_ring_size);

	/*
	 * Enable TX Engine
	 */
	wmb();
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);
}

static void
jme_disable_tx_engine(struct jme_adapter *jme)
{
	int i;
	u32 val;

	/*
	 * Disable TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
	wmb();

	val = jread32(jme, JME_TXCS);
	for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_TXCS);
		rmb();
	}

	if (!i)
		DBG("Disable TX engine timeout.\n");
}
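
/* Reinitialise receive descriptor i to point at its io_buffer, then
 * hand it back to the NIC by setting the OWN bit last. */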
static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &jme->rxring;
	register struct rxdesc *rxdesc = rxring->desc;
	struct io_buffer *rxbi = rxring->bufinf[i];
	uint64_t mapping;

	rxdesc += i;
	mapping = virt_to_bus(rxbi->data);

	rxdesc->dw[0] = 0;
	rxdesc->dw[1] = 0;
	rxdesc->desc1.bufaddrh = cpu_to_le32(mapping >> 32);
	rxdesc->desc1.bufaddrl = cpu_to_le32(mapping & 0xFFFFFFFFUL);
	rxdesc->desc1.datalen = cpu_to_le16(RX_ALLOC_LEN);
	wmb();
	rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT;
}

static int
jme_make_new_rx_buf(struct io_buffer **rxbip)
{
	struct io_buffer *inbuf;

	/*
	 * IOB_ALIGN == 2048
	 */
	inbuf = alloc_iob(RX_ALLOC_LEN);
	if (!inbuf) {
		DBG("Cannot allocate receive iob.\n");
		return -ENOMEM;
	}
	*rxbip = inbuf;

	return 0;
}

static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &jme->rxring;
	struct io_buffer *rxbi = rxring->bufinf[i];

	if (rxbi) {
		free_iob(rxbi);
		rxring->bufinf[i] = NULL;
	}
}

static void
jme_free_rx_resources(struct jme_adapter *jme)
{
	unsigned int i;
	struct jme_ring *rxring = &jme->rxring;

	if (rxring->desc) {
		if (rxring->bufinf) {
			for (i = 0 ; i < jme->rx_ring_size ; ++i)
				jme_free_rx_buf(jme, i);
			free(rxring->bufinf);
		}
		free_dma(rxring->desc, jme->rx_ring_size * RX_DESC_SIZE);
		rxring->desc = NULL;
		rxring->dma = 0;
		rxring->bufinf = NULL;
	}
	rxring->next_to_fill = 0;
	rxring->next_to_clean = 0;
}

static int
jme_alloc_rx_resources(struct jme_adapter *jme)
{
	unsigned int i;
	struct jme_ring *rxring = &jme->rxring;
	struct io_buffer **bufinf;

	rxring->desc = malloc_dma(jme->rx_ring_size * RX_DESC_SIZE,
			RING_DESC_ALIGN);
	if (!rxring->desc) {
		DBG("Cannot allocate receive ring descriptors.\n");
		goto err_out;
	}

	/*
	 * 16-byte aligned
	 */
	rxring->dma = virt_to_bus(rxring->desc);
	rxring->bufinf = malloc(sizeof(struct io_buffer *) *
			jme->rx_ring_size);
	if (!(rxring->bufinf)) {
		DBG("Cannot allocate receive buffer info.\n");
		goto err_out;
	}

	/*
	 * Initialize Receive Buffer Pointers
	 */
	bufinf = rxring->bufinf;
	memset(bufinf, 0, sizeof(struct io_buffer *) * jme->rx_ring_size);
	for (i = 0 ; i < jme->rx_ring_size ; ++i) {
		if (jme_make_new_rx_buf(bufinf))
			goto err_out;
		++bufinf;
	}

	return 0;

err_out:
	jme_free_rx_resources(jme);
	return -ENOMEM;
}

static void
jme_init_rx_ring(struct jme_adapter *jme)
{
	unsigned int i;
	struct jme_ring *rxring = &jme->rxring;

	for (i = 0 ; i < jme->rx_ring_size ; ++i)
		jme_set_clean_rxdesc(jme, i);

	rxring->next_to_fill = 0;
	rxring->next_to_clean = 0;
}

static void
jme_set_multi(struct jme_adapter *jme)
{
	/*
	 * Just receive all kinds of packets for now.
	 */
	jme->reg_rxmcs |= RXMCS_ALLFRAME | RXMCS_BRDFRAME | RXMCS_UNIFRAME;
	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
}

static void
jme_enable_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0);
	wmb();

	/*
	 * Setup RX DMA Base Address
	 */
	jwrite32(jme, JME_RXDBA_LO, (uint64_t)(jme->rxring.dma) & 0xFFFFFFFFUL);
	jwrite32(jme, JME_RXDBA_HI, (uint64_t)(jme->rxring.dma) >> 32);
	jwrite32(jme, JME_RXNDA, (uint64_t)(jme->rxring.dma) & 0xFFFFFFFFUL);

	/*
	 * Setup RX Descriptor Count
	 */
	jwrite32(jme, JME_RXQDC, jme->rx_ring_size);

	/*
	 * Setup Unicast Filter
	 */
	jme_set_multi(jme);

	/*
	 * Enable RX Engine
	 */
	wmb();
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

static void
jme_restart_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Start RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

static void
jme_disable_rx_engine(struct jme_adapter *jme)
{
	int i;
	u32 val;

	/*
	 * Disable RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs);
	wmb();

	val = jread32(jme, JME_RXCS);
	for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_RXCS);
		rmb();
	}

	if (!i)
		DBG("Disable RX engine timeout.\n");
}
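
/* Refill empty slots in the receive ring, starting at next_to_fill.
 * The scan stops once the slot that was just consumed (curhole) has
 * been refilled, or earlier if buffer allocation fails. */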
static void
jme_refill_rx_ring(struct jme_adapter *jme, int curhole)
{
	struct jme_ring *rxring = &jme->rxring;
	int i = rxring->next_to_fill;
	struct io_buffer **bufinf = rxring->bufinf;
	int mask = jme->rx_ring_mask;
	int limit = jme->rx_ring_size;

	while (limit--) {
		if (!bufinf[i]) {
			if (jme_make_new_rx_buf(bufinf + i))
				break;
			jme_set_clean_rxdesc(jme, i);
		}
		if (i == curhole)
			limit = 0;
		i = (i + 1) & mask;
	}
	rxring->next_to_fill = i;
}

static void
jme_alloc_and_feed_iob(struct jme_adapter *jme, int idx)
{
	struct jme_ring *rxring = &jme->rxring;
	struct rxdesc *rxdesc = rxring->desc;
	struct io_buffer *rxbi = rxring->bufinf[idx];
	struct net_device *netdev = jme->mii_if.dev;
	int framesize;

	rxdesc += idx;
	framesize = le16_to_cpu(rxdesc->descwb.framesize);
	iob_put(rxbi, framesize);
	netdev_rx(netdev, rxbi);

	rxring->bufinf[idx] = NULL;
	jme_refill_rx_ring(jme, idx);
}
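
/* Hand completed receive descriptors to the network stack.  Packets
 * spanning more than one descriptor (too long for our buffers) or
 * flagged with any error status are dropped. */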
static void
jme_process_receive(struct jme_adapter *jme)
{
	struct jme_ring *rxring = &jme->rxring;
	struct rxdesc *rxdesc = rxring->desc;
	struct net_device *netdev = jme->mii_if.dev;
	int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;
	unsigned int limit = jme->rx_ring_size;

	i = rxring->next_to_clean;
	rxdesc += i;
	while (rxring->bufinf[i] &&
	       !(rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) &&
	       (rxdesc->descwb.desccnt & RXWBDCNT_WBCPL) &&
	       limit--) {
		rmb();
		desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;
		DBG2("Cleaning rx desc=%d, cnt=%d\n", i, desccnt);

		if (desccnt > 1 || (rxdesc->descwb.errstat & RXWBERR_ALLERR)) {
			for (j = i, ccnt = desccnt ; ccnt-- ; ) {
				jme_set_clean_rxdesc(jme, j);
				j = (j + 1) & (mask);
			}
			DBG("Dropped packet due to ");
			if (desccnt > 1)
				DBG("long packet (%d descriptors).\n", desccnt);
			else
				DBG("packet error.\n");
			netdev_rx_err(netdev, NULL, -EINVAL);
		} else {
			jme_alloc_and_feed_iob(jme, i);
		}

		i = (i + desccnt) & (mask);
		rxdesc = rxring->desc;
		rxdesc += i;
	}
	rxring->next_to_clean = i;

	return;
}

static void
jme_set_custom_macaddr(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev->priv;
	uint8_t *addr = netdev->ll_addr;
	u32 val;

	val = (addr[3] & 0xff) << 24 |
	      (addr[2] & 0xff) << 16 |
	      (addr[1] & 0xff) <<  8 |
	      (addr[0] & 0xff);
	jwrite32(jme, JME_RXUMA_LO, val);
	val = (addr[5] & 0xff) << 8 |
	      (addr[4] & 0xff);
	jwrite32(jme, JME_RXUMA_HI, val);
}

/**
 * Open NIC
 *
 * @v netdev		Net device
 * @ret rc		Return status code
 */
static int
jme_open(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev->priv;
	int rc;

	/*
	 * Allocate receive resources
	 */
	rc = jme_alloc_rx_resources(jme);
	if (rc) {
		DBG("Allocate receive resources error.\n");
		goto nomem_out;
	}

	/*
	 * Allocate transmit resources
	 */
	rc = jme_alloc_tx_resources(jme);
	if (rc) {
		DBG("Allocate transmit resources error.\n");
		goto free_rx_resources_out;
	}

	jme_set_custom_macaddr(netdev);
	jme_reset_phy_processor(jme);
	jme_restart_an(jme);

	return 0;

free_rx_resources_out:
	jme_free_rx_resources(jme);
nomem_out:
	return rc;
}

/**
 * Close NIC
 *
 * @v netdev		Net device
 */
static void
jme_close(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev->priv;

	jme_free_tx_resources(jme);
	jme_free_rx_resources(jme);
	jme_reset_mac_processor(jme);
	jme->phylink = 0;
	jme_phy_off(jme);
	netdev_link_down(netdev);
}
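
/* Reserve the next free transmit descriptor and advance next_to_use,
 * returning the descriptor index or -1 if the ring is full. */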
static int
jme_alloc_txdesc(struct jme_adapter *jme)
{
	struct jme_ring *txring = &jme->txring;
	int idx;

	idx = txring->next_to_use;
	if (txring->nr_free < 1)
		return -1;

	--(txring->nr_free);
	txring->next_to_use = (txring->next_to_use + 1) & jme->tx_ring_mask;

	return idx;
}

static void
jme_fill_tx_desc(struct jme_adapter *jme, struct io_buffer *iob, int idx)
{
	struct jme_ring *txring = &jme->txring;
	struct txdesc *txdesc = txring->desc;
	uint16_t len = iob_len(iob);
	unsigned long int mapping;

	txdesc += idx;
	mapping = virt_to_bus(iob->data);

	DBG2("TX buffer address: %p(%08lx+%x)\n",
			iob->data, mapping, len);
	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->dw[2] = 0;
	txdesc->dw[3] = 0;
	txdesc->desc1.datalen = cpu_to_le16(len);
	txdesc->desc1.pktsize = cpu_to_le16(len);
	txdesc->desc1.bufaddr = cpu_to_le32(mapping);

	/*
	 * Set the OWN bit last.  If it were set earlier, and the host
	 * filled the ring faster than the NIC drained it, the NIC could
	 * try to send this descriptor before the other fields were
	 * filled in.
	 */
	wmb();
	txdesc->desc1.flags = TXFLAG_OWN | TXFLAG_INT;

	/*
	 * Record the tx buffer after handing the descriptor to the NIC,
	 * for better tx_clean timing.
	 */
	wmb();
	txring->bufinf[idx] = iob;
}

/**
 * Transmit packet
 *
 * @v netdev	Network device
 * @v iobuf	I/O buffer
 * @ret rc	Return status code
 */
static int
jme_transmit(struct net_device *netdev, struct io_buffer *iobuf)
{
	struct jme_adapter *jme = netdev->priv;
	int idx;

	idx = jme_alloc_txdesc(jme);
	if (idx < 0) {
		/*
		 * Pause transmit queue somehow if possible.
		 */
		DBG("TX ring full!\n");
		return -EOVERFLOW;
	}

	jme_fill_tx_desc(jme, iobuf, idx);

	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_QUEUE0S |
				TXCS_ENABLE);
	DBG2("xmit: idx=%d\n", idx);

	return 0;
}

static int
jme_check_link(struct net_device *netdev, int testonly)
{
	struct jme_adapter *jme = netdev->priv;
	u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, gpreg1;
	int rc = 0;

	phylink = jread32(jme, JME_PHY_LINK);

	if (phylink & PHY_LINK_UP) {
		/*
		 * Keep polling until speed/duplex resolution completes
		 */
		while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
			--cnt) {
			udelay(1);
			phylink = jread32(jme, JME_PHY_LINK);
		}
		if (!cnt)
			DBG("Timed out waiting for speed resolution.\n");

		if (jme->phylink == phylink) {
			rc = 1;
			goto out;
		}
		if (testonly)
			goto out;

		jme->phylink = phylink;

		ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX |
				GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE |
				GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY);
		switch (phylink & PHY_LINK_SPEED_MASK) {
		case PHY_LINK_SPEED_10M:
			ghc |= GHC_SPEED_10M |
				GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
			break;
		case PHY_LINK_SPEED_100M:
			ghc |= GHC_SPEED_100M |
				GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
			break;
		case PHY_LINK_SPEED_1000M:
			ghc |= GHC_SPEED_1000M |
				GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
			break;
		default:
			break;
		}

		if (phylink & PHY_LINK_DUPLEX) {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
			ghc |= GHC_DPX;
		} else {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
						TXMCS_BACKOFF |
						TXMCS_CARRIERSENSE |
						TXMCS_COLLISION);
			jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
				((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
				TXTRHD_TXREN |
				((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
		}

		gpreg1 = GPREG1_DEFAULT;
		if (is_buggy250(jme->pdev->device, jme->chiprev)) {
			if (!(phylink & PHY_LINK_DUPLEX))
				gpreg1 |= GPREG1_HALFMODEPATCH;
			switch (phylink & PHY_LINK_SPEED_MASK) {
			case PHY_LINK_SPEED_10M:
				jme_set_phyfifoa(jme);
				gpreg1 |= GPREG1_RSSPATCH;
				break;
			case PHY_LINK_SPEED_100M:
				jme_set_phyfifob(jme);
				gpreg1 |= GPREG1_RSSPATCH;
				break;
			case PHY_LINK_SPEED_1000M:
				jme_set_phyfifoa(jme);
				break;
			default:
				break;
			}
		}

		jwrite32(jme, JME_GPREG1, gpreg1);
		jwrite32(jme, JME_GHC, ghc);
		jme->reg_ghc = ghc;

		DBG("Link is up at %d Mbps, %s-Duplex, MDI%s.\n",
		    ((phylink & PHY_LINK_SPEED_MASK)
			    == PHY_LINK_SPEED_1000M) ? 1000 :
		    ((phylink & PHY_LINK_SPEED_MASK)
			    == PHY_LINK_SPEED_100M) ? 100 : 10,
		    (phylink & PHY_LINK_DUPLEX) ? "Full" : "Half",
		    (phylink & PHY_LINK_MDI_STAT) ? "-X" : "");
		netdev_link_up(netdev);
	} else {
		if (testonly)
			goto out;

		DBG("Link is down.\n");
		jme->phylink = 0;
		netdev_link_down(netdev);
	}

out:
	return rc;
}

static void
jme_link_change(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev->priv;

	/*
	 * Do nothing if the link status did not change.
	 */
	if (jme_check_link(netdev, 1))
		return;

	if (netdev_link_ok(netdev)) {
		netdev_link_down(netdev);
		jme_disable_rx_engine(jme);
		jme_disable_tx_engine(jme);
		jme_reset_ghc_speed(jme);
		jme_reset_mac_processor(jme);
	}

	jme_check_link(netdev, 0);
	if (netdev_link_ok(netdev)) {
		jme_init_rx_ring(jme);
		jme_enable_rx_engine(jme);
		jme_init_tx_ring(jme);
		jme_enable_tx_engine(jme);
	}

	return;
}

static void
jme_tx_clean(struct jme_adapter *jme)
{
	struct jme_ring *txring = &jme->txring;
	struct txdesc *txdesc = txring->desc;
	struct io_buffer *txbi;
	struct net_device *netdev = jme->mii_if.dev;
	int i, cnt = 0, max, err, mask;

	max = jme->tx_ring_size - txring->nr_free;
	mask = jme->tx_ring_mask;

	for (i = txring->next_to_clean ; cnt < max ; ++cnt) {
		txbi = txring->bufinf[i];

		if (txbi && !(txdesc[i].descwb.flags & TXWBFLAG_OWN)) {
			DBG2("TX clean address: %08lx(%08lx+%zx)\n",
					(unsigned long)txbi->data,
					virt_to_bus(txbi->data),
					iob_len(txbi));
			err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;
			if (err)
				netdev_tx_complete_err(netdev, txbi, -EIO);
			else
				netdev_tx_complete(netdev, txbi);
			txring->bufinf[i] = NULL;
		} else {
			break;
		}

		i = (i + 1) & mask;
	}

	DBG2("txclean: next %d\n", i);
	txring->next_to_clean = i;
	txring->nr_free += cnt;
}

/**
 * Poll for received packets
 *
 * @v netdev	Network device
 */
static void
jme_poll(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev->priv;
	u32 intrstat;

	intrstat = jread32(jme, JME_IEVE);

	/*
	 * Check whether any action needs to be performed.
	 */
	if ((intrstat & INTR_ENABLE) == 0)
		return;

	/*
	 * Check whether the device still exists
	 */
	if (intrstat == ~((typeof(intrstat))0))
		return;

	DBG2("intrstat 0x%08x\n", intrstat);
	if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
		DBG2("Link changed\n");
		jme_link_change(netdev);

		/*
		 * Clear all interrupt status
		 */
		jwrite32(jme, JME_IEVE, intrstat);

		/*
		 * The link change event is critical; all other events
		 * are ignored.
		 */
		return;
	}

	/*
	 * Process transmission completions first to free more memory.
	 */
	if (intrstat & INTR_TX0) {
		DBG2("Packet transmit complete\n");
		jme_tx_clean(jme);
		jwrite32(jme, JME_IEVE, intrstat & INTR_TX0);
	}

	if (intrstat & (INTR_RX0 | INTR_RX0EMP)) {
		DBG2("Packet received\n");
		jme_process_receive(jme);
		jwrite32(jme, JME_IEVE,
			intrstat & (INTR_RX0 | INTR_RX0EMP));
		if (intrstat & INTR_RX0EMP)
			jme_restart_rx_engine(jme);
	}

	/*
	 * Clear all other interrupt status
	 */
	jwrite32(jme, JME_IEVE,
		intrstat & ~(INTR_RX0 | INTR_RX0EMP | INTR_TX0));
}

/**
 * Enable/disable interrupts
 *
 * @v netdev	Network device
 * @v enable	Interrupts should be enabled
 */
static void
jme_irq(struct net_device *netdev, int enable)
{
	struct jme_adapter *jme = netdev->priv;

	DBG("jme interrupts %s\n", (enable ? "enabled" : "disabled"));
	if (enable)
		jme_start_irq(jme);
	else
		jme_stop_irq(jme);
}

/** JME net device operations */
static struct net_device_operations jme_operations = {
	.open = jme_open,
	.close = jme_close,
	.transmit = jme_transmit,
	.poll = jme_poll,
	.irq = jme_irq,
};

static void
jme_check_hw_ver(struct jme_adapter *jme)
{
	u32 chipmode;

	chipmode = jread32(jme, JME_CHIPMODE);

	jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
	jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
}

static int
jme_reload_eeprom(struct jme_adapter *jme)
{
	u32 val;
	int i;

	val = jread32(jme, JME_SMBCSR);

	if (val & SMBCSR_EEPROMD) {
		val |= SMBCSR_CNACK;
		jwrite32(jme, JME_SMBCSR, val);
		val |= SMBCSR_RELOAD;
		jwrite32(jme, JME_SMBCSR, val);
		mdelay(12);

		for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
			mdelay(1);
			if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
				break;
		}

		if (i == 0) {
			DBG("eeprom reload timeout\n");
			return -EIO;
		}
	}

	return 0;
}
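
/* Read the station MAC address from the unicast address registers,
 * which the EEPROM reload above repopulates. */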
static void
jme_load_macaddr(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned char macaddr[6];
	u32 val;

	val = jread32(jme, JME_RXUMA_LO);
	macaddr[0] = (val >>  0) & 0xFF;
	macaddr[1] = (val >>  8) & 0xFF;
	macaddr[2] = (val >> 16) & 0xFF;
	macaddr[3] = (val >> 24) & 0xFF;
	val = jread32(jme, JME_RXUMA_HI);
	macaddr[4] = (val >>  0) & 0xFF;
	macaddr[5] = (val >>  8) & 0xFF;
	memcpy(netdev->hw_addr, macaddr, 6);
}

/**
 * Probe PCI device
 *
 * @v pci	PCI device
 * @ret rc	Return status code
 */
static int
jme_probe(struct pci_device *pci)
{
	struct net_device *netdev;
	struct jme_adapter *jme;
	int rc;
	uint8_t mrrs;

	/* Allocate net device */
	netdev = alloc_etherdev(sizeof(*jme));
	if (!netdev)
		return -ENOMEM;
	netdev_init(netdev, &jme_operations);
	jme = netdev->priv;
	pci_set_drvdata(pci, netdev);
	netdev->dev = &pci->dev;
	jme->regs = ioremap(pci->membase, JME_REGS_SIZE);
	if (!(jme->regs)) {
		DBG("Mapping PCI resource region error.\n");
		rc = -ENOMEM;
		goto err_out;
	}
	jme->reg_ghc = 0;
	jme->reg_rxcs = RXCS_DEFAULT;
	jme->reg_rxmcs = RXMCS_DEFAULT;
	jme->phylink = 0;
	jme->pdev = pci;
	jme->mii_if.dev = netdev;
	jme->mii_if.phy_id = 1;
	jme->mii_if.mdio_read = jme_mdio_read;
	jme->mii_if.mdio_write = jme_mdio_write;
	jme->rx_ring_size = 1 << 4;
	jme->rx_ring_mask = jme->rx_ring_size - 1;
	jme->tx_ring_size = 1 << 4;
	jme->tx_ring_mask = jme->tx_ring_size - 1;

	/* Fix up PCI device */
	adjust_pci_device(pci);

	/*
	 * Get Max Read Request Size from PCI Config Space
	 */
	pci_read_config_byte(pci, PCI_DCSR_MRRS, &mrrs);
	mrrs &= PCI_DCSR_MRRS_MASK;
	switch (mrrs) {
	case MRRS_128B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
		break;
	case MRRS_256B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
		break;
	default:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
		break;
	}

	/*
	 * Get basic hardware info.
	 */
	jme_check_hw_ver(jme);
	if (pci->device == PCI_DEVICE_ID_JMICRON_JMC250)
		jme->mii_if.supports_gmii = 1;
	else
		jme->mii_if.supports_gmii = 0;

	/*
	 * Initialize PHY
	 */
	jme_set_phyfifoa(jme);
	jme_phy_init(jme);

	/*
	 * Bring down phy before interface is opened.
	 */
	jme_phy_off(jme);

	/*
	 * Reset MAC processor and reload EEPROM for MAC Address
	 */
	jme_reset_mac_processor(jme);
	rc = jme_reload_eeprom(jme);
	if (rc) {
		DBG("Reload eeprom for reading MAC Address error.\n");
		goto err_unmap;
	}
	jme_load_macaddr(netdev);

	/* Register network device */
	if ((rc = register_netdev(netdev)) != 0) {
		DBG("Register net_device error.\n");
		goto err_unmap;
	}

	return 0;

err_unmap:
	iounmap(jme->regs);
err_out:
	netdev_nullify(netdev);
	netdev_put(netdev);
	return rc;
}

/**
 * Remove PCI device
 *
 * @v pci	PCI device
 */
static void
jme_remove(struct pci_device *pci)
{
	struct net_device *netdev = pci_get_drvdata(pci);
	struct jme_adapter *jme = netdev->priv;

	iounmap(jme->regs);
	unregister_netdev(netdev);
	netdev_nullify(netdev);
	netdev_put(netdev);
}

static struct pci_device_id jm_nics[] = {
	PCI_ROM(0x197b, 0x0250, "jme", "JMicron Gigabit Ethernet", 0),
	PCI_ROM(0x197b, 0x0260, "jmfe", "JMicron Fast Ethernet", 0),
};

struct pci_driver jme_driver __pci_driver = {
	.ids = jm_nics,
	.id_count = ( sizeof ( jm_nics ) / sizeof ( jm_nics[0] ) ),
	.probe = jme_probe,
	.remove = jme_remove,
};