/* b44.c - Broadcom BCM44xx (b44) Fast Ethernet driver for iPXE */

/*
 * Copyright (c) 2008 Stefan Hajnoczi <stefanha@gmail.com>
 * Copyright (c) 2008 Pantelis Koukousoulas <pktoss@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * This driver is a port of the b44 linux driver version 1.01
 *
 * Copyright (c) 2002 David S. Miller <davem@redhat.com>
 * Copyright (c) Pekka Pietikainen <pp@ee.oulu.fi>
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Some ssb bits copied from version 2.0 of the b44 driver
 * Copyright (c) Michael Buesch
 *
 * Copyright (c) a lot of people too. Please respect their work.
 */

FILE_LICENCE ( GPL2_OR_LATER );

#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <unistd.h>
#include <byteswap.h>
#include <ipxe/io.h>
#include <mii.h>
#include <ipxe/iobuf.h>
#include <ipxe/malloc.h>
#include <ipxe/pci.h>
#include <ipxe/netdevice.h>
#include <ipxe/ethernet.h>
#include <ipxe/if_ether.h>
#include "b44.h"

/* Forward declarations: these are referenced before their
 * definitions further down in this file. */
static int b44_phy_reset(struct b44_private *bp);
static void b44_set_rx_mode(struct net_device *netdev);
static struct net_device_operations b44_operations;

static inline int ring_next(int index)
{
        /* B44_RING_SIZE is a power of 2 :) */
        return (index + 1) & (B44_RING_SIZE - 1);
}

/* Memory-mapped I/O wrappers */

static inline u32 br32(const struct b44_private *bp, u32 reg)
{
        return readl(bp->regs + reg);
}

static inline void bw32(const struct b44_private *bp, u32 reg, u32 val)
{
        writel(val, bp->regs + reg);
}

static inline void bflush(const struct b44_private *bp, u32 reg, u32 timeout)
{
        readl(bp->regs + reg);
        udelay(timeout);
}

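/*
 * Convert a host virtual address into a DMA address usable by the
 * card, which sees host memory through a window offset by
 * SB_PCI_DMA.  This is also why b44_probe() below refuses to run on
 * systems with more than 1GB of RAM (see phys_ram_within_limit()).
 */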
#define VIRT_TO_B44(addr) ( virt_to_bus(addr) + SB_PCI_DMA )

/**
 * Return non-zero if the installed RAM is within
 * the limit given and zero if it is outside.
 * Hopefully will be removed soon.
 */
int phys_ram_within_limit(u64 limit)
{
        struct memory_map memmap;
        struct memory_region *highest = NULL;

        get_memmap(&memmap);
        if (memmap.count == 0)
                return 0;

        highest = &memmap.regions[memmap.count - 1];
        return (highest->end < limit);
}

/**
 * Ring cells waiting to be processed are between 'tx_cur' and 'pending'
 * indexes in the ring.
 */
static u32 pending_tx_index(struct b44_private *bp)
{
        u32 pending = br32(bp, B44_DMATX_STAT);
        pending &= DMATX_STAT_CDMASK;

        pending /= sizeof(struct dma_desc);
        return pending & (B44_RING_SIZE - 1);
}

/**
 * Ring cells waiting to be processed are between 'rx_cur' and 'pending'
 * indexes in the ring.
 */
static u32 pending_rx_index(struct b44_private *bp)
{
        u32 pending = br32(bp, B44_DMARX_STAT);
        pending &= DMARX_STAT_CDMASK;

        pending /= sizeof(struct dma_desc);
        return pending & (B44_RING_SIZE - 1);
}

/**
 * Wait until the given bit is set/cleared.
 */
static int b44_wait_bit(struct b44_private *bp, unsigned long reg, u32 bit,
                        unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;
                if (!clear && (val & bit))
                        break;
                udelay(10);
        }
        if (i == timeout) {
                return -ENODEV;
        }
        return 0;
}

/*
 * Sonics Silicon Backplane support. SSB is a mini-bus interconnecting
 * so-called IP Cores. One of those cores implements the Fast Ethernet
 * functionality and another one the PCI engine.
 *
 * You need to switch to the core you want to talk to before actually
 * sending commands.
 *
 * See: http://bcm-v4.sipsolutions.net/Backplane for (reverse-engineered)
 * specs.
 */

static inline u32 ssb_get_core_rev(struct b44_private *bp)
{
        return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}

static inline int ssb_is_core_up(struct b44_private *bp)
{
        return ((br32(bp, B44_SBTMSLOW) & (SSB_CORE_DOWN | SBTMSLOW_CLOCK))
                == SBTMSLOW_CLOCK);
}

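/*
 * Temporarily map the PCI core's registers over BAR0 so we can enable
 * interrupt forwarding for the requested cores and turn on PCI
 * prefetch/burst, then restore the original window.  Returns the PCI
 * core revision.
 */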
static u32 ssb_pci_setup(struct b44_private *bp, u32 cores)
{
        u32 bar_orig, pci_rev, val;

        pci_read_config_dword(bp->pci, SSB_BAR0_WIN, &bar_orig);
        pci_write_config_dword(bp->pci, SSB_BAR0_WIN,
                               BCM4400_PCI_CORE_ADDR);
        pci_rev = ssb_get_core_rev(bp);

        val = br32(bp, B44_SBINTVEC);
        val |= cores;
        bw32(bp, B44_SBINTVEC, val);

        val = br32(bp, SSB_PCI_TRANS_2);
        val |= SSB_PCI_PREF | SSB_PCI_BURST;
        bw32(bp, SSB_PCI_TRANS_2, val);

        pci_write_config_dword(bp->pci, SSB_BAR0_WIN, bar_orig);

        return pci_rev;
}

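/*
 * Take the core down gracefully: reject new backplane transactions,
 * wait for outstanding ones to drain, then put the core into reset.
 */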
static void ssb_core_disable(struct b44_private *bp)
{
        if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
                return;

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
        b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
        b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
                                SSB_CORE_DOWN));
        bflush(bp, B44_SBTMSLOW, 1);

        bw32(bp, B44_SBTMSLOW, SSB_CORE_DOWN);
        bflush(bp, B44_SBTMSLOW, 1);
}

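/*
 * Bring the core out of reset, clearing any error state recorded in
 * SBTMSHIGH/SBIMSTATE along the way.
 */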
static void ssb_core_reset(struct b44_private *bp)
{
        u32 val;
        const u32 mask = (SBTMSLOW_CLOCK | SBTMSLOW_FGC | SBTMSLOW_RESET);

        ssb_core_disable(bp);

        bw32(bp, B44_SBTMSLOW, mask);
        bflush(bp, B44_SBTMSLOW, 1);

        /* Clear SERR if set, this is a hw bug workaround. */
        if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
                bw32(bp, B44_SBTMSHIGH, 0);

        val = br32(bp, B44_SBIMSTATE);
        if (val & (SBIMSTATE_BAD)) {
                bw32(bp, B44_SBIMSTATE, val & ~SBIMSTATE_BAD);
        }

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        bflush(bp, B44_SBTMSLOW, 1);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
        bflush(bp, B44_SBTMSLOW, 1);
}

/*
 * Driver helper functions
 */

/*
 * Chip reset provides power to the b44 MAC & PCI cores, which
 * is necessary for MAC register access. We only do a partial
 * reset in case of transmit/receive errors (ISTAT_ERRORS) to
 * avoid the chip being hung for an unnecessarily long time in
 * this case.
 *
 * Called-by: b44_close, b44_halt, b44_init_hw(b44_open), b44_probe
 */
static void b44_chip_reset(struct b44_private *bp, int reset_kind)
{
        if (ssb_is_core_up(bp)) {
                bw32(bp, B44_RCV_LAZY, 0);

                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);

                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_dirty = bp->tx_cur = 0;

                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK)
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);

                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_cur = 0;
        } else {
                ssb_pci_setup(bp, SBINTVEC_ENET0);
        }

        ssb_core_reset(bp);

        /* Don't enable PHY if we are only doing a partial reset. */
        if (reset_kind == B44_CHIP_RESET_PARTIAL)
                return;

        /* Make PHY accessible. */
        bw32(bp, B44_MDIO_CTRL,
             (MDIO_CTRL_PREAMBLE | (0x0d & MDIO_CTRL_MAXF_MASK)));
        bflush(bp, B44_MDIO_CTRL, 1);

        /* Enable internal or external PHY */
        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                bflush(bp, B44_ENET_CTRL, 1);
        } else {
                u32 val = br32(bp, B44_DEVCTRL);
                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        bflush(bp, B44_DEVCTRL, 100);
                }
        }
}

/**
 * Called by b44_poll in the error path.
 */
static void b44_halt(struct b44_private *bp)
{
        /* Disable interrupts */
        bw32(bp, B44_IMASK, 0);
        bflush(bp, B44_IMASK, 1);

        DBG("b44: powering down PHY\n");
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);

        /*
         * Now reset the chip, but without enabling
         * the MAC & PHY part of it.
         * This has to be done _after_ we shut down the PHY.
         */
        b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}

/*
 * Called at device open time to get the chip ready for
 * packet processing.
 *
 * Called-by: b44_open
 */
static void b44_init_hw(struct b44_private *bp, int reset_kind)
{
        u32 val;
#define CTRL_MASK (DMARX_CTRL_ENABLE | (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT))

        b44_chip_reset(bp, B44_CHIP_RESET_FULL);
        if (reset_kind == B44_FULL_RESET) {
                b44_phy_reset(bp);
        }

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too. */
        b44_set_rx_mode(bp->netdev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, B44_MAX_MTU + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, B44_MAX_MTU + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_HIWMARK, TX_HIWMARK_DEFLT);
        if (reset_kind == B44_PARTIAL_RESET) {
                bw32(bp, B44_DMARX_CTRL, CTRL_MASK);
        } else {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, VIRT_TO_B44(bp->tx));

                bw32(bp, B44_DMARX_CTRL, CTRL_MASK);
                bw32(bp, B44_DMARX_ADDR, VIRT_TO_B44(bp->rx));
                bw32(bp, B44_DMARX_PTR, B44_RX_RING_LEN_BYTES);

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        }
        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
#undef CTRL_MASK
}

/*** Management of ring descriptors ***/

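/*
 * Attach the I/O buffer at 'idx' to the RX ring: clear its receive
 * header, fill in the descriptor's control word and DMA address, and
 * advance the hardware's DMARX pointer to cover it.
 */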
static void b44_populate_rx_descriptor(struct b44_private *bp, u32 idx)
{
        struct rx_header *rh;
        u32 ctrl, addr;

        rh = bp->rx_iobuf[idx]->data;
        rh->len = 0;
        rh->flags = 0;

        ctrl = DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET);
        if (idx == B44_RING_LAST) {
                ctrl |= DESC_CTRL_EOT;
        }
        addr = VIRT_TO_B44(bp->rx_iobuf[idx]->data);

        bp->rx[idx].ctrl = cpu_to_le32(ctrl);
        bp->rx[idx].addr = cpu_to_le32(addr);
        bw32(bp, B44_DMARX_PTR, idx * sizeof(struct dma_desc));
}

/*
 * Refill RX ring descriptors with buffers. This is needed
 * because during rx we are passing ownership of descriptor
 * buffers to the network stack.
 */
static void b44_rx_refill(struct b44_private *bp, u32 pending)
{
        u32 i;

        /* Start refilling just after the descriptor at 'pending' */
        for (i = pending + 1; i != bp->rx_cur; i = ring_next(i)) {
                if (bp->rx_iobuf[i] != NULL)
                        continue;

                bp->rx_iobuf[i] = alloc_iob(RX_PKT_BUF_SZ);
                if (!bp->rx_iobuf[i]) {
                        DBG("Refill rx ring failed!!\n");
                        break;
                }

                b44_populate_rx_descriptor(bp, i);
        }
}

static void b44_free_rx_ring(struct b44_private *bp)
{
        u32 i;

        if (bp->rx) {
                for (i = 0; i < B44_RING_SIZE; i++) {
                        free_iob(bp->rx_iobuf[i]);
                        bp->rx_iobuf[i] = NULL;
                }
                free_dma(bp->rx, B44_RX_RING_LEN_BYTES);
                bp->rx = NULL;
        }
}

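/*
 * Allocate the RX descriptor ring, seed slot 0 with a fresh I/O
 * buffer, and let b44_rx_refill() populate the rest of the ring.
 */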
static int b44_init_rx_ring(struct b44_private *bp)
{
        b44_free_rx_ring(bp);

        bp->rx = malloc_dma(B44_RX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
        if (!bp->rx)
                return -ENOMEM;

        memset(bp->rx_iobuf, 0, sizeof(bp->rx_iobuf));

        bp->rx_iobuf[0] = alloc_iob(RX_PKT_BUF_SZ);
        if (!bp->rx_iobuf[0]) {
                b44_free_rx_ring(bp);
                return -ENOMEM;
        }
        b44_populate_rx_descriptor(bp, 0);
        b44_rx_refill(bp, 0);

        DBG("Init RX rings: rx=0x%08lx\n", VIRT_TO_B44(bp->rx));
        return 0;
}

static void b44_free_tx_ring(struct b44_private *bp)
{
        if (bp->tx) {
                free_dma(bp->tx, B44_TX_RING_LEN_BYTES);
                bp->tx = NULL;
        }
}

static int b44_init_tx_ring(struct b44_private *bp)
{
        b44_free_tx_ring(bp);

        bp->tx = malloc_dma(B44_TX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
        if (!bp->tx)
                return -ENOMEM;

        memset(bp->tx, 0, B44_TX_RING_LEN_BYTES);
        memset(bp->tx_iobuf, 0, sizeof(bp->tx_iobuf));

        DBG("Init TX rings: tx=0x%08lx\n", VIRT_TO_B44(bp->tx));
        return 0;
}

/*** Interaction with the PHY ***/

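/*
 * MDIO accesses follow the same pattern in both directions: compose a
 * command word, write it to B44_MDIO_DATA, then wait for the
 * EMAC_INT_MII bit in B44_EMAC_ISTAT to signal completion.
 */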
static int b44_phy_read(struct b44_private *bp, int reg, u32 *val)
{
        int err;

        u32 arg1 = (MDIO_OP_READ << MDIO_DATA_OP_SHIFT);
        u32 arg2 = (bp->phy_addr << MDIO_DATA_PMD_SHIFT);
        u32 arg3 = (reg << MDIO_DATA_RA_SHIFT);
        u32 arg4 = (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT);
        u32 argv = arg1 | arg2 | arg3 | arg4;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | argv));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}

static int b44_phy_write(struct b44_private *bp, int reg, u32 val)
{
        u32 arg1 = (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT);
        u32 arg2 = (bp->phy_addr << MDIO_DATA_PMD_SHIFT);
        u32 arg3 = (reg << MDIO_DATA_RA_SHIFT);
        u32 arg4 = (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT);
        u32 arg5 = (val & MDIO_DATA_DATA);
        u32 argv = arg1 | arg2 | arg3 | arg4 | arg5;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | argv));
        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

static int b44_phy_reset(struct b44_private *bp)
{
        u32 val;
        int err;

        err = b44_phy_write(bp, MII_BMCR, BMCR_RESET);
        if (err)
                return err;

        udelay(100);
        err = b44_phy_read(bp, MII_BMCR, &val);
        if (!err) {
                if (val & BMCR_RESET) {
                        return -ENODEV;
                }
        }

        return 0;
}

/*
 * The BCM44xx CAM (Content Addressable Memory) stores the MAC
 * and PHY address.
 */
static void b44_cam_write(struct b44_private *bp, unsigned char *data,
                          int index)
{
        u32 val;

        val  = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) << 8;
        val |= ((u32) data[5]) << 0;
        bw32(bp, B44_CAM_DATA_LO, val);

        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) | (((u32) data[1]) << 0));
        bw32(bp, B44_CAM_DATA_HI, val);

        val = CAM_CTRL_WRITE | (index << CAM_CTRL_INDEX_SHIFT);
        bw32(bp, B44_CAM_CTRL, val);

        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

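/*
 * Program the device's own MAC address into CAM entry 0 and enable
 * the CAM.
 */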
static void b44_set_mac_addr(struct b44_private *bp)
{
        u32 val;

        bw32(bp, B44_CAM_CTRL, 0);
        b44_cam_write(bp, bp->netdev->ll_addr, 0);
        val = br32(bp, B44_CAM_CTRL);
        bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}

/* Read 128 bytes of EEPROM. */
static void b44_read_eeprom(struct b44_private *bp, u8 *data)
{
        long i;
        u16 *ptr = (u16 *) data;

        for (i = 0; i < 128; i += 2)
                ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
}

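/*
 * The EEPROM stores the station address with the two bytes of each
 * 16-bit word swapped, hence the shuffled indices below.
 */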
static void b44_load_mac_and_phy_addr(struct b44_private *bp)
{
        u8 eeprom[128];

        /* Load MAC address, note byteswapping */
        b44_read_eeprom(bp, &eeprom[0]);
        bp->netdev->hw_addr[0] = eeprom[79];
        bp->netdev->hw_addr[1] = eeprom[78];
        bp->netdev->hw_addr[2] = eeprom[81];
        bp->netdev->hw_addr[3] = eeprom[80];
        bp->netdev->hw_addr[4] = eeprom[83];
        bp->netdev->hw_addr[5] = eeprom[82];

        /* Load PHY address */
        bp->phy_addr = eeprom[90] & 0x1f;
}

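/*
 * Receive all multicast frames but stay out of promiscuous mode: our
 * own address goes into CAM entry 0 and the remaining 63 entries are
 * cleared.
 */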
static void b44_set_rx_mode(struct net_device *netdev)
{
        struct b44_private *bp = netdev_priv(netdev);
        unsigned char zero[6] = { 0, 0, 0, 0, 0, 0 };
        u32 val;
        int i;

        val = br32(bp, B44_RXCONFIG);
        val &= ~RXCONFIG_PROMISC;
        val |= RXCONFIG_ALLMULTI;

        b44_set_mac_addr(bp);

        for (i = 1; i < 64; i++)
                b44_cam_write(bp, zero, i);

        bw32(bp, B44_RXCONFIG, val);
        val = br32(bp, B44_CAM_CTRL);
        bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}

/*** Implementation of iPXE driver callbacks ***/

/**
 * Probe device
 *
 * @v pci       PCI device
 * @v id        Matching entry in ID table
 * @ret rc      Return status code
 */
static int b44_probe(struct pci_device *pci, const struct pci_device_id *id)
{
        struct net_device *netdev;
        struct b44_private *bp;
        int rc;

        /*
         * Bail out if more than 1GB of physical RAM is installed.
         * This limitation will be removed later when dma mapping
         * is merged into mainline.
         */
        if (!phys_ram_within_limit(B44_30BIT_DMA_MASK)) {
                DBG("Sorry, this version of the driver does not\n"
                    "support systems with more than 1GB of RAM.\n");
                return -ENOMEM;
        }

        /* Set up netdev */
        netdev = alloc_etherdev(sizeof(*bp));
        if (!netdev)
                return -ENOMEM;

        netdev_init(netdev, &b44_operations);
        pci_set_drvdata(pci, netdev);
        netdev->dev = &pci->dev;

        /* Set up private data */
        bp = netdev_priv(netdev);
        memset(bp, 0, sizeof(*bp));
        bp->netdev = netdev;
        bp->pci = pci;

        /* Map device registers */
        bp->regs = ioremap(pci->membase, B44_REGS_SIZE);
        if (!bp->regs) {
                netdev_put(netdev);
                return -ENOMEM;
        }

        /* Enable PCI bus mastering */
        adjust_pci_device(pci);

        b44_load_mac_and_phy_addr(bp);

        /* Link management currently not implemented */
        netdev_link_up(netdev);

        rc = register_netdev(netdev);
        if (rc != 0) {
                iounmap(bp->regs);
                netdev_put(netdev);
                return rc;
        }

        b44_chip_reset(bp, B44_CHIP_RESET_FULL);

        DBG("b44 %s (%04x:%04x) regs=%p MAC=%s\n", id->name, id->vendor,
            id->device, bp->regs, eth_ntoa(netdev->ll_addr));

        return 0;
}

/**
 * Remove device
 *
 * @v pci       PCI device
 */
static void b44_remove(struct pci_device *pci)
{
        struct net_device *netdev = pci_get_drvdata(pci);
        struct b44_private *bp = netdev_priv(netdev);

        ssb_core_disable(bp);
        unregister_netdev(netdev);
        iounmap(bp->regs);
        netdev_nullify(netdev);
        netdev_put(netdev);
}

/** Enable or disable interrupts
 *
 * @v netdev    Network device
 * @v enable    Interrupts should be enabled
 */
static void b44_irq(struct net_device *netdev, int enable)
{
        struct b44_private *bp = netdev_priv(netdev);

        /* Interrupt mask specifies which events generate interrupts */
        bw32(bp, B44_IMASK, enable ? IMASK_DEF : IMASK_DISABLE);
}

/** Open network device
 *
 * @v netdev    Network device
 * @ret rc      Return status code
 */
static int b44_open(struct net_device *netdev)
{
        struct b44_private *bp = netdev_priv(netdev);
        int rc;

        rc = b44_init_tx_ring(bp);
        if (rc != 0)
                return rc;

        rc = b44_init_rx_ring(bp);
        if (rc != 0) {
                b44_free_tx_ring(bp);
                return rc;
        }

        b44_init_hw(bp, B44_FULL_RESET);

        /* Disable interrupts */
        b44_irq(netdev, 0);

        return 0;
}

/** Close network device
 *
 * @v netdev    Network device
 */
static void b44_close(struct net_device *netdev)
{
        struct b44_private *bp = netdev_priv(netdev);

        b44_chip_reset(bp, B44_FULL_RESET);
        b44_free_tx_ring(bp);
        b44_free_rx_ring(bp);
}

/** Transmit packet
 *
 * @v netdev    Network device
 * @v iobuf     I/O buffer
 * @ret rc      Return status code
 */
static int b44_transmit(struct net_device *netdev, struct io_buffer *iobuf)
{
        struct b44_private *bp = netdev_priv(netdev);
        u32 cur = bp->tx_cur;
        u32 ctrl;

        /* Check for TX ring overflow */
        if (bp->tx[cur].ctrl) {
                DBG("tx overflow\n");
                return -ENOBUFS;
        }

        /* Will call netdev_tx_complete() on the iobuf later */
        bp->tx_iobuf[cur] = iobuf;

        /* Set up TX descriptor */
        ctrl = (iob_len(iobuf) & DESC_CTRL_LEN) |
               DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (cur == B44_RING_LAST)
                ctrl |= DESC_CTRL_EOT;

        bp->tx[cur].ctrl = cpu_to_le32(ctrl);
        bp->tx[cur].addr = cpu_to_le32(VIRT_TO_B44(iobuf->data));

        /* Update next available descriptor index */
        cur = ring_next(cur);
        bp->tx_cur = cur;
        wmb();

        /* Tell card that a new TX descriptor is ready */
        bw32(bp, B44_DMATX_PTR, cur * sizeof(struct dma_desc));
        return 0;
}

/** Recycles sent TX descriptors and notifies network stack
 *
 * @v bp        Driver state
 */
static void b44_tx_complete(struct b44_private *bp)
{
        u32 cur, i;

        cur = pending_tx_index(bp);

        for (i = bp->tx_dirty; i != cur; i = ring_next(i)) {
                /* Free finished frame */
                netdev_tx_complete(bp->netdev, bp->tx_iobuf[i]);
                bp->tx_iobuf[i] = NULL;

                /* Clear TX descriptor */
                bp->tx[i].ctrl = 0;
                bp->tx[i].addr = 0;
        }
        bp->tx_dirty = cur;
}

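/*
 * Hand frames received between rx_cur and the hardware's pending
 * index to the network stack, then refill the ring with fresh
 * buffers.
 */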
static void b44_process_rx_packets(struct b44_private *bp)
{
        struct io_buffer *iob;  /* received data */
        struct rx_header *rh;
        u32 pending, i;
        u16 len;

        pending = pending_rx_index(bp);

        for (i = bp->rx_cur; i != pending; i = ring_next(i)) {
                iob = bp->rx_iobuf[i];
                if (iob == NULL)
                        break;

                rh = iob->data;
                len = le16_to_cpu(rh->len);

                /*
                 * Guard against incompletely written RX descriptors.
                 * Without this, things can get really slow!
                 */
                if (len == 0)
                        break;

                /* Discard CRC that is generated by the card */
                len -= 4;

                /* Check for invalid packets and errors */
                if (len > RX_PKT_BUF_SZ - RX_PKT_OFFSET ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                        DBG("rx error len=%d flags=%04x\n", len,
                            le16_to_cpu(rh->flags));
                        rh->len = 0;
                        rh->flags = 0;
                        /* Ownership passes to the stack here too, so
                         * drop our reference and let the refill
                         * allocate a replacement buffer. */
                        bp->rx_iobuf[i] = NULL;
                        netdev_rx_err(bp->netdev, iob, -EINVAL);
                        continue;
                }

                /* Clear RX descriptor */
                rh->len = 0;
                rh->flags = 0;
                bp->rx_iobuf[i] = NULL;

                /* Hand off the IO buffer to the network stack */
                iob_reserve(iob, RX_PKT_OFFSET);
                iob_put(iob, len);
                netdev_rx(bp->netdev, iob);
        }
        bp->rx_cur = i;
        b44_rx_refill(bp, pending_rx_index(bp));
}

/** Poll for completed and received packets
 *
 * @v netdev    Network device
 */
static void b44_poll(struct net_device *netdev)
{
        struct b44_private *bp = netdev_priv(netdev);
        u32 istat;

        /* Interrupt status */
        istat = br32(bp, B44_ISTAT);
        istat &= IMASK_DEF;     /* only the events we care about */

        if (!istat)
                return;
        if (istat & ISTAT_TX)
                b44_tx_complete(bp);
        if (istat & ISTAT_RX)
                b44_process_rx_packets(bp);
        if (istat & ISTAT_ERRORS) {
                DBG("b44 error istat=0x%08x\n", istat);

                /* Reset B44 core partially to avoid long waits */
                b44_irq(bp->netdev, 0);
                b44_halt(bp);
                b44_init_tx_ring(bp);
                b44_init_rx_ring(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
        }

        /* Acknowledge interrupt */
        bw32(bp, B44_ISTAT, 0);
        bflush(bp, B44_ISTAT, 1);
}

static struct net_device_operations b44_operations = {
        .open = b44_open,
        .close = b44_close,
        .transmit = b44_transmit,
        .poll = b44_poll,
        .irq = b44_irq,
};

static struct pci_device_id b44_nics[] = {
        PCI_ROM(0x14e4, 0x4401, "BCM4401", "BCM4401", 0),
        PCI_ROM(0x14e4, 0x170c, "BCM4401-B0", "BCM4401-B0", 0),
        PCI_ROM(0x14e4, 0x4402, "BCM4401-B1", "BCM4401-B1", 0),
};

struct pci_driver b44_driver __pci_driver = {
        .ids = b44_nics,
        .id_count = sizeof b44_nics / sizeof b44_nics[0],
        .probe = b44_probe,
        .remove = b44_remove,
};