/*
 * Copyright (c) 2008 Stefan Hajnoczi <stefanha@gmail.com>
 * Copyright (c) 2008 Pantelis Koukousoulas <pktoss@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * This driver is a port of the b44 linux driver version 1.01
 *
 * Copyright (c) 2002 David S. Miller <davem@redhat.com>
 * Copyright (c) Pekka Pietikainen <pp@ee.oulu.fi>
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Some ssb bits copied from version 2.0 of the b44 driver
 * Copyright (c) Michael Buesch
 *
 * Copyright (c) a lot of people too. Please respect their work.
 */

FILE_LICENCE ( GPL2_OR_LATER );
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <unistd.h>
#include <byteswap.h>
#include <gpxe/io.h>
#include <mii.h>
#include <gpxe/iobuf.h>
#include <gpxe/malloc.h>
#include <gpxe/pci.h>
#include <gpxe/netdevice.h>
#include <gpxe/ethernet.h>
#include <gpxe/if_ether.h>
#include <gpxe/memmap.h>

#include "b44.h"

/* Forward declarations for names used before they are defined below;
 * without these the file would not compile. */
static int b44_phy_reset(struct b44_private *bp);
static void b44_set_rx_mode(struct net_device *netdev);
static struct net_device_operations b44_operations;
static inline int ring_next(int index)
{
        /* B44_RING_SIZE is a power of 2 :) */
        return (index + 1) & (B44_RING_SIZE - 1);
}
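/*
 * Worked example of the wrap-around: with a ring of 8 descriptors,
 * ring_next(7) == (7 + 1) & 7 == 0, so the index wraps back to the
 * start. The bitmask trick only works because the ring size is a
 * power of two.
 */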
/* Memory-mapped I/O wrappers */

static inline u32 br32(const struct b44_private *bp, u32 reg)
{
        return readl(bp->regs + reg);
}

static inline void bw32(const struct b44_private *bp, u32 reg, u32 val)
{
        writel(val, bp->regs + reg);
}

static inline void bflush(const struct b44_private *bp, u32 reg, u32 timeout)
{
        readl(bp->regs + reg);
        udelay(timeout);
}
#define VIRT_TO_B44(addr) ( virt_to_bus(addr) + SB_PCI_DMA )
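/*
 * Note: the SSB cores reach host memory through a translation window,
 * so a CPU-visible physical address must be offset by SB_PCI_DMA
 * before it is handed to the chip. The DMA engine can only address
 * the first 1GB of RAM, which is why b44_probe() below bails out on
 * machines with more memory installed.
 */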
/**
 * Return non-zero if the installed RAM is within
 * the limit given and zero if it is outside.
 * Hopefully will be removed soon.
 */
int phys_ram_within_limit(u64 limit)
{
        struct memory_map memmap;
        struct memory_region *highest = NULL;

        get_memmap(&memmap);

        highest = &memmap.regions[memmap.count - 1];

        return (highest->end < limit);
}
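/*
 * Note: this check relies on get_memmap() returning the memory
 * regions sorted by ascending address, so that the last region is the
 * one with the highest end address.
 */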
/**
 * Ring cells waiting to be processed are between 'tx_cur' and 'pending'
 * indexes in the ring.
 */
static u32 pending_tx_index(struct b44_private *bp)
{
        u32 pending = br32(bp, B44_DMATX_STAT);
        pending &= DMATX_STAT_CDMASK;

        pending /= sizeof(struct dma_desc);
        return pending & (B44_RING_SIZE - 1);
}

/**
 * Ring cells waiting to be processed are between 'rx_cur' and 'pending'
 * indexes in the ring.
 */
static u32 pending_rx_index(struct b44_private *bp)
{
        u32 pending = br32(bp, B44_DMARX_STAT);
        pending &= DMARX_STAT_CDMASK;

        pending /= sizeof(struct dma_desc);
        return pending & (B44_RING_SIZE - 1);
}
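/*
 * Note: the DMATX_STAT/DMARX_STAT registers report the descriptor the
 * hardware is currently processing as a byte offset into the ring,
 * which is why the masked value is divided by sizeof(struct dma_desc)
 * to obtain a ring index.
 */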
/**
 * Wait until the given bit is set/cleared.
 */
static int b44_wait_bit(struct b44_private *bp, unsigned long reg, u32 bit,
                        unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;

                if (!clear && (val & bit))
                        break;

                udelay(10);
        }

        if (i == timeout) {
                return -ENODEV;
        }

        return 0;
}
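/*
 * Note: each polling iteration sleeps for 10us, so the 'timeout'
 * argument is expressed in units of 10 microseconds.
 */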
/*
 * Sonics Silicon Backplane support. SSB is a mini-bus interconnecting
 * so-called IP Cores. One of those cores implements the Fast Ethernet
 * functionality and another one the PCI engine.
 *
 * You need to switch to the core you want to talk to before actually
 * sending commands.
 *
 * See: http://bcm-v4.sipsolutions.net/Backplane for (reverse-engineered)
 * specs.
 */

static inline u32 ssb_get_core_rev(struct b44_private *bp)
{
        return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}

static inline int ssb_is_core_up(struct b44_private *bp)
{
        return ((br32(bp, B44_SBTMSLOW) & (SSB_CORE_DOWN | SBTMSLOW_CLOCK))
                == SBTMSLOW_CLOCK);
}

static u32 ssb_pci_setup(struct b44_private *bp, u32 cores)
{
        u32 bar_orig, pci_rev, val;

        pci_read_config_dword(bp->pci, SSB_BAR0_WIN, &bar_orig);
        pci_write_config_dword(bp->pci, SSB_BAR0_WIN,
                               BCM4400_PCI_CORE_ADDR);
        pci_rev = ssb_get_core_rev(bp);

        val = br32(bp, B44_SBINTVEC);
        val |= cores;
        bw32(bp, B44_SBINTVEC, val);

        val = br32(bp, SSB_PCI_TRANS_2);
        val |= SSB_PCI_PREF | SSB_PCI_BURST;
        bw32(bp, SSB_PCI_TRANS_2, val);

        pci_write_config_dword(bp->pci, SSB_BAR0_WIN, bar_orig);

        return pci_rev;
}

static void ssb_core_disable(struct b44_private *bp)
{
        if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
                return;

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
        b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
        b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
                                SSB_CORE_DOWN));
        bflush(bp, B44_SBTMSLOW, 1);

        bw32(bp, B44_SBTMSLOW, SSB_CORE_DOWN);
        bflush(bp, B44_SBTMSLOW, 1);
}

static void ssb_core_reset(struct b44_private *bp)
{
        u32 val;
        const u32 mask = (SBTMSLOW_CLOCK | SBTMSLOW_FGC | SBTMSLOW_RESET);

        ssb_core_disable(bp);

        bw32(bp, B44_SBTMSLOW, mask);
        bflush(bp, B44_SBTMSLOW, 1);

        /* Clear SERR if set, this is a hw bug workaround. */
        if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
                bw32(bp, B44_SBTMSHIGH, 0);

        val = br32(bp, B44_SBIMSTATE);
        if (val & (SBIMSTATE_BAD)) {
                bw32(bp, B44_SBIMSTATE, val & ~SBIMSTATE_BAD);
        }

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        bflush(bp, B44_SBTMSLOW, 1);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
        bflush(bp, B44_SBTMSLOW, 1);
}
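/*
 * Note: the sequence above follows the usual SSB reset pattern: put
 * the core into reset with the clock forced on (SBTMSLOW_FGC), clear
 * any error state left in SBTMSHIGH/SBIMSTATE, then drop the reset
 * bit while keeping the clock running so the core comes back up
 * cleanly.
 */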
/*
 * Driver helper functions
 */

/*
 * Chip reset provides power to the b44 MAC & PCI cores, which
 * is necessary for MAC register access. We only do a partial
 * reset in case of transmit/receive errors (ISTAT_ERRORS) to
 * avoid the chip being hung for an unnecessary long time in
 * this case.
 *
 * Called-by: b44_close, b44_halt, b44_inithw(b44_open), b44_probe
 */
static void b44_chip_reset(struct b44_private *bp, int reset_kind)
{
        if (ssb_is_core_up(bp)) {
                bw32(bp, B44_RCV_LAZY, 0);

                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);

                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_dirty = bp->tx_cur = 0;

                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK)
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);

                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_cur = 0;
        } else {
                ssb_pci_setup(bp, SBINTVEC_ENET0);
        }

        ssb_core_reset(bp);

        /* Don't enable PHY if we are only doing a partial reset. */
        if (reset_kind == B44_CHIP_RESET_PARTIAL)
                return;

        /* Make PHY accessible. */
        bw32(bp, B44_MDIO_CTRL,
             (MDIO_CTRL_PREAMBLE | (0x0d & MDIO_CTRL_MAXF_MASK)));
        bflush(bp, B44_MDIO_CTRL, 1);

        /* Enable internal or external PHY */
        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                bflush(bp, B44_ENET_CTRL, 1);
        } else {
                u32 val = br32(bp, B44_DEVCTRL);
                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        bflush(bp, B44_DEVCTRL, 100);
                }
        }
}

/**
 * called by b44_poll in the error path
 */
static void b44_halt(struct b44_private *bp)
{
        /* disable ints */
        bw32(bp, B44_IMASK, 0);
        bflush(bp, B44_IMASK, 1);

        DBG("b44: powering down PHY\n");
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);

        /*
         * Now reset the chip, but without enabling
         * the MAC&PHY part of it.
         * This has to be done _after_ we shut down the PHY
         */
        b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}

/*
 * Called at device open time to get the chip ready for
 * packet processing.
 *
 * Called-by: b44_open
 */
static void b44_init_hw(struct b44_private *bp, int reset_kind)
{
        u32 val;
#define CTRL_MASK (DMARX_CTRL_ENABLE | (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT))

        b44_chip_reset(bp, B44_CHIP_RESET_FULL);
        if (reset_kind == B44_FULL_RESET) {
                b44_phy_reset(bp);
        }

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too. */
        b44_set_rx_mode(bp->netdev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, B44_MAX_MTU + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, B44_MAX_MTU + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_HIWMARK, TX_HIWMARK_DEFLT);
        if (reset_kind == B44_PARTIAL_RESET) {
                bw32(bp, B44_DMARX_CTRL, CTRL_MASK);
        } else {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, VIRT_TO_B44(bp->tx));

                bw32(bp, B44_DMARX_CTRL, CTRL_MASK);
                bw32(bp, B44_DMARX_ADDR, VIRT_TO_B44(bp->rx));
                bw32(bp, B44_DMARX_PTR, B44_RX_RING_LEN_BYTES);

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        }

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
#undef CTRL_MASK
}
/*** Management of ring descriptors ***/

static void b44_populate_rx_descriptor(struct b44_private *bp, u32 idx)
{
        struct rx_header *rh;
        u32 ctrl, addr;

        rh = bp->rx_iobuf[idx]->data;
        rh->len = 0;
        rh->flags = 0;

        ctrl = DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET);
        if (idx == B44_RING_LAST) {
                ctrl |= DESC_CTRL_EOT;
        }
        addr = VIRT_TO_B44(bp->rx_iobuf[idx]->data);

        bp->rx[idx].ctrl = cpu_to_le32(ctrl);
        bp->rx[idx].addr = cpu_to_le32(addr);
        bw32(bp, B44_DMARX_PTR, idx * sizeof(struct dma_desc));
}
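/*
 * Note: every RX buffer starts with a struct rx_header that the NIC
 * fills in (frame length and status flags); the received frame itself
 * follows at RX_PKT_OFFSET. This is why b44_process_rx_packets() does
 * iob_reserve(iob, RX_PKT_OFFSET) before handing the buffer to the
 * network stack.
 */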
/*
 * Refill RX ring descriptors with buffers. This is needed
 * because during rx we are passing ownership of descriptor
 * buffers to the network stack.
 */
static void b44_rx_refill(struct b44_private *bp, u32 pending)
{
        u32 i;

        // skip pending
        for (i = pending + 1; i != bp->rx_cur; i = ring_next(i)) {
                if (bp->rx_iobuf[i] != NULL)
                        continue;

                bp->rx_iobuf[i] = alloc_iob(RX_PKT_BUF_SZ);
                if (!bp->rx_iobuf[i]) {
                        DBG("Refill rx ring failed!!\n");
                        break;
                }

                b44_populate_rx_descriptor(bp, i);
        }
}

static void b44_free_rx_ring(struct b44_private *bp)
{
        u32 i;

        if (bp->rx) {
                for (i = 0; i < B44_RING_SIZE; i++) {
                        free_iob(bp->rx_iobuf[i]);
                        bp->rx_iobuf[i] = NULL;
                }
                free_dma(bp->rx, B44_RX_RING_LEN_BYTES);
                bp->rx = NULL;
        }
}

static int b44_init_rx_ring(struct b44_private *bp)
{
        b44_free_rx_ring(bp);

        bp->rx = malloc_dma(B44_RX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
        if (!bp->rx)
                return -ENOMEM;

        memset(bp->rx_iobuf, 0, sizeof(bp->rx_iobuf));

        bp->rx_iobuf[0] = alloc_iob(RX_PKT_BUF_SZ);
        b44_populate_rx_descriptor(bp, 0);
        b44_rx_refill(bp, 0);

        DBG("Init RX rings: rx=0x%08lx\n", VIRT_TO_B44(bp->rx));
        return 0;
}

static void b44_free_tx_ring(struct b44_private *bp)
{
        if (bp->tx) {
                free_dma(bp->tx, B44_TX_RING_LEN_BYTES);
                bp->tx = NULL;
        }
}

static int b44_init_tx_ring(struct b44_private *bp)
{
        b44_free_tx_ring(bp);

        bp->tx = malloc_dma(B44_TX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
        if (!bp->tx)
                return -ENOMEM;

        memset(bp->tx, 0, B44_TX_RING_LEN_BYTES);
        memset(bp->tx_iobuf, 0, sizeof(bp->tx_iobuf));

        DBG("Init TX rings: tx=0x%08lx\n", VIRT_TO_B44(bp->tx));
        return 0;
}
/*** Interaction with the PHY ***/
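/*
 * Note: the two helpers below drive the MDIO management interface.
 * Each access assembles a standard MII management frame (start bits,
 * opcode, PHY address, register address, turnaround bits and data)
 * into a single word, writes it to B44_MDIO_DATA, and then waits for
 * the EMAC_INT_MII bit in B44_EMAC_ISTAT to signal completion.
 */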
static int b44_phy_read(struct b44_private *bp, int reg, u32 *val)
{
        int err;

        u32 arg1 = (MDIO_OP_READ << MDIO_DATA_OP_SHIFT);
        u32 arg2 = (bp->phy_addr << MDIO_DATA_PMD_SHIFT);
        u32 arg3 = (reg << MDIO_DATA_RA_SHIFT);
        u32 arg4 = (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT);
        u32 argv = arg1 | arg2 | arg3 | arg4;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | argv));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}

static int b44_phy_write(struct b44_private *bp, int reg, u32 val)
{
        u32 arg1 = (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT);
        u32 arg2 = (bp->phy_addr << MDIO_DATA_PMD_SHIFT);
        u32 arg3 = (reg << MDIO_DATA_RA_SHIFT);
        u32 arg4 = (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT);
        u32 arg5 = (val & MDIO_DATA_DATA);
        u32 argv = arg1 | arg2 | arg3 | arg4 | arg5;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | argv));

        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

static int b44_phy_reset(struct b44_private *bp)
{
        u32 val;
        int err;

        err = b44_phy_write(bp, MII_BMCR, BMCR_RESET);
        if (err)
                return err;

        udelay(100);
        err = b44_phy_read(bp, MII_BMCR, &val);
        if (!err) {
                if (val & BMCR_RESET) {
                        return -ENODEV;
                }
        }

        return 0;
}
/*
 * The BCM44xx CAM (Content Addressable Memory) stores the MAC
 * and PHY address.
 */
static void b44_cam_write(struct b44_private *bp, unsigned char *data,
                          int index)
{
        u32 val;

        val  = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) << 8;
        val |= ((u32) data[5]) << 0;
        bw32(bp, B44_CAM_DATA_LO, val);

        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) | (((u32) data[1]) << 0));

        bw32(bp, B44_CAM_DATA_HI, val);

        val = CAM_CTRL_WRITE | (index << CAM_CTRL_INDEX_SHIFT);
        bw32(bp, B44_CAM_CTRL, val);

        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
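/*
 * Note: a CAM entry packs the six MAC address bytes across two
 * registers: bytes 2-5 go into CAM_DATA_LO and bytes 0-1 into
 * CAM_DATA_HI together with the "entry valid" bit. The entry is then
 * committed by setting CAM_CTRL_WRITE with the target entry index and
 * waiting for the busy bit to clear.
 */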
static void b44_set_mac_addr(struct b44_private *bp)
{
        u32 val;
        bw32(bp, B44_CAM_CTRL, 0);
        b44_cam_write(bp, bp->netdev->ll_addr, 0);
        val = br32(bp, B44_CAM_CTRL);
        bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}
/* Read 128-bytes of EEPROM. */
static void b44_read_eeprom(struct b44_private *bp, u8 *data)
{
        long i;
        u16 *ptr = (u16 *) data;

        for (i = 0; i < 128; i += 2)
                ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
}

static void b44_load_mac_and_phy_addr(struct b44_private *bp)
{
        u8 eeprom[128];

        /* Load MAC address, note byteswapping */
        b44_read_eeprom(bp, &eeprom[0]);
        bp->netdev->hw_addr[0] = eeprom[79];
        bp->netdev->hw_addr[1] = eeprom[78];
        bp->netdev->hw_addr[2] = eeprom[81];
        bp->netdev->hw_addr[3] = eeprom[80];
        bp->netdev->hw_addr[4] = eeprom[83];
        bp->netdev->hw_addr[5] = eeprom[82];

        /* Load PHY address */
        bp->phy_addr = eeprom[90] & 0x1f;
}
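/*
 * Note: the EEPROM is visible as memory-mapped 16-bit words at offset
 * 4096 of the register BAR. The words hold the MAC address in the
 * opposite byte order from its on-wire form, which is why consecutive
 * bytes are swapped (79/78, 81/80, 83/82) when the address is
 * extracted above.
 */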
static void b44_set_rx_mode(struct net_device *netdev)
{
        struct b44_private *bp = netdev_priv(netdev);
        unsigned char zero[6] = { 0, 0, 0, 0, 0, 0 };
        u32 val;
        int i;

        val = br32(bp, B44_RXCONFIG);
        val &= ~RXCONFIG_PROMISC;
        val |= RXCONFIG_ALLMULTI;

        b44_set_mac_addr(bp);

        for (i = 1; i < 64; i++)
                b44_cam_write(bp, zero, i);

        bw32(bp, B44_RXCONFIG, val);
        val = br32(bp, B44_CAM_CTRL);
        bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}
/*** Implementation of gPXE driver callbacks ***/

/**
 * Probe device
 *
 * @v pci       PCI device
 * @v id        Matching entry in ID table
 * @ret rc      Return status code
 */
static int b44_probe(struct pci_device *pci, const struct pci_device_id *id)
{
        struct net_device *netdev;
        struct b44_private *bp;
        int rc;

        /*
         * Bail out if more than 1GB of physical RAM is installed.
         * This limitation will be removed later when dma mapping
         * is merged into mainline.
         */
        if (!phys_ram_within_limit(B44_30BIT_DMA_MASK)) {
                DBG("Sorry, this version of the driver does not\n"
                    "support systems with more than 1GB of RAM.\n");
                return -ENOMEM;
        }

        /* Set up netdev */
        netdev = alloc_etherdev(sizeof(*bp));
        if (!netdev)
                return -ENOMEM;

        netdev_init(netdev, &b44_operations);
        pci_set_drvdata(pci, netdev);
        netdev->dev = &pci->dev;

        /* Set up private data */
        bp = netdev_priv(netdev);
        memset(bp, 0, sizeof(*bp));
        bp->netdev = netdev;
        bp->pci = pci;

        /* Map device registers */
        bp->regs = ioremap(pci->membase, B44_REGS_SIZE);
        if (!bp->regs) {
                netdev_put(netdev);
                return -ENOMEM;
        }

        /* Enable PCI bus mastering */
        adjust_pci_device(pci);

        b44_load_mac_and_phy_addr(bp);

        /* Link management currently not implemented */
        netdev_link_up(netdev);

        rc = register_netdev(netdev);
        if (rc != 0) {
                iounmap(bp->regs);
                netdev_put(netdev);
                return rc;
        }

        b44_chip_reset(bp, B44_CHIP_RESET_FULL);

        DBG("b44 %s (%04x:%04x) regs=%p MAC=%s\n", id->name, id->vendor,
            id->device, bp->regs, eth_ntoa(netdev->ll_addr));

        return 0;
}
/**
 * Remove device
 *
 * @v pci       PCI device
 */
static void b44_remove(struct pci_device *pci)
{
        struct net_device *netdev = pci_get_drvdata(pci);
        struct b44_private *bp = netdev_priv(netdev);

        ssb_core_disable(bp);
        unregister_netdev(netdev);
        iounmap(bp->regs);
        netdev_nullify(netdev);
        netdev_put(netdev);
}

/** Enable or disable interrupts
 *
 * @v netdev    Network device
 * @v enable    Interrupts should be enabled
 */
static void b44_irq(struct net_device *netdev, int enable)
{
        struct b44_private *bp = netdev_priv(netdev);

        /* Interrupt mask specifies which events generate interrupts */
        bw32(bp, B44_IMASK, enable ? IMASK_DEF : IMASK_DISABLE);
}

/** Open network device
 *
 * @v netdev    Network device
 * @ret rc      Return status code
 */
static int b44_open(struct net_device *netdev)
{
        struct b44_private *bp = netdev_priv(netdev);
        int rc;

        rc = b44_init_tx_ring(bp);
        if (rc != 0)
                return rc;

        rc = b44_init_rx_ring(bp);
        if (rc != 0)
                return rc;

        b44_init_hw(bp, B44_FULL_RESET);

        /* Disable interrupts */
        b44_irq(netdev, 0);

        return 0;
}

/** Close network device
 *
 * @v netdev    Network device
 */
static void b44_close(struct net_device *netdev)
{
        struct b44_private *bp = netdev_priv(netdev);

        b44_chip_reset(bp, B44_FULL_RESET);
        b44_free_tx_ring(bp);
        b44_free_rx_ring(bp);
}
/** Transmit packet
 *
 * @v netdev    Network device
 * @v iobuf     I/O buffer
 * @ret rc      Return status code
 */
static int b44_transmit(struct net_device *netdev, struct io_buffer *iobuf)
{
        struct b44_private *bp = netdev_priv(netdev);
        u32 cur = bp->tx_cur;
        u32 ctrl;

        /* Check for TX ring overflow */
        if (bp->tx[cur].ctrl) {
                DBG("tx overflow\n");
                return -ENOBUFS;
        }

        /* Will call netdev_tx_complete() on the iobuf later */
        bp->tx_iobuf[cur] = iobuf;

        /* Set up TX descriptor */
        ctrl = (iob_len(iobuf) & DESC_CTRL_LEN) |
            DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;

        if (cur == B44_RING_LAST)
                ctrl |= DESC_CTRL_EOT;

        bp->tx[cur].ctrl = cpu_to_le32(ctrl);
        bp->tx[cur].addr = cpu_to_le32(VIRT_TO_B44(iobuf->data));

        /* Update next available descriptor index */
        cur = ring_next(cur);
        bp->tx_cur = cur;
        wmb();

        /* Tell card that a new TX descriptor is ready */
        bw32(bp, B44_DMATX_PTR, cur * sizeof(struct dma_desc));
        return 0;
}
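/*
 * Note: a descriptor with a non-zero ctrl word is still in use;
 * b44_transmit() above relies on that for its overflow check, and
 * b44_tx_complete() below clears ctrl/addr once the frame has been
 * sent, handing the slot back for reuse.
 */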
/** Recycles sent TX descriptors and notifies network stack
 *
 * @v bp Driver state
 */
static void b44_tx_complete(struct b44_private *bp)
{
        u32 cur, i;

        cur = pending_tx_index(bp);

        for (i = bp->tx_dirty; i != cur; i = ring_next(i)) {
                /* Free finished frame */
                netdev_tx_complete(bp->netdev, bp->tx_iobuf[i]);
                bp->tx_iobuf[i] = NULL;

                /* Clear TX descriptor */
                bp->tx[i].ctrl = 0;
                bp->tx[i].addr = 0;
        }
        bp->tx_dirty = cur;
}

static void b44_process_rx_packets(struct b44_private *bp)
{
        struct io_buffer *iob;  /* received data */
        struct rx_header *rh;
        u32 pending, i;
        u16 len;

        pending = pending_rx_index(bp);

        for (i = bp->rx_cur; i != pending; i = ring_next(i)) {
                iob = bp->rx_iobuf[i];
                if (iob == NULL)
                        break;

                rh = iob->data;
                len = le16_to_cpu(rh->len);

                /*
                 * Guard against incompletely written RX descriptors.
                 * Without this, things can get really slow!
                 */
                if (len == 0)
                        break;

                /* Discard CRC that is generated by the card */
                len -= 4;

                /* Check for invalid packets and errors */
                if (len > RX_PKT_BUF_SZ - RX_PKT_OFFSET ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                        DBG("rx error len=%d flags=%04x\n", len,
                            cpu_to_le16(rh->flags));
                        rh->len = 0;
                        rh->flags = 0;
                        netdev_rx_err(bp->netdev, iob, -EINVAL);
                        continue;
                }

                /* Clear RX descriptor */
                rh->len = 0;
                rh->flags = 0;
                bp->rx_iobuf[i] = NULL;

                /* Hand off the IO buffer to the network stack */
                iob_reserve(iob, RX_PKT_OFFSET);
                iob_put(iob, len);
                netdev_rx(bp->netdev, iob);
        }
        bp->rx_cur = i;
        b44_rx_refill(bp, pending_rx_index(bp));
}
/** Poll for completed and received packets
 *
 * @v netdev    Network device
 */
static void b44_poll(struct net_device *netdev)
{
        struct b44_private *bp = netdev_priv(netdev);
        u32 istat;

        /* Interrupt status */
        istat = br32(bp, B44_ISTAT);
        istat &= IMASK_DEF;     /* only the events we care about */

        if (!istat)
                return;
        if (istat & ISTAT_TX)
                b44_tx_complete(bp);
        if (istat & ISTAT_RX)
                b44_process_rx_packets(bp);
        if (istat & ISTAT_ERRORS) {
                DBG("b44 error istat=0x%08x\n", istat);

                /* Reset B44 core partially to avoid long waits */
                b44_irq(bp->netdev, 0);
                b44_halt(bp);
                b44_init_tx_ring(bp);
                b44_init_rx_ring(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
        }

        /* Acknowledge interrupt */
        bw32(bp, B44_ISTAT, 0);
        bflush(bp, B44_ISTAT, 1);
}

static struct net_device_operations b44_operations = {
        .open = b44_open,
        .close = b44_close,
        .transmit = b44_transmit,
        .poll = b44_poll,
        .irq = b44_irq,
};

static struct pci_device_id b44_nics[] = {
        PCI_ROM(0x14e4, 0x4401, "BCM4401", "BCM4401", 0),
        PCI_ROM(0x14e4, 0x170c, "BCM4401-B0", "BCM4401-B0", 0),
        PCI_ROM(0x14e4, 0x4402, "BCM4401-B1", "BCM4401-B1", 0),
};

struct pci_driver b44_driver __pci_driver = {
        .ids = b44_nics,
        .id_count = sizeof b44_nics / sizeof b44_nics[0],
        .probe = b44_probe,
        .remove = b44_remove,
};