/*
 * Copyright(c) 2007 Atheros Corporation. All rights reserved.
 *
 * Derived from Intel e1000 driver
 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
 *
 * Modified for gPXE, October 2009 by Joshua Oreman <oremanj@rwcr.net>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

FILE_LICENCE ( GPL2_OR_LATER );

#include "atl1e.h"

/* User-tweakable parameters: */
#define TX_DESC_COUNT	32	/* TX descriptors, minimum 32 */
#define RX_MEM_SIZE	8192	/* RX area size, minimum 8kb */
#define MAX_FRAME_SIZE	1500	/* Maximum MTU supported, minimum 1500 */

/* Arcane parameters: */
#define PREAMBLE_LEN	7
#define RX_JUMBO_THRESH	((MAX_FRAME_SIZE + ETH_HLEN + \
			  VLAN_HLEN + ETH_FCS_LEN + 7) >> 3)
#define IMT_VAL		100	/* interrupt moderator timer, us */
#define ICT_VAL		50000	/* interrupt clear timer, us */
#define SMB_TIMER	200000
#define RRD_THRESH	1	/* packets to queue before interrupt */
#define TPD_BURST	5
#define TPD_THRESH	(TX_DESC_COUNT / 2)
#define RX_COUNT_DOWN	4
#define TX_COUNT_DOWN	(IMT_VAL * 4 / 3)
#define DMAR_DLY_CNT	15
#define DMAW_DLY_CNT	4

#define PCI_DEVICE_ID_ATTANSIC_L1E	0x1026

/*
 * atl1e_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id atl1e_pci_tbl[] = {
	PCI_ROM(0x1969, 0x1026, "atl1e_26", "Attansic L1E 0x1026", 0),
	PCI_ROM(0x1969, 0x1066, "atl1e_66", "Attansic L1E 0x1066", 0),
};

static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter);

static const u16
atl1e_rx_page_vld_regs[AT_PAGE_NUM_PER_QUEUE] =
{
	REG_HOST_RXF0_PAGE0_VLD, REG_HOST_RXF0_PAGE1_VLD
};

static const u16
atl1e_rx_page_lo_addr_regs[AT_PAGE_NUM_PER_QUEUE] =
{
	REG_HOST_RXF0_PAGE0_LO, REG_HOST_RXF0_PAGE1_LO
};

static const u16
atl1e_rx_page_write_offset_regs[AT_PAGE_NUM_PER_QUEUE] =
{
	REG_HOST_RXF0_MB0_LO, REG_HOST_RXF0_MB1_LO
};
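
/*
 * DMA burst sizes in bytes. atl1e_configure_tx() uses hw->dmar_block
 * (an atl1e_dma_req_* value) as an index into this table when
 * programming REG_TXQ_CTRL + 2.
 */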
static const u16 atl1e_pay_load_size[] = {
	128, 256, 512, 1024, 2048, 4096,
};

/*
 * atl1e_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 */
static inline void atl1e_irq_enable(struct atl1e_adapter *adapter)
{
	AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
	AT_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
	AT_WRITE_FLUSH(&adapter->hw);
}

/*
 * atl1e_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 */
static inline void atl1e_irq_disable(struct atl1e_adapter *adapter)
{
	AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
	AT_WRITE_FLUSH(&adapter->hw);
}

/*
 * atl1e_irq_reset - reset interrupt configuration on the NIC
 * @adapter: board private structure
 */
static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
{
	AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
	AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
	AT_WRITE_FLUSH(&adapter->hw);
}

static void atl1e_reset(struct atl1e_adapter *adapter)
{
	atl1e_down(adapter);
	atl1e_up(adapter);
}
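
/*
 * atl1e_check_link - update the link state from the PHY status register
 * @adapter: board private structure
 *
 * The BMSR link-status bit latches low, so the register is read twice
 * below to obtain the current link state rather than a stale one.
 */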
static int atl1e_check_link(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	u16 speed, duplex, phy_data;

	/* MII_BMSR must be read twice */
	atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
	atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);

	if ((phy_data & BMSR_LSTATUS) == 0) {
		/* link down */
		if (netdev_link_ok(netdev)) { /* old link state: Up */
			u32 value;
			/* disable rx */
			value = AT_READ_REG(hw, REG_MAC_CTRL);
			value &= ~MAC_CTRL_RX_EN;
			AT_WRITE_REG(hw, REG_MAC_CTRL, value);
			adapter->link_speed = SPEED_0;

			DBG("atl1e: %s link is down\n", netdev->name);
			netdev_link_down(netdev);
		}
	} else {
		/* Link Up */
		err = atl1e_get_speed_and_duplex(hw, &speed, &duplex);
		if (err)
			return err;

		/* link result is our setting */
		if (adapter->link_speed != speed ||
		    adapter->link_duplex != duplex) {
			adapter->link_speed = speed;
			adapter->link_duplex = duplex;
			atl1e_setup_mac_ctrl(adapter);

			DBG("atl1e: %s link is up, %d Mbps, %s duplex\n",
			    netdev->name, adapter->link_speed,
			    adapter->link_duplex == FULL_DUPLEX ?
			    "full" : "half");
			netdev_link_up(netdev);
		}
	}

	return 0;
}

static int atl1e_mdio_read(struct net_device *netdev, int phy_id __unused,
			   int reg_num)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	u16 result;

	atl1e_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result);
	return result;
}

static void atl1e_mdio_write(struct net_device *netdev, int phy_id __unused,
			     int reg_num, int val)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
}

static void atl1e_setup_pcicmd(struct pci_device *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	cmd |= (PCI_COMMAND_MEM | PCI_COMMAND_MASTER);
	pci_write_config_word(pdev, PCI_COMMAND, cmd);

	/*
	 * Some motherboard BIOS (PXE/EFI) drivers may leave PME asserted
	 * when they transfer control to the OS (Windows/Linux), so clear
	 * the PM control/status register before the NIC is used.
	 */
	pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
	mdelay(1);
}

/*
 * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter)
 * @adapter: board private structure to initialize
 *
 * atl1e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int atl1e_sw_init(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;
	struct pci_device *pdev = adapter->pdev;
	u32 phy_status_data = 0;
	u8 rev_id = 0;

	adapter->link_speed = SPEED_0;	/* hardware init */
	adapter->link_duplex = FULL_DUPLEX;

	/* PCI config space info */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);

	/* nic type */
	if (rev_id >= 0xF0) {
		hw->nic_type = athr_l2e_revB;
	} else {
		if (phy_status_data & PHY_STATUS_100M)
			hw->nic_type = athr_l1e;
		else
			hw->nic_type = athr_l2e_revA;
	}

	phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
	hw->emi_ca = !!(phy_status_data & PHY_STATUS_EMI_CA);
	hw->phy_configured = 0;

	/* needs confirmation */
	hw->dmar_block = atl1e_dma_req_1024;
	hw->dmaw_block = atl1e_dma_req_1024;

	netdev_link_down(adapter->netdev);

	return 0;
}

/*
 * atl1e_clean_tx_ring - free all Tx buffers for device close
 * @adapter: board private structure
 */
static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
		&adapter->tx_ring;
	struct atl1e_tx_buffer *tx_buffer = NULL;
	u16 index, ring_count = tx_ring->count;

	if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL)
		return;

	for (index = 0; index < ring_count; index++) {
		tx_buffer = &tx_ring->tx_buffer[index];
		if (tx_buffer->iob) {
			netdev_tx_complete(adapter->netdev, tx_buffer->iob);
			tx_buffer->dma = 0;
			tx_buffer->iob = NULL;
		}
	}

	/* Zero out Tx-buffers */
	memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) *
	       ring_count);
	memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) *
	       ring_count);
}

/*
 * atl1e_clean_rx_ring - Free rx-reservation iobs
 * @adapter: board private structure
 */
static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
{
	struct atl1e_rx_ring *rx_ring =
		(struct atl1e_rx_ring *)&adapter->rx_ring;
	struct atl1e_rx_page_desc *rx_page_desc = &rx_ring->rx_page_desc;
	u16 j;

	if (adapter->ring_vir_addr == NULL)
		return;

	/* Zero out the descriptor ring */
	for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
		if (rx_page_desc->rx_page[j].addr != NULL) {
			memset(rx_page_desc->rx_page[j].addr, 0,
			       rx_ring->real_page_size);
		}
	}
}
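
/*
 * The TX and RX rings live in a single DMA allocation laid out as:
 * the TPD (TX descriptor) ring, 8-byte aligned; the RXF pages,
 * 32-byte aligned; then one TX CMB dword plus one write-offset dword
 * per RX page. The +7, +31 and +3 terms below leave room for those
 * alignment adjustments (see atl1e_setup_ring_resources()).
 */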
static void atl1e_cal_ring_size(struct atl1e_adapter *adapter, u32 *ring_size)
{
	*ring_size = ((u32)(adapter->tx_ring.count *
			    sizeof(struct atl1e_tpd_desc) + 7
			    /* tx ring, qword align */
			    + adapter->rx_ring.real_page_size * AT_PAGE_NUM_PER_QUEUE
			    + 31
			    /* rx ring, 32 bytes align */
			    + (1 + AT_PAGE_NUM_PER_QUEUE) *
			    sizeof(u32) + 3));
	/* tx, rx cmd, dword align */
}

static void atl1e_init_ring_resources(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = NULL;
	struct atl1e_rx_ring *rx_ring = NULL;

	tx_ring = &adapter->tx_ring;
	rx_ring = &adapter->rx_ring;

	rx_ring->real_page_size = adapter->rx_ring.page_size
				  + MAX_FRAME_SIZE
				  + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
	rx_ring->real_page_size = (rx_ring->real_page_size + 31) & ~31;

	atl1e_cal_ring_size(adapter, &adapter->ring_size);

	adapter->ring_vir_addr = NULL;
	adapter->rx_ring.desc = NULL;

	return;
}

/*
 * Read / Write Ptr Initialize:
 */
static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = NULL;
	struct atl1e_rx_ring *rx_ring = NULL;
	struct atl1e_rx_page_desc *rx_page_desc = NULL;
	int j;

	tx_ring = &adapter->tx_ring;
	rx_ring = &adapter->rx_ring;
	rx_page_desc = &rx_ring->rx_page_desc;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	rx_page_desc->rx_using = 0;
	rx_page_desc->rx_nxseq = 0;

	for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
		*rx_page_desc->rx_page[j].write_offset_addr = 0;
		rx_page_desc->rx_page[j].read_offset = 0;
	}
}

/*
 * atl1e_free_ring_resources - Free Tx / RX descriptor Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
{
	atl1e_clean_tx_ring(adapter);
	atl1e_clean_rx_ring(adapter);

	if (adapter->ring_vir_addr) {
		free_dma(adapter->ring_vir_addr, adapter->ring_size);
		adapter->ring_vir_addr = NULL;
		adapter->ring_dma = 0;
	}

	if (adapter->tx_ring.tx_buffer) {
		free(adapter->tx_ring.tx_buffer);
		adapter->tx_ring.tx_buffer = NULL;
	}
}

/*
 * atl1e_setup_ring_resources - allocate Tx / RX descriptor resources
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring;
	struct atl1e_rx_ring *rx_ring;
	struct atl1e_rx_page_desc *rx_page_desc;
	int size, j;
	u32 offset = 0;
	int err = 0;

	if (adapter->ring_vir_addr != NULL)
		return 0;	/* already allocated */

	tx_ring = &adapter->tx_ring;
	rx_ring = &adapter->rx_ring;

	/* real ring DMA buffer */
	size = adapter->ring_size;
	adapter->ring_vir_addr = malloc_dma(adapter->ring_size, 32);
	if (adapter->ring_vir_addr == NULL) {
		DBG("atl1e: out of memory allocating %d bytes for %s ring\n",
		    adapter->ring_size, adapter->netdev->name);
		return -ENOMEM;
	}

	adapter->ring_dma = virt_to_bus(adapter->ring_vir_addr);
	memset(adapter->ring_vir_addr, 0, adapter->ring_size);

	rx_page_desc = &rx_ring->rx_page_desc;

	/* Init TPD Ring */
	tx_ring->dma = (adapter->ring_dma + 7) & ~7;
	offset = tx_ring->dma - adapter->ring_dma;
	tx_ring->desc = (struct atl1e_tpd_desc *)
		(adapter->ring_vir_addr + offset);
	size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
	tx_ring->tx_buffer = zalloc(size);
	if (tx_ring->tx_buffer == NULL) {
		DBG("atl1e: out of memory allocating %d bytes for %s txbuf\n",
		    size, adapter->netdev->name);
		err = -ENOMEM;
		goto failed;
	}

	/* Init RXF-Pages */
	offset += (sizeof(struct atl1e_tpd_desc) * tx_ring->count);
	offset = (offset + 31) & ~31;

	for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
		rx_page_desc->rx_page[j].dma =
			adapter->ring_dma + offset;
		rx_page_desc->rx_page[j].addr =
			adapter->ring_vir_addr + offset;
		offset += rx_ring->real_page_size;
	}

	/* Init CMB dma address */
	tx_ring->cmb_dma = adapter->ring_dma + offset;
	tx_ring->cmb = (u32 *)(adapter->ring_vir_addr + offset);
	offset += sizeof(u32);

	for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
		rx_page_desc->rx_page[j].write_offset_dma =
			adapter->ring_dma + offset;
		rx_page_desc->rx_page[j].write_offset_addr =
			adapter->ring_vir_addr + offset;
		offset += sizeof(u32);
	}

	if (offset > adapter->ring_size) {
		DBG("atl1e: ring miscalculation! need %d > %d bytes\n",
		    offset, adapter->ring_size);
		err = -EINVAL;
		goto failed;
	}

	return 0;

failed:
	atl1e_free_ring_resources(adapter);
	return err;
}
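
/*
 * atl1e_configure_des_ring - program descriptor ring addresses
 * @adapter: board private structure
 *
 * Writes the TPD ring base/size, TX CMB address, RXF page addresses
 * and page size into the NIC, then hits REG_LOAD_PTR so the hardware
 * latches the new pointers.
 */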
static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
	struct atl1e_rx_ring *rx_ring =
		(struct atl1e_rx_ring *)&adapter->rx_ring;
	struct atl1e_tx_ring *tx_ring =
		(struct atl1e_tx_ring *)&adapter->tx_ring;
	struct atl1e_rx_page_desc *rx_page_desc = NULL;
	int j;

	AT_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI, 0);
	AT_WRITE_REG(hw, REG_TPD_BASE_ADDR_LO, tx_ring->dma);
	AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u16)(tx_ring->count));
	AT_WRITE_REG(hw, REG_HOST_TX_CMB_LO, tx_ring->cmb_dma);

	rx_page_desc = &rx_ring->rx_page_desc;

	/* RXF Page Physical address / Page Length */
	AT_WRITE_REG(hw, REG_RXF0_BASE_ADDR_HI, 0);
	for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
		u32 page_phy_addr;
		u32 offset_phy_addr;

		page_phy_addr = rx_page_desc->rx_page[j].dma;
		offset_phy_addr = rx_page_desc->rx_page[j].write_offset_dma;

		AT_WRITE_REG(hw, atl1e_rx_page_lo_addr_regs[j], page_phy_addr);
		AT_WRITE_REG(hw, atl1e_rx_page_write_offset_regs[j],
			     offset_phy_addr);
		AT_WRITE_REGB(hw, atl1e_rx_page_vld_regs[j], 1);
	}

	/* Page Length */
	AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size);

	/* Load all of base address above */
	AT_WRITE_REG(hw, REG_LOAD_PTR, 1);

	return;
}

static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
	u32 dev_ctrl_data = 0;
	u32 max_pay_load = 0;
	u32 jumbo_thresh = 0;
	u32 extra_size = 0;	/* Jumbo frame threshold in QWORD unit */

	/* configure TXQ param */
	if (hw->nic_type != athr_l2e_revB) {
		extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
		jumbo_thresh = MAX_FRAME_SIZE + extra_size;
		AT_WRITE_REG(hw, REG_TX_EARLY_TH, (jumbo_thresh + 7) >> 3);
	}

	dev_ctrl_data = AT_READ_REG(hw, REG_DEVICE_CTRL);
	max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) &
		       DEVICE_CTRL_MAX_PAYLOAD_MASK;
	if (max_pay_load < hw->dmaw_block)
		hw->dmaw_block = max_pay_load;

	max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) &
		       DEVICE_CTRL_MAX_RREQ_SZ_MASK;
	if (max_pay_load < hw->dmar_block)
		hw->dmar_block = max_pay_load;

	if (hw->nic_type != athr_l2e_revB)
		AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2,
			      atl1e_pay_load_size[hw->dmar_block]);

	/* enable TXQ */
	AT_WRITE_REGW(hw, REG_TXQ_CTRL,
		      ((TPD_BURST & TXQ_CTRL_NUM_TPD_BURST_MASK)
		       << TXQ_CTRL_NUM_TPD_BURST_SHIFT)
		      | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN);

	return;
}

static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
	u32 rxf_len = 0;
	u32 rxf_low = 0;
	u32 rxf_high = 0;
	u32 rxf_thresh_data = 0;
	u32 rxq_ctrl_data = 0;

	if (hw->nic_type != athr_l2e_revB) {
		AT_WRITE_REGW(hw, REG_RXQ_JMBOSZ_RRDTIM,
			      (u16)((RX_JUMBO_THRESH & RXQ_JMBOSZ_TH_MASK) <<
				    RXQ_JMBOSZ_TH_SHIFT |
				    (1 & RXQ_JMBO_LKAH_MASK) <<
				    RXQ_JMBO_LKAH_SHIFT));

		rxf_len = AT_READ_REG(hw, REG_SRAM_RXF_LEN);
		rxf_high = rxf_len * 4 / 5;
		rxf_low = rxf_len / 5;
		rxf_thresh_data = ((rxf_high & RXQ_RXF_PAUSE_TH_HI_MASK)
				   << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
				  ((rxf_low & RXQ_RXF_PAUSE_TH_LO_MASK)
				   << RXQ_RXF_PAUSE_TH_LO_SHIFT);

		AT_WRITE_REG(hw, REG_RXQ_RXF_PAUSE_THRESH, rxf_thresh_data);
	}

	/* RRS */
	AT_WRITE_REG(hw, REG_IDT_TABLE, 0);
	AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, 0);

	rxq_ctrl_data |= RXQ_CTRL_PBA_ALIGN_32 |
			 RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;

	AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);

	return;
}

static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;
	u32 dma_ctrl_data = 0;

	dma_ctrl_data = DMA_CTRL_RXCMB_EN;
	dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
			 << DMA_CTRL_DMAR_BURST_LEN_SHIFT;
	dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
			 << DMA_CTRL_DMAW_BURST_LEN_SHIFT;
	dma_ctrl_data |= DMA_CTRL_DMAR_REQ_PRI | DMA_CTRL_DMAR_OUT_ORDER;
	dma_ctrl_data |= (DMAR_DLY_CNT & DMA_CTRL_DMAR_DLY_CNT_MASK)
			 << DMA_CTRL_DMAR_DLY_CNT_SHIFT;
	dma_ctrl_data |= (DMAW_DLY_CNT & DMA_CTRL_DMAW_DLY_CNT_MASK)
			 << DMA_CTRL_DMAW_DLY_CNT_SHIFT;

	AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);

	return;
}

static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
{
	u32 value;
	struct atl1e_hw *hw = &adapter->hw;

	/* Config MAC CTRL Register */
	value = MAC_CTRL_TX_EN |
		MAC_CTRL_RX_EN;

	if (FULL_DUPLEX == adapter->link_duplex)
		value |= MAC_CTRL_DUPLX;

	value |= ((u32)((SPEED_1000 == adapter->link_speed) ?
			MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
		  MAC_CTRL_SPEED_SHIFT);
	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);

	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
	value |= ((PREAMBLE_LEN & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);

	value |= MAC_CTRL_BC_EN;
	value |= MAC_CTRL_MC_ALL_EN;

	AT_WRITE_REG(hw, REG_MAC_CTRL, value);
}

/*
 * atl1e_configure - Configure Transmit & Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx / Rx unit of the MAC after a reset.
 */
static int atl1e_configure(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;
	u32 intr_status_data = 0;

	/* clear interrupt status */
	AT_WRITE_REG(hw, REG_ISR, ~0);

	/* 1. set MAC Address */
	atl1e_hw_set_mac_addr(hw);

	/* 2. Init the Multicast HASH table (clear) */
	AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
	AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);

	/* 3. Clear any WOL status */
	AT_WRITE_REG(hw, REG_WOL_CTRL, 0);

	/* 4. Descriptor Ring BaseMem/Length/Read ptr/Write ptr
	 *    TPD Ring/SMB/RXF0 Page CMBs, they use the same
	 *    High 32bits memory */
	atl1e_configure_des_ring(adapter);

	/* 5. set Interrupt Moderator Timer */
	AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, IMT_VAL);
	AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER2_INIT, IMT_VAL);
	AT_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_LED_MODE |
		     MASTER_CTRL_ITIMER_EN | MASTER_CTRL_ITIMER2_EN);

	/* 6. rx/tx threshold to trig interrupt */
	AT_WRITE_REGW(hw, REG_TRIG_RRD_THRESH, RRD_THRESH);
	AT_WRITE_REGW(hw, REG_TRIG_TPD_THRESH, TPD_THRESH);
	AT_WRITE_REGW(hw, REG_TRIG_RXTIMER, RX_COUNT_DOWN);
	AT_WRITE_REGW(hw, REG_TRIG_TXTIMER, TX_COUNT_DOWN);

	/* 7. set Interrupt Clear Timer */
	AT_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, ICT_VAL);

	/* 8. set MTU */
	AT_WRITE_REG(hw, REG_MTU, MAX_FRAME_SIZE + ETH_HLEN +
		     VLAN_HLEN + ETH_FCS_LEN);

	/* 9. config TXQ early tx threshold */
	atl1e_configure_tx(adapter);

	/* 10. config RXQ */
	atl1e_configure_rx(adapter);

	/* 11. config DMA Engine */
	atl1e_configure_dma(adapter);

	/* 12. smb timer to trig interrupt */
	AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, SMB_TIMER);

	intr_status_data = AT_READ_REG(hw, REG_ISR);
	if ((intr_status_data & ISR_PHY_LINKDOWN) != 0) {
		DBG("atl1e: configure failed, PCIE phy link down\n");
		return -1;
	}

	AT_WRITE_REG(hw, REG_ISR, 0x7fffffff);
	return 0;
}

static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter)
{
	u16 phy_data;

	atl1e_read_phy_reg(&adapter->hw, MII_INT_STATUS, &phy_data);
}

static int atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
		&adapter->tx_ring;
	struct atl1e_tx_buffer *tx_buffer = NULL;
	u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX);
	u16 next_to_clean = tx_ring->next_to_clean;

	while (next_to_clean != hw_next_to_clean) {
		tx_buffer = &tx_ring->tx_buffer[next_to_clean];
		tx_buffer->dma = 0;
		if (tx_buffer->iob) {
			netdev_tx_complete(adapter->netdev, tx_buffer->iob);
			tx_buffer->iob = NULL;
		}
		if (++next_to_clean == tx_ring->count)
			next_to_clean = 0;
	}

	tx_ring->next_to_clean = next_to_clean;

	return 1;
}

static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter)
{
	struct atl1e_rx_page_desc *rx_page_desc =
		(struct atl1e_rx_page_desc *) &adapter->rx_ring.rx_page_desc;
	u8 rx_using = rx_page_desc->rx_using;

	return (struct atl1e_rx_page *)&(rx_page_desc->rx_page[rx_using]);
}
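
/*
 * atl1e_clean_rx_irq - pass received frames to the network stack
 * @adapter: board private structure
 *
 * The hardware writes frames into the current RXF page, each preceded
 * by a receive return status (RRS) block, and advances the page's
 * write offset in host memory. The driver walks from read_offset to
 * that write offset, copying each frame into an io_buffer; once a
 * page has been consumed it is handed back to the NIC and the driver
 * switches to the other page.
 */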
static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *)
		&adapter->rx_ring;
	struct atl1e_rx_page_desc *rx_page_desc =
		(struct atl1e_rx_page_desc *) &rx_ring->rx_page_desc;
	struct io_buffer *iob = NULL;
	struct atl1e_rx_page *rx_page = atl1e_get_rx_page(adapter);
	u32 packet_size, write_offset;
	struct atl1e_recv_ret_status *prrs;

	write_offset = *(rx_page->write_offset_addr);
	if (rx_page->read_offset >= write_offset)
		return;

	do {
		/* get new packet's rrs */
		prrs = (struct atl1e_recv_ret_status *) (rx_page->addr +
							 rx_page->read_offset);
		/* check sequence number */
		if (prrs->seq_num != rx_page_desc->rx_nxseq) {
			DBG("atl1e %s: RX sequence number error (%d != %d)\n",
			    netdev->name, prrs->seq_num,
			    rx_page_desc->rx_nxseq);
			rx_page_desc->rx_nxseq++;
			goto fatal_err;
		}

		rx_page_desc->rx_nxseq++;

		/* error packet */
		if (prrs->pkt_flag & RRS_IS_ERR_FRAME) {
			if (prrs->err_flag & (RRS_ERR_BAD_CRC |
					      RRS_ERR_DRIBBLE | RRS_ERR_CODE |
					      RRS_ERR_TRUNC)) {
				/* hardware error, discard this
				   packet */
				netdev_rx_err(netdev, NULL, EIO);
				goto skip_pkt;
			}
		}

		packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
			       RRS_PKT_SIZE_MASK) - ETH_FCS_LEN;
		iob = alloc_iob(packet_size + NET_IP_ALIGN);
		if (iob == NULL) {
			DBG("atl1e %s: dropping packet under memory pressure\n",
			    netdev->name);
			goto skip_pkt;
		}
		iob_reserve(iob, NET_IP_ALIGN);
		memcpy(iob->data, (u8 *)(prrs + 1), packet_size);
		iob_put(iob, packet_size);

		netdev_rx(netdev, iob);

skip_pkt:
		/* skip current packet whether it's ok or not. */
		rx_page->read_offset +=
			(((u32)((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
				RRS_PKT_SIZE_MASK) +
			  sizeof(struct atl1e_recv_ret_status) + 31) &
			 0xFFFFFFE0);

		if (rx_page->read_offset >= rx_ring->page_size) {
			/* mark this page clean */
			u16 reg_addr;
			u8 rx_using;

			rx_page->read_offset =
				*(rx_page->write_offset_addr) = 0;
			rx_using = rx_page_desc->rx_using;
			reg_addr =
				atl1e_rx_page_vld_regs[rx_using];
			AT_WRITE_REGB(&adapter->hw, reg_addr, 1);
			rx_page_desc->rx_using ^= 1;
			rx_page = atl1e_get_rx_page(adapter);
		}
		write_offset = *(rx_page->write_offset_addr);
	} while (rx_page->read_offset < write_offset);

	return;

fatal_err:
	if (!netdev_link_ok(adapter->netdev))
		atl1e_reset(adapter);
}

/*
 * atl1e_poll - poll for completed transmissions and received packets
 * @netdev: network device
 */
static void atl1e_poll(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct atl1e_hw *hw = &adapter->hw;
	int max_ints = 64;
	u32 status;

	do {
		status = AT_READ_REG(hw, REG_ISR);
		if ((status & IMR_NORMAL_MASK) == 0)
			break;

		/* link event */
		if (status & ISR_GPHY)
			atl1e_clear_phy_int(adapter);
		/* Ack ISR */
		AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);

		/* check if PCIE PHY Link down */
		if (status & ISR_PHY_LINKDOWN) {
			DBG("atl1e: PCI-E PHY link down: %x\n", status);
			if (netdev_link_ok(adapter->netdev)) {
				/* reset MAC */
				atl1e_irq_reset(adapter);
				atl1e_reset(adapter);
				break;
			}
		}

		/* check if DMA read/write error */
		if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
			DBG("atl1e: PCI-E DMA RW error: %x\n", status);
			atl1e_irq_reset(adapter);
			atl1e_reset(adapter);
			break;
		}

		/* link event */
		if (status & (ISR_GPHY | ISR_MANUAL)) {
			atl1e_check_link(adapter);
			break;
		}

		/* transmit event */
		if (status & ISR_TX_EVENT)
			atl1e_clean_tx_irq(adapter);

		if (status & ISR_RX_EVENT)
			atl1e_clean_rx_irq(adapter);
	} while (--max_ints > 0);

	/* re-enable Interrupt */
	AT_WRITE_REG(&adapter->hw, REG_ISR, 0);

	return;
}
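
/*
 * atl1e_tpd_avail - number of free TX descriptors
 * @adapter: board private structure
 *
 * One slot is always left unused so that next_to_use == next_to_clean
 * unambiguously means an empty ring rather than a full one.
 */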
static inline u16 atl1e_tpd_avail(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
	u16 next_to_use = 0;
	u16 next_to_clean = 0;

	next_to_clean = tx_ring->next_to_clean;
	next_to_use = tx_ring->next_to_use;

	return (u16)(next_to_clean > next_to_use) ?
		(next_to_clean - next_to_use - 1) :
		(tx_ring->count + next_to_clean - next_to_use - 1);
}

/*
 * get next usable tpd
 * Note: should call atl1e_tpd_avail to make sure
 * there is enough tpd to use
 */
static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
	u16 next_to_use = 0;

	next_to_use = tx_ring->next_to_use;
	if (++tx_ring->next_to_use == tx_ring->count)
		tx_ring->next_to_use = 0;

	memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc));
	return (struct atl1e_tpd_desc *)&tx_ring->desc[next_to_use];
}

static struct atl1e_tx_buffer *
atl1e_get_tx_buffer(struct atl1e_adapter *adapter, struct atl1e_tpd_desc *tpd)
{
	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;

	return &tx_ring->tx_buffer[tpd - tx_ring->desc];
}

static void atl1e_tx_map(struct atl1e_adapter *adapter,
			 struct io_buffer *iob, struct atl1e_tpd_desc *tpd)
{
	struct atl1e_tx_buffer *tx_buffer = NULL;
	u16 buf_len = iob_len(iob);

	tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
	tx_buffer->iob = iob;
	tx_buffer->length = buf_len;
	tx_buffer->dma = virt_to_bus(iob->data);
	tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
	tpd->word2 = ((tpd->word2 & ~TPD_BUFLEN_MASK) |
		      ((cpu_to_le32(buf_len) & TPD_BUFLEN_MASK) <<
		       TPD_BUFLEN_SHIFT));
	tpd->word3 |= 1 << TPD_EOP_SHIFT;
}

static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count __unused,
			   struct atl1e_tpd_desc *tpd __unused)
{
	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;

	wmb();
	AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use);
}

static int atl1e_xmit_frame(struct net_device *netdev, struct io_buffer *iob)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	u16 tpd_req = 1;
	struct atl1e_tpd_desc *tpd;

	if (!netdev_link_ok(netdev)) {
		return -EINVAL;
	}

	if (atl1e_tpd_avail(adapter) < tpd_req) {
		return -EBUSY;
	}

	tpd = atl1e_get_tpd(adapter);

	atl1e_tx_map(adapter, iob, tpd);
	atl1e_tx_queue(adapter, tpd_req, tpd);

	return 0;
}

int atl1e_up(struct atl1e_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	u32 val;

	/* hardware has been reset, we need to reload some things */
	err = atl1e_init_hw(&adapter->hw);
	if (err) {
		return -EIO;
	}

	atl1e_init_ring_ptrs(adapter);

	memcpy(adapter->hw.mac_addr, netdev->ll_addr, ETH_ALEN);

	if (atl1e_configure(adapter) != 0) {
		return -EIO;
	}

	atl1e_irq_disable(adapter);

	val = AT_READ_REG(&adapter->hw, REG_MASTER_CTRL);
	AT_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
		     val | MASTER_CTRL_MANUAL_INT);

	return err;
}

void atl1e_irq(struct net_device *netdev, int enable)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	if (enable)
		atl1e_irq_enable(adapter);
	else
		atl1e_irq_disable(adapter);
}

void atl1e_down(struct atl1e_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* reset MAC to disable all RX/TX */
	atl1e_reset_hw(&adapter->hw);
	mdelay(1);

	netdev_link_down(netdev);
	adapter->link_speed = SPEED_0;
	adapter->link_duplex = -1;

	atl1e_clean_tx_ring(adapter);
	atl1e_clean_rx_ring(adapter);
}

/*
 * atl1e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int atl1e_open(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	int err;

	/* allocate rx/tx dma buffer & descriptors */
	atl1e_init_ring_resources(adapter);
	err = atl1e_setup_ring_resources(adapter);
	if (err)
		return err;

	err = atl1e_up(adapter);
	if (err)
		goto err_up;

	return 0;

err_up:
	atl1e_free_ring_resources(adapter);
	atl1e_reset_hw(&adapter->hw);

	return err;
}

/*
 * atl1e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static void atl1e_close(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	atl1e_down(adapter);
	atl1e_free_ring_resources(adapter);
}

static struct net_device_operations atl1e_netdev_ops = {
	.open		= atl1e_open,
	.close		= atl1e_close,
	.transmit	= atl1e_xmit_frame,
	.poll		= atl1e_poll,
	.irq		= atl1e_irq,
};

static void atl1e_init_netdev(struct net_device *netdev, struct pci_device *pdev)
{
	netdev_init(netdev, &atl1e_netdev_ops);

	netdev->dev = &pdev->dev;
	pci_set_drvdata(pdev, netdev);
}

/*
 * atl1e_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in atl1e_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * atl1e_probe initializes an adapter identified by a pci_device structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int atl1e_probe(struct pci_device *pdev,
		       const struct pci_device_id *ent __unused)
{
	struct net_device *netdev;
	struct atl1e_adapter *adapter = NULL;
	static int cards_found;
	int err = 0;

	adjust_pci_device(pdev);

	netdev = alloc_etherdev(sizeof(struct atl1e_adapter));
	if (netdev == NULL) {
		err = -ENOMEM;
		DBG("atl1e: out of memory allocating net_device\n");
		goto err;
	}

	atl1e_init_netdev(netdev, pdev);

	adapter = netdev_priv(netdev);
	adapter->bd_number = cards_found;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.adapter = adapter;
	if (!pdev->membase) {
		err = -EIO;
		DBG("atl1e: cannot map device registers\n");
		goto err_free_netdev;
	}
	adapter->hw.hw_addr = bus_to_virt(pdev->membase);

	/* init mii data */
	adapter->mii.dev = netdev;
	adapter->mii.mdio_read = atl1e_mdio_read;
	adapter->mii.mdio_write = atl1e_mdio_write;
	adapter->mii.phy_id_mask = 0x1f;
	adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;

	/* get user settings */
	adapter->tx_ring.count = TX_DESC_COUNT;
	adapter->rx_ring.page_size = RX_MEM_SIZE;

	atl1e_setup_pcicmd(pdev);

	/* setup the private structure */
	err = atl1e_sw_init(adapter);
	if (err) {
		DBG("atl1e: private data init failed\n");
		goto err_free_netdev;
	}

	/* Init GPHY as early as possible due to power saving issue */
	atl1e_phy_init(&adapter->hw);

	/* reset the controller to
	 * put the device in a known good starting state */
	err = atl1e_reset_hw(&adapter->hw);
	if (err) {
		err = -EIO;
		goto err_free_netdev;
	}

	/* This may have been run by a zero-wait timer around
	   now... unclear. */
	atl1e_restart_autoneg(&adapter->hw);

	if (atl1e_read_mac_addr(&adapter->hw) != 0) {
		DBG("atl1e: cannot read MAC address from EEPROM\n");
		err = -EIO;
		goto err_free_netdev;
	}

	memcpy(netdev->hw_addr, adapter->hw.perm_mac_addr, ETH_ALEN);
	memcpy(netdev->ll_addr, adapter->hw.mac_addr, ETH_ALEN);
	DBG("atl1e: Attansic L1E Ethernet controller on %s, "
	    "%02x:%02x:%02x:%02x:%02x:%02x\n", adapter->netdev->name,
	    adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
	    adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
	    adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);

	err = register_netdev(netdev);
	if (err) {
		DBG("atl1e: cannot register network device\n");
		goto err_free_netdev;
	}

	netdev_link_down(netdev);
	cards_found++;
	return 0;

err_free_netdev:
	netdev_nullify(netdev);
	netdev_put(netdev);
err:
	return err;
}

/*
 * atl1e_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * atl1e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void atl1e_remove(struct pci_device *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);
	atl1e_free_ring_resources(adapter);
	atl1e_force_ps(&adapter->hw);
	netdev_nullify(netdev);
	netdev_put(netdev);
}

struct pci_driver atl1e_driver __pci_driver = {
	.ids		= atl1e_pci_tbl,
	.id_count	= (sizeof(atl1e_pci_tbl) / sizeof(atl1e_pci_tbl[0])),
	.probe		= atl1e_probe,
	.remove		= atl1e_remove,
};

/********** Hardware-level functions: **********/

/*
 * check_eeprom_exist
 * return 0 if the EEPROM exists
 */
int atl1e_check_eeprom_exist(struct atl1e_hw *hw)
{
	u32 value;

	value = AT_READ_REG(hw, REG_SPI_FLASH_CTRL);
	if (value & SPI_FLASH_CTRL_EN_VPD) {
		value &= ~SPI_FLASH_CTRL_EN_VPD;
		AT_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
	}
	value = AT_READ_REGW(hw, REG_PCIE_CAP_LIST);
	return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
}

void atl1e_hw_set_mac_addr(struct atl1e_hw *hw)
{
	u32 value;

	/*
	 * 00-0B-6A-F6-00-DC
	 * 0: 6AF600DC 1: 000B
	 * low dword
	 */
	value = (((u32)hw->mac_addr[2]) << 24) |
		(((u32)hw->mac_addr[3]) << 16) |
		(((u32)hw->mac_addr[4]) << 8) |
		(((u32)hw->mac_addr[5]));
	AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);

	/* high dword */
	value = (((u32)hw->mac_addr[0]) << 8) |
		(((u32)hw->mac_addr[1]));
	AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
}

/*
 * atl1e_get_permanent_address
 * return 0 if a valid MAC address was obtained
 */
static int atl1e_get_permanent_address(struct atl1e_hw *hw)
{
	union {
		u32 dword[2];
		u8 byte[8];
	} hw_addr;
	u32 i;
	u32 twsi_ctrl_data;
	u8 eth_addr[ETH_ALEN];

	if (!atl1e_check_eeprom_exist(hw)) {
		/* eeprom exists */
		twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL);
		twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
		AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data);
		for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) {
			mdelay(10);
			twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL);
			if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0)
				break;
		}
		if (i >= AT_TWSI_EEPROM_TIMEOUT)
			return AT_ERR_TIMEOUT;
	}

	/* the MAC address may also have been set by the BIOS */
	hw_addr.dword[0] = AT_READ_REG(hw, REG_MAC_STA_ADDR);
	hw_addr.dword[1] = AT_READ_REG(hw, REG_MAC_STA_ADDR + 4);

	for (i = 0; i < ETH_ALEN; i++) {
		eth_addr[ETH_ALEN - i - 1] = hw_addr.byte[i];
	}

	memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
	return 0;
}

void atl1e_force_ps(struct atl1e_hw *hw)
{
	AT_WRITE_REGW(hw, REG_GPHY_CTRL,
		      GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET);
}

/*
 * Reads the adapter's MAC address from the EEPROM
 *
 * hw - Struct containing variables accessed by shared code
 */
int atl1e_read_mac_addr(struct atl1e_hw *hw)
{
	int err = 0;

	err = atl1e_get_permanent_address(hw);
	if (err)
		return AT_ERR_EEPROM;
	memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr));
	return 0;
}

/*
 * Reads the value from a PHY register
 * hw - Struct containing variables accessed by shared code
 * reg_addr - address of the PHY register to read
 */
int atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data)
{
	u32 val;
	int i;

	val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
	      MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW |
	      MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;

	AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
	wmb();

	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
		udelay(2);
		val = AT_READ_REG(hw, REG_MDIO_CTRL);
		if (!(val & (MDIO_START | MDIO_BUSY)))
			break;
		wmb();
	}
	if (!(val & (MDIO_START | MDIO_BUSY))) {
		*phy_data = (u16)val;
		return 0;
	}

	return AT_ERR_PHY;
}

/*
 * Writes a value to a PHY register
 * hw - Struct containing variables accessed by shared code
 * reg_addr - address of the PHY register to write
 * data - data to write to the PHY
 */
int atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data)
{
	int i;
	u32 val;

	val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
	      (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
	      MDIO_SUP_PREAMBLE |
	      MDIO_START |
	      MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;

	AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
	wmb();

	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
		udelay(2);
		val = AT_READ_REG(hw, REG_MDIO_CTRL);
		if (!(val & (MDIO_START | MDIO_BUSY)))
			break;
		wmb();
	}

	if (!(val & (MDIO_START | MDIO_BUSY)))
		return 0;

	return AT_ERR_PHY;
}

/*
 * atl1e_init_pcie - init PCIE module
 */
static void atl1e_init_pcie(struct atl1e_hw *hw)
{
	u32 value;

	/* the two lines below are commented out to save more power when suspended
	value = LTSSM_TEST_MODE_DEF;
	AT_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value);
	*/

	/* pcie flow control mode change */
	value = AT_READ_REG(hw, 0x1008);
	value |= 0x8000;
	AT_WRITE_REG(hw, 0x1008, value);
}

/*
 * Configures PHY autoneg and flow control advertisement settings
 *
 * hw - Struct containing variables accessed by shared code
 */
static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
{
	s32 ret_val;
	u16 mii_autoneg_adv_reg;
	u16 mii_1000t_ctrl_reg;

	if (0 != hw->mii_autoneg_adv_reg)
		return 0;

	/* Read the MII Auto-Neg Advertisement Register (Address 4/9). */
	mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
	mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK;

	/*
	 * First we clear all the 10/100 mb speed bits in the Auto-Neg
	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
	 * the 1000Base-T control Register (Address 9).
	 */
	mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
	mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;

	/* Assume auto-detect media type */
	mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
				MII_AR_10T_FD_CAPS |
				MII_AR_100TX_HD_CAPS |
				MII_AR_100TX_FD_CAPS);
	if (hw->nic_type == athr_l1e) {
		mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS;
	}

	/* flow control fixed to enable all */
	mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);

	hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
	hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;

	ret_val = atl1e_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
		ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR,
					      mii_1000t_ctrl_reg);
		if (ret_val)
			return ret_val;
	}

	return 0;
}

/*
 * Resets the PHY and makes all configuration take effect
 *
 * hw - Struct containing variables accessed by shared code
 *
 * Sets bits 15 and 12 of the MII control register (for the F001 bug)
 */
int atl1e_phy_commit(struct atl1e_hw *hw)
{
	int ret_val;
	u16 phy_data;

	phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;

	ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
	if (ret_val) {
		u32 val;
		int i;
		/**************************************
		 * pcie serdes link may be down!
		 **************************************/
		for (i = 0; i < 25; i++) {
			mdelay(1);
			val = AT_READ_REG(hw, REG_MDIO_CTRL);
			if (!(val & (MDIO_START | MDIO_BUSY)))
				break;
		}

		if (0 != (val & (MDIO_START | MDIO_BUSY))) {
			DBG("atl1e: PCI-E link down for at least 25ms\n");
			return ret_val;
		}

		DBG("atl1e: PCI-E link up after %d ms\n", i);
	}
	return 0;
}

int atl1e_phy_init(struct atl1e_hw *hw)
{
	s32 ret_val;
	u16 phy_val;

	if (hw->phy_configured) {
		if (hw->re_autoneg) {
			hw->re_autoneg = 0;
			return atl1e_restart_autoneg(hw);
		}
		return 0;
	}

	/* RESET GPHY Core */
	AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT);
	mdelay(2);
	AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
		      GPHY_CTRL_EXT_RESET);
	mdelay(2);

	/* patches */
	/* p1. enable hibernation mode */
	ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0xB);
	if (ret_val)
		return ret_val;
	ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0xBC00);
	if (ret_val)
		return ret_val;
	/* p2. set Class A/B for all modes */
	ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0);
	if (ret_val)
		return ret_val;
	phy_val = 0x02ef;
	/* remove Class AB */
	/* phy_val = hw->emi_ca ? 0x02ef : 0x02df; */
	ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, phy_val);
	if (ret_val)
		return ret_val;
	/* p3. 10B ??? */
	ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x12);
	if (ret_val)
		return ret_val;
	ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x4C04);
	if (ret_val)
		return ret_val;
	/* p4. 1000T power */
	ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x4);
	if (ret_val)
		return ret_val;
	ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x8BBB);
	if (ret_val)
		return ret_val;
	ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x5);
	if (ret_val)
		return ret_val;
	ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x2C46);
	if (ret_val)
		return ret_val;

	mdelay(1);

	/* Enable PHY LinkChange Interrupt */
	ret_val = atl1e_write_phy_reg(hw, MII_INT_CTRL, 0xC00);
	if (ret_val) {
		DBG("atl1e: Error enable PHY linkChange Interrupt\n");
		return ret_val;
	}
	/* setup AutoNeg parameters */
	ret_val = atl1e_phy_setup_autoneg_adv(hw);
	if (ret_val) {
		DBG("atl1e: Error Setting up Auto-Negotiation\n");
		return ret_val;
	}
	/* SW.Reset & En-Auto-Neg to restart Auto-Neg */
	DBG("atl1e: Restarting Auto-Neg");
	ret_val = atl1e_phy_commit(hw);
	if (ret_val) {
		DBG("atl1e: Error Resetting the phy");
		return ret_val;
	}

	hw->phy_configured = 1;

	return 0;
}

/*
 * Reset the transmit and receive units; mask and clear all interrupts.
 * hw - Struct containing variables accessed by shared code
 * return : 0 or idle status (if error)
 */
int atl1e_reset_hw(struct atl1e_hw *hw)
{
	struct atl1e_adapter *adapter = hw->adapter;
	struct pci_device *pdev = adapter->pdev;
	int timeout = 0;
	u32 idle_status_data = 0;
	u16 pci_cfg_cmd_word = 0;

	/* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_cfg_cmd_word);
	if ((pci_cfg_cmd_word & (PCI_COMMAND_IO | PCI_COMMAND_MEM |
				 PCI_COMMAND_MASTER))
	    != (PCI_COMMAND_IO | PCI_COMMAND_MEM |
		PCI_COMMAND_MASTER)) {
		pci_cfg_cmd_word |= (PCI_COMMAND_IO | PCI_COMMAND_MEM |
				     PCI_COMMAND_MASTER);
		pci_write_config_word(pdev, PCI_COMMAND, pci_cfg_cmd_word);
	}

	/*
	 * Issue Soft Reset to the MAC. This will reset the chip's
	 * transmit, receive and DMA units. It will not affect
	 * the current PCI configuration. The global reset bit is self-
	 * clearing, and should clear within a microsecond.
	 */
	AT_WRITE_REG(hw, REG_MASTER_CTRL,
		     MASTER_CTRL_LED_MODE | MASTER_CTRL_SOFT_RST);
	wmb();
	mdelay(1);

	/* Wait at least 10ms for all modules to be idle */
	for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
		idle_status_data = AT_READ_REG(hw, REG_IDLE_STATUS);
		if (idle_status_data == 0)
			break;
		mdelay(1);
	}

	if (timeout >= AT_HW_MAX_IDLE_DELAY) {
		DBG("atl1e: MAC reset timeout\n");
		return AT_ERR_TIMEOUT;
	}

	return 0;
}

/*
 * Performs basic configuration of the adapter.
 *
 * hw - Struct containing variables accessed by shared code
 * Assumes that the controller has previously been reset and is in a
 * post-reset uninitialized state. Initializes the multicast table
 * and calls routines to set up the link.
 * Leaves the transmit and receive units disabled and uninitialized.
 */
int atl1e_init_hw(struct atl1e_hw *hw)
{
	s32 ret_val = 0;

	atl1e_init_pcie(hw);

	/* Zero out the Multicast HASH table */
	/* clear the old settings from the multicast hash table */
	AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
	AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);

	ret_val = atl1e_phy_init(hw);

	return ret_val;
}

/*
 * Detects the current speed and duplex settings of the hardware.
 *
 * hw - Struct containing variables accessed by shared code
 * speed - Speed of the connection
 * duplex - Duplex setting of the connection
 */
int atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex)
{
	int err;
	u16 phy_data;

	/* Read PHY Specific Status Register (17) */
	err = atl1e_read_phy_reg(hw, MII_AT001_PSSR, &phy_data);
	if (err)
		return err;

	if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED))
		return AT_ERR_PHY_RES;

	switch (phy_data & MII_AT001_PSSR_SPEED) {
	case MII_AT001_PSSR_1000MBS:
		*speed = SPEED_1000;
		break;
	case MII_AT001_PSSR_100MBS:
		*speed = SPEED_100;
		break;
	case MII_AT001_PSSR_10MBS:
		*speed = SPEED_10;
		break;
	default:
		return AT_ERR_PHY_SPEED;
		break;
	}

	if (phy_data & MII_AT001_PSSR_DPLX)
		*duplex = FULL_DUPLEX;
	else
		*duplex = HALF_DUPLEX;

	return 0;
}

int atl1e_restart_autoneg(struct atl1e_hw *hw)
{
	int err = 0;

	err = atl1e_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
	if (err)
		return err;

	if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
		err = atl1e_write_phy_reg(hw, MII_AT001_CR,
					  hw->mii_1000t_ctrl_reg);
		if (err)
			return err;
	}

	err = atl1e_write_phy_reg(hw, MII_BMCR,
				  MII_CR_RESET | MII_CR_AUTO_NEG_EN |
				  MII_CR_RESTART_AUTO_NEG);
	return err;
}