
atl1e.c 45KB

  1. /*
  2. * Copyright(c) 2007 Atheros Corporation. All rights reserved.
  3. *
  4. * Derived from Intel e1000 driver
  5. * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
  6. *
  7. * Modified for iPXE, October 2009 by Joshua Oreman <oremanj@rwcr.net>.
  8. *
  9. * This program is free software; you can redistribute it and/or modify it
  10. * under the terms of the GNU General Public License as published by the Free
  11. * Software Foundation; either version 2 of the License, or (at your option)
  12. * any later version.
  13. *
  14. * This program is distributed in the hope that it will be useful, but WITHOUT
  15. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  16. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  17. * more details.
  18. *
  19. * You should have received a copy of the GNU General Public License along with
  20. * this program; if not, write to the Free Software Foundation, Inc., 51
  21. * Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  22. */
  23. FILE_LICENCE ( GPL2_OR_LATER );
  24. #include "atl1e.h"
  25. /* User-tweakable parameters: */
  26. #define TX_DESC_COUNT 32 /* TX descriptors, minimum 32 */
  27. #define RX_MEM_SIZE 8192 /* RX area size, minimum 8KB */
  28. #define MAX_FRAME_SIZE 1500 /* Maximum MTU supported, minimum 1500 */
  29. /* Arcane parameters: */
  30. #define PREAMBLE_LEN 7
  31. #define RX_JUMBO_THRESH ((MAX_FRAME_SIZE + ETH_HLEN + \
  32. VLAN_HLEN + ETH_FCS_LEN + 7) >> 3)
  33. #define IMT_VAL 100 /* interrupt moderator timer, us */
  34. #define ICT_VAL 50000 /* interrupt clear timer, us */
  35. #define SMB_TIMER 200000
  36. #define RRD_THRESH 1 /* packets to queue before interrupt */
  37. #define TPD_BURST 5
  38. #define TPD_THRESH (TX_DESC_COUNT / 2)
  39. #define RX_COUNT_DOWN 4
  40. #define TX_COUNT_DOWN (IMT_VAL * 4 / 3)
  41. #define DMAR_DLY_CNT 15
  42. #define DMAW_DLY_CNT 4
  43. #define PCI_DEVICE_ID_ATTANSIC_L1E 0x1026
  44. /*
  45. * atl1e_pci_tbl - PCI Device ID Table
  46. *
  47. * Wildcard entries (PCI_ANY_ID) should come last
  48. * Last entry must be all 0s
  49. *
  50. * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
  51. * Class, Class Mask, private data (not used) }
  52. */
  53. static struct pci_device_id atl1e_pci_tbl[] = {
  54. PCI_ROM(0x1969, 0x1026, "atl1e_26", "Attansic L1E 0x1026", 0),
  55. PCI_ROM(0x1969, 0x1066, "atl1e_66", "Attansic L1E 0x1066", 0),
  56. };
  57. static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter);
  58. static const u16
  59. atl1e_rx_page_vld_regs[AT_PAGE_NUM_PER_QUEUE] =
  60. {
  61. REG_HOST_RXF0_PAGE0_VLD, REG_HOST_RXF0_PAGE1_VLD
  62. };
  63. static const u16
  64. atl1e_rx_page_lo_addr_regs[AT_PAGE_NUM_PER_QUEUE] =
  65. {
  66. REG_HOST_RXF0_PAGE0_LO, REG_HOST_RXF0_PAGE1_LO
  67. };
  68. static const u16
  69. atl1e_rx_page_write_offset_regs[AT_PAGE_NUM_PER_QUEUE] =
  70. {
  71. REG_HOST_RXF0_MB0_LO, REG_HOST_RXF0_MB1_LO
  72. };
  73. static const u16 atl1e_pay_load_size[] = {
  74. 128, 256, 512, 1024, 2048, 4096,
  75. };
  76. /*
  77. * atl1e_irq_enable - Enable default interrupt generation settings
  78. * @adapter: board private structure
  79. */
  80. static inline void atl1e_irq_enable(struct atl1e_adapter *adapter)
  81. {
  82. AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
  83. AT_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
  84. AT_WRITE_FLUSH(&adapter->hw);
  85. }
  86. /*
  87. * atl1e_irq_disable - Mask off interrupt generation on the NIC
  88. * @adapter: board private structure
  89. */
  90. static inline void atl1e_irq_disable(struct atl1e_adapter *adapter)
  91. {
  92. AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
  93. AT_WRITE_FLUSH(&adapter->hw);
  94. }
  95. /*
  96. * atl1e_irq_reset - reset interrupt configuration on the NIC
  97. * @adapter: board private structure
  98. */
  99. static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
  100. {
  101. AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
  102. AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
  103. AT_WRITE_FLUSH(&adapter->hw);
  104. }
  105. static void atl1e_reset(struct atl1e_adapter *adapter)
  106. {
  107. atl1e_down(adapter);
  108. atl1e_up(adapter);
  109. }
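/*
 * atl1e_check_link - check the PHY link state and update the MAC and the
 * iPXE link status to match
 * @adapter: board private structure
 */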
  110. static int atl1e_check_link(struct atl1e_adapter *adapter)
  111. {
  112. struct atl1e_hw *hw = &adapter->hw;
  113. struct net_device *netdev = adapter->netdev;
  114. int err = 0;
  115. u16 speed, duplex, phy_data;
  116. /* MII_BMSR must be read twice */
  117. atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
  118. atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
  119. if ((phy_data & BMSR_LSTATUS) == 0) {
  120. /* link down */
  121. if (netdev_link_ok(netdev)) { /* old link state: Up */
  122. u32 value;
  123. /* disable rx */
  124. value = AT_READ_REG(hw, REG_MAC_CTRL);
  125. value &= ~MAC_CTRL_RX_EN;
  126. AT_WRITE_REG(hw, REG_MAC_CTRL, value);
  127. adapter->link_speed = SPEED_0;
  128. DBG("atl1e: %s link is down\n", netdev->name);
  129. netdev_link_down(netdev);
  130. }
  131. } else {
  132. /* Link Up */
  133. err = atl1e_get_speed_and_duplex(hw, &speed, &duplex);
  134. if (err)
  135. return err;
  136. /* link result is our setting */
  137. if (adapter->link_speed != speed ||
  138. adapter->link_duplex != duplex) {
  139. adapter->link_speed = speed;
  140. adapter->link_duplex = duplex;
  141. atl1e_setup_mac_ctrl(adapter);
  142. DBG("atl1e: %s link is up, %d Mbps, %s duplex\n",
  143. netdev->name, adapter->link_speed,
  144. adapter->link_duplex == FULL_DUPLEX ?
  145. "full" : "half");
  146. netdev_link_up(netdev);
  147. }
  148. }
  149. return 0;
  150. }
  151. static int atl1e_mdio_read(struct net_device *netdev, int phy_id __unused,
  152. int reg_num)
  153. {
  154. struct atl1e_adapter *adapter = netdev_priv(netdev);
  155. u16 result;
  156. atl1e_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result);
  157. return result;
  158. }
  159. static void atl1e_mdio_write(struct net_device *netdev, int phy_id __unused,
  160. int reg_num, int val)
  161. {
  162. struct atl1e_adapter *adapter = netdev_priv(netdev);
  163. atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
  164. }
  165. static void atl1e_setup_pcicmd(struct pci_device *pdev)
  166. {
  167. u16 cmd;
  168. pci_read_config_word(pdev, PCI_COMMAND, &cmd);
  169. cmd |= (PCI_COMMAND_MEM | PCI_COMMAND_MASTER);
  170. pci_write_config_word(pdev, PCI_COMMAND, cmd);
  171. /*
  172. * Some motherboard BIOS (PXE/EFI) drivers may set PME while they
  173. * transfer control to the OS (Windows/Linux), so we should clear
  174. * this bit before the NIC can work normally.
  175. */
  176. pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
  177. mdelay(1);
  178. }
  179. /*
  180. * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter)
  181. * @adapter: board private structure to initialize
  182. *
  183. * atl1e_sw_init initializes the Adapter private data structure.
  184. * Fields are initialized based on PCI device information and
  185. * OS network device settings (MTU size).
  186. */
  187. static int atl1e_sw_init(struct atl1e_adapter *adapter)
  188. {
  189. struct atl1e_hw *hw = &adapter->hw;
  190. struct pci_device *pdev = adapter->pdev;
  191. u32 phy_status_data = 0;
  192. u8 rev_id = 0;
  193. adapter->link_speed = SPEED_0; /* hardware init */
  194. adapter->link_duplex = FULL_DUPLEX;
  195. /* PCI config space info */
  196. pci_read_config_byte(pdev, PCI_REVISION, &rev_id);
  197. phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
  198. /* nic type */
  199. if (rev_id >= 0xF0) {
  200. hw->nic_type = athr_l2e_revB;
  201. } else {
  202. if (phy_status_data & PHY_STATUS_100M)
  203. hw->nic_type = athr_l1e;
  204. else
  205. hw->nic_type = athr_l2e_revA;
  206. }
  207. phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
  208. hw->emi_ca = !!(phy_status_data & PHY_STATUS_EMI_CA);
  209. hw->phy_configured = 0;
  210. /* need confirm */
  211. hw->dmar_block = atl1e_dma_req_1024;
  212. hw->dmaw_block = atl1e_dma_req_1024;
  213. return 0;
  214. }
  215. /*
  216. * atl1e_clean_tx_ring - free all Tx buffers for device close
  217. * @adapter: board private structure
  218. */
  219. static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
  220. {
  221. struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
  222. &adapter->tx_ring;
  223. struct atl1e_tx_buffer *tx_buffer = NULL;
  224. u16 index, ring_count = tx_ring->count;
  225. if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL)
  226. return;
  227. for (index = 0; index < ring_count; index++) {
  228. tx_buffer = &tx_ring->tx_buffer[index];
  229. if (tx_buffer->iob) {
  230. netdev_tx_complete(adapter->netdev, tx_buffer->iob);
  231. tx_buffer->dma = 0;
  232. tx_buffer->iob = NULL;
  233. }
  234. }
  235. /* Zero out Tx-buffers */
  236. memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) *
  237. ring_count);
  238. memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) *
  239. ring_count);
  240. }
  241. /*
  242. * atl1e_clean_rx_ring - Free rx-reservation iobs
  243. * @adapter: board private structure
  244. */
  245. static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
  246. {
  247. struct atl1e_rx_ring *rx_ring =
  248. (struct atl1e_rx_ring *)&adapter->rx_ring;
  249. struct atl1e_rx_page_desc *rx_page_desc = &rx_ring->rx_page_desc;
  250. u16 j;
  251. if (adapter->ring_vir_addr == NULL)
  252. return;
  253. /* Zero out the descriptor ring */
  254. for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
  255. if (rx_page_desc->rx_page[j].addr != NULL) {
  256. memset(rx_page_desc->rx_page[j].addr, 0,
  257. rx_ring->real_page_size);
  258. }
  259. }
  260. }
  261. static void atl1e_cal_ring_size(struct atl1e_adapter *adapter, u32 *ring_size)
  262. {
  263. *ring_size = ((u32)(adapter->tx_ring.count *
  264. sizeof(struct atl1e_tpd_desc) + 7
  265. /* tx ring, qword align */
  266. + adapter->rx_ring.real_page_size * AT_PAGE_NUM_PER_QUEUE
  267. + 31
  268. /* rx ring, 32 bytes align */
  269. + (1 + AT_PAGE_NUM_PER_QUEUE) *
  270. sizeof(u32) + 3));
  271. /* tx, rx cmd, dword align */
  272. }
  273. static void atl1e_init_ring_resources(struct atl1e_adapter *adapter)
  274. {
  275. struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
  276. rx_ring->real_page_size = adapter->rx_ring.page_size
  277. + MAX_FRAME_SIZE
  278. + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
  279. rx_ring->real_page_size = (rx_ring->real_page_size + 31) & ~31;
  280. atl1e_cal_ring_size(adapter, &adapter->ring_size);
  281. adapter->ring_vir_addr = NULL;
  282. adapter->rx_ring.desc = NULL;
  283. return;
  284. }
  285. /*
  286. * Initialize the ring read / write pointers:
  287. */
  288. static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter)
  289. {
  290. struct atl1e_tx_ring *tx_ring = NULL;
  291. struct atl1e_rx_ring *rx_ring = NULL;
  292. struct atl1e_rx_page_desc *rx_page_desc = NULL;
  293. int j;
  294. tx_ring = &adapter->tx_ring;
  295. rx_ring = &adapter->rx_ring;
  296. rx_page_desc = &rx_ring->rx_page_desc;
  297. tx_ring->next_to_use = 0;
  298. tx_ring->next_to_clean = 0;
  299. rx_page_desc->rx_using = 0;
  300. rx_page_desc->rx_nxseq = 0;
  301. for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
  302. *rx_page_desc->rx_page[j].write_offset_addr = 0;
  303. rx_page_desc->rx_page[j].read_offset = 0;
  304. }
  305. }
  306. /*
  307. * atl1e_free_ring_resources - Free Tx / RX descriptor Resources
  308. * @adapter: board private structure
  309. *
  310. * Free all transmit and receive software resources
  311. */
  312. static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
  313. {
  314. atl1e_clean_tx_ring(adapter);
  315. atl1e_clean_rx_ring(adapter);
  316. if (adapter->ring_vir_addr) {
  317. free_dma(adapter->ring_vir_addr, adapter->ring_size);
  318. adapter->ring_vir_addr = NULL;
  319. adapter->ring_dma = 0;
  320. }
  321. if (adapter->tx_ring.tx_buffer) {
  322. free(adapter->tx_ring.tx_buffer);
  323. adapter->tx_ring.tx_buffer = NULL;
  324. }
  325. }
  326. /*
  327. * atl1e_setup_ring_resources - allocate Tx / Rx descriptor resources
  328. * @adapter: board private structure
  329. *
  330. * Return 0 on success, negative on failure
  331. */
  332. static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
  333. {
  334. struct atl1e_tx_ring *tx_ring;
  335. struct atl1e_rx_ring *rx_ring;
  336. struct atl1e_rx_page_desc *rx_page_desc;
  337. int size, j;
  338. u32 offset = 0;
  339. int err = 0;
  340. if (adapter->ring_vir_addr != NULL)
  341. return 0; /* already allocated */
  342. tx_ring = &adapter->tx_ring;
  343. rx_ring = &adapter->rx_ring;
  344. /* real ring DMA buffer */
  345. size = adapter->ring_size;
  346. adapter->ring_vir_addr = malloc_dma(adapter->ring_size, 32);
  347. if (adapter->ring_vir_addr == NULL) {
  348. DBG("atl1e: out of memory allocating %d bytes for %s ring\n",
  349. adapter->ring_size, adapter->netdev->name);
  350. return -ENOMEM;
  351. }
  352. adapter->ring_dma = virt_to_bus(adapter->ring_vir_addr);
  353. memset(adapter->ring_vir_addr, 0, adapter->ring_size);
  354. rx_page_desc = &rx_ring->rx_page_desc;
  355. /* Init TPD Ring */
  356. tx_ring->dma = (adapter->ring_dma + 7) & ~7;
  357. offset = tx_ring->dma - adapter->ring_dma;
  358. tx_ring->desc = (struct atl1e_tpd_desc *)
  359. (adapter->ring_vir_addr + offset);
  360. size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
  361. tx_ring->tx_buffer = zalloc(size);
  362. if (tx_ring->tx_buffer == NULL) {
  363. DBG("atl1e: out of memory allocating %d bytes for %s txbuf\n",
  364. size, adapter->netdev->name);
  365. err = -ENOMEM;
  366. goto failed;
  367. }
  368. /* Init RXF-Pages */
  369. offset += (sizeof(struct atl1e_tpd_desc) * tx_ring->count);
  370. offset = (offset + 31) & ~31;
  371. for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
  372. rx_page_desc->rx_page[j].dma =
  373. adapter->ring_dma + offset;
  374. rx_page_desc->rx_page[j].addr =
  375. adapter->ring_vir_addr + offset;
  376. offset += rx_ring->real_page_size;
  377. }
  378. /* Init CMB dma address */
  379. tx_ring->cmb_dma = adapter->ring_dma + offset;
  380. tx_ring->cmb = (u32 *)(adapter->ring_vir_addr + offset);
  381. offset += sizeof(u32);
  382. for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
  383. rx_page_desc->rx_page[j].write_offset_dma =
  384. adapter->ring_dma + offset;
  385. rx_page_desc->rx_page[j].write_offset_addr =
  386. adapter->ring_vir_addr + offset;
  387. offset += sizeof(u32);
  388. }
  389. if (offset > adapter->ring_size) {
  390. DBG("atl1e: ring miscalculation! need %d > %d bytes\n",
  391. offset, adapter->ring_size);
  392. err = -EINVAL;
  393. goto failed;
  394. }
  395. return 0;
  396. failed:
  397. atl1e_free_ring_resources(adapter);
  398. return err;
  399. }
  400. static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
  401. {
  402. struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
  403. struct atl1e_rx_ring *rx_ring =
  404. (struct atl1e_rx_ring *)&adapter->rx_ring;
  405. struct atl1e_tx_ring *tx_ring =
  406. (struct atl1e_tx_ring *)&adapter->tx_ring;
  407. struct atl1e_rx_page_desc *rx_page_desc = NULL;
  408. int j;
  409. AT_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI, 0);
  410. AT_WRITE_REG(hw, REG_TPD_BASE_ADDR_LO, tx_ring->dma);
  411. AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u16)(tx_ring->count));
  412. AT_WRITE_REG(hw, REG_HOST_TX_CMB_LO, tx_ring->cmb_dma);
  413. rx_page_desc = &rx_ring->rx_page_desc;
  414. /* RXF Page Physical address / Page Length */
  415. AT_WRITE_REG(hw, REG_RXF0_BASE_ADDR_HI, 0);
  416. for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
  417. u32 page_phy_addr;
  418. u32 offset_phy_addr;
  419. page_phy_addr = rx_page_desc->rx_page[j].dma;
  420. offset_phy_addr = rx_page_desc->rx_page[j].write_offset_dma;
  421. AT_WRITE_REG(hw, atl1e_rx_page_lo_addr_regs[j], page_phy_addr);
  422. AT_WRITE_REG(hw, atl1e_rx_page_write_offset_regs[j],
  423. offset_phy_addr);
  424. AT_WRITE_REGB(hw, atl1e_rx_page_vld_regs[j], 1);
  425. }
  426. /* Page Length */
  427. AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size);
  428. /* Load all of the base addresses above */
  429. AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
  430. return;
  431. }
  432. static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
  433. {
  434. struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
  435. u32 dev_ctrl_data = 0;
  436. u32 max_pay_load = 0;
  437. u32 jumbo_thresh = 0;
  438. u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */
  439. /* configure TXQ param */
  440. if (hw->nic_type != athr_l2e_revB) {
  441. extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
  442. jumbo_thresh = MAX_FRAME_SIZE + extra_size;
  443. AT_WRITE_REG(hw, REG_TX_EARLY_TH, (jumbo_thresh + 7) >> 3);
  444. }
  445. dev_ctrl_data = AT_READ_REG(hw, REG_DEVICE_CTRL);
  446. max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) &
  447. DEVICE_CTRL_MAX_PAYLOAD_MASK;
  448. if (max_pay_load < hw->dmaw_block)
  449. hw->dmaw_block = max_pay_load;
  450. max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) &
  451. DEVICE_CTRL_MAX_RREQ_SZ_MASK;
  452. if (max_pay_load < hw->dmar_block)
  453. hw->dmar_block = max_pay_load;
  454. if (hw->nic_type != athr_l2e_revB)
  455. AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2,
  456. atl1e_pay_load_size[hw->dmar_block]);
  457. /* enable TXQ */
  458. AT_WRITE_REGW(hw, REG_TXQ_CTRL,
  459. ((TPD_BURST & TXQ_CTRL_NUM_TPD_BURST_MASK)
  460. << TXQ_CTRL_NUM_TPD_BURST_SHIFT)
  461. | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN);
  462. return;
  463. }
  464. static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
  465. {
  466. struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
  467. u32 rxf_len = 0;
  468. u32 rxf_low = 0;
  469. u32 rxf_high = 0;
  470. u32 rxf_thresh_data = 0;
  471. u32 rxq_ctrl_data = 0;
  472. if (hw->nic_type != athr_l2e_revB) {
  473. AT_WRITE_REGW(hw, REG_RXQ_JMBOSZ_RRDTIM,
  474. (u16)((RX_JUMBO_THRESH & RXQ_JMBOSZ_TH_MASK) <<
  475. RXQ_JMBOSZ_TH_SHIFT |
  476. (1 & RXQ_JMBO_LKAH_MASK) <<
  477. RXQ_JMBO_LKAH_SHIFT));
  478. rxf_len = AT_READ_REG(hw, REG_SRAM_RXF_LEN);
  479. rxf_high = rxf_len * 4 / 5;
  480. rxf_low = rxf_len / 5;
  481. rxf_thresh_data = ((rxf_high & RXQ_RXF_PAUSE_TH_HI_MASK)
  482. << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
  483. ((rxf_low & RXQ_RXF_PAUSE_TH_LO_MASK)
  484. << RXQ_RXF_PAUSE_TH_LO_SHIFT);
  485. AT_WRITE_REG(hw, REG_RXQ_RXF_PAUSE_THRESH, rxf_thresh_data);
  486. }
  487. /* RRS */
  488. AT_WRITE_REG(hw, REG_IDT_TABLE, 0);
  489. AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, 0);
  490. rxq_ctrl_data |= RXQ_CTRL_PBA_ALIGN_32 |
  491. RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;
  492. AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
  493. return;
  494. }
  495. static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
  496. {
  497. struct atl1e_hw *hw = &adapter->hw;
  498. u32 dma_ctrl_data = 0;
  499. dma_ctrl_data = DMA_CTRL_RXCMB_EN;
  500. dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
  501. << DMA_CTRL_DMAR_BURST_LEN_SHIFT;
  502. dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
  503. << DMA_CTRL_DMAW_BURST_LEN_SHIFT;
  504. dma_ctrl_data |= DMA_CTRL_DMAR_REQ_PRI | DMA_CTRL_DMAR_OUT_ORDER;
  505. dma_ctrl_data |= (DMAR_DLY_CNT & DMA_CTRL_DMAR_DLY_CNT_MASK)
  506. << DMA_CTRL_DMAR_DLY_CNT_SHIFT;
  507. dma_ctrl_data |= (DMAW_DLY_CNT & DMA_CTRL_DMAW_DLY_CNT_MASK)
  508. << DMA_CTRL_DMAW_DLY_CNT_SHIFT;
  509. AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
  510. return;
  511. }
  512. static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
  513. {
  514. u32 value;
  515. struct atl1e_hw *hw = &adapter->hw;
  516. /* Config MAC CTRL Register */
  517. value = MAC_CTRL_TX_EN |
  518. MAC_CTRL_RX_EN ;
  519. if (FULL_DUPLEX == adapter->link_duplex)
  520. value |= MAC_CTRL_DUPLX;
  521. value |= ((u32)((SPEED_1000 == adapter->link_speed) ?
  522. MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
  523. MAC_CTRL_SPEED_SHIFT);
  524. value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
  525. value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
  526. value |= ((PREAMBLE_LEN & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
  527. value |= MAC_CTRL_BC_EN;
  528. value |= MAC_CTRL_MC_ALL_EN;
  529. AT_WRITE_REG(hw, REG_MAC_CTRL, value);
  530. }
  531. /*
  532. * atl1e_configure - Configure Transmit & Receive Unit after Reset
  533. * @adapter: board private structure
  534. *
  535. * Configure the Tx/Rx unit of the MAC after a reset.
  536. */
  537. static int atl1e_configure(struct atl1e_adapter *adapter)
  538. {
  539. struct atl1e_hw *hw = &adapter->hw;
  540. u32 intr_status_data = 0;
  541. /* clear interrupt status */
  542. AT_WRITE_REG(hw, REG_ISR, ~0);
  543. /* 1. set MAC Address */
  544. atl1e_hw_set_mac_addr(hw);
  545. /* 2. Init the Multicast HASH table (clear) */
  546. AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
  547. AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
  548. /* 3. Clear any WOL status */
  549. AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
  550. /* 4. Descriptor Ring BaseMem/Length/Read ptr/Write ptr
  551. * TPD Ring/SMB/RXF0 Page CMBs all use the same
  552. * high 32 bits of memory */
  553. atl1e_configure_des_ring(adapter);
  554. /* 5. set Interrupt Moderator Timer */
  555. AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, IMT_VAL);
  556. AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER2_INIT, IMT_VAL);
  557. AT_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_LED_MODE |
  558. MASTER_CTRL_ITIMER_EN | MASTER_CTRL_ITIMER2_EN);
  559. /* 6. rx/tx thresholds to trigger interrupts */
  560. AT_WRITE_REGW(hw, REG_TRIG_RRD_THRESH, RRD_THRESH);
  561. AT_WRITE_REGW(hw, REG_TRIG_TPD_THRESH, TPD_THRESH);
  562. AT_WRITE_REGW(hw, REG_TRIG_RXTIMER, RX_COUNT_DOWN);
  563. AT_WRITE_REGW(hw, REG_TRIG_TXTIMER, TX_COUNT_DOWN);
  564. /* 7. set Interrupt Clear Timer */
  565. AT_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, ICT_VAL);
  566. /* 8. set MTU */
  567. AT_WRITE_REG(hw, REG_MTU, MAX_FRAME_SIZE + ETH_HLEN +
  568. VLAN_HLEN + ETH_FCS_LEN);
  569. /* 9. config TXQ early tx threshold */
  570. atl1e_configure_tx(adapter);
  571. /* 10. config RXQ */
  572. atl1e_configure_rx(adapter);
  573. /* 11. config DMA Engine */
  574. atl1e_configure_dma(adapter);
  575. /* 12. SMB timer to trigger interrupts */
  576. AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, SMB_TIMER);
  577. intr_status_data = AT_READ_REG(hw, REG_ISR);
  578. if ((intr_status_data & ISR_PHY_LINKDOWN) != 0) {
  579. DBG("atl1e: configure failed, PCIE phy link down\n");
  580. return -1;
  581. }
  582. AT_WRITE_REG(hw, REG_ISR, 0x7fffffff);
  583. return 0;
  584. }
  585. static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter)
  586. {
  587. u16 phy_data;
  588. atl1e_read_phy_reg(&adapter->hw, MII_INT_STATUS, &phy_data);
  589. }
  590. static int atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
  591. {
  592. struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
  593. &adapter->tx_ring;
  594. struct atl1e_tx_buffer *tx_buffer = NULL;
  595. u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX);
  596. u16 next_to_clean = tx_ring->next_to_clean;
  597. while (next_to_clean != hw_next_to_clean) {
  598. tx_buffer = &tx_ring->tx_buffer[next_to_clean];
  599. tx_buffer->dma = 0;
  600. if (tx_buffer->iob) {
  601. netdev_tx_complete(adapter->netdev, tx_buffer->iob);
  602. tx_buffer->iob = NULL;
  603. }
  604. if (++next_to_clean == tx_ring->count)
  605. next_to_clean = 0;
  606. }
  607. tx_ring->next_to_clean = next_to_clean;
  608. return 1;
  609. }
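/*
 * atl1e_get_rx_page - return the receive page currently being filled
 * @adapter: board private structure
 */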
  610. static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter)
  611. {
  612. struct atl1e_rx_page_desc *rx_page_desc =
  613. (struct atl1e_rx_page_desc *) &adapter->rx_ring.rx_page_desc;
  614. u8 rx_using = rx_page_desc->rx_using;
  615. return (struct atl1e_rx_page *)&(rx_page_desc->rx_page[rx_using]);
  616. }
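/*
 * atl1e_clean_rx_irq - hand received packets from the current RX page to
 * the network stack, returning each page to the hardware once consumed
 * @adapter: board private structure
 */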
  617. static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter)
  618. {
  619. struct net_device *netdev = adapter->netdev;
  620. struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *)
  621. &adapter->rx_ring;
  622. struct atl1e_rx_page_desc *rx_page_desc =
  623. (struct atl1e_rx_page_desc *) &rx_ring->rx_page_desc;
  624. struct io_buffer *iob = NULL;
  625. struct atl1e_rx_page *rx_page = atl1e_get_rx_page(adapter);
  626. u32 packet_size, write_offset;
  627. struct atl1e_recv_ret_status *prrs;
  628. write_offset = *(rx_page->write_offset_addr);
  629. if (rx_page->read_offset >= write_offset)
  630. return;
  631. do {
  632. /* get new packet's rrs */
  633. prrs = (struct atl1e_recv_ret_status *) (rx_page->addr +
  634. rx_page->read_offset);
  635. /* check sequence number */
  636. if (prrs->seq_num != rx_page_desc->rx_nxseq) {
  637. DBG("atl1e %s: RX sequence number error (%d != %d)\n",
  638. netdev->name, prrs->seq_num,
  639. rx_page_desc->rx_nxseq);
  640. rx_page_desc->rx_nxseq++;
  641. goto fatal_err;
  642. }
  643. rx_page_desc->rx_nxseq++;
  644. /* error packet */
  645. if (prrs->pkt_flag & RRS_IS_ERR_FRAME) {
  646. if (prrs->err_flag & (RRS_ERR_BAD_CRC |
  647. RRS_ERR_DRIBBLE | RRS_ERR_CODE |
  648. RRS_ERR_TRUNC)) {
  649. /* hardware error, discard this
  650. packet */
  651. netdev_rx_err(netdev, NULL, EIO);
  652. goto skip_pkt;
  653. }
  654. }
  655. packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
  656. RRS_PKT_SIZE_MASK) - ETH_FCS_LEN;
  657. iob = alloc_iob(packet_size + NET_IP_ALIGN);
  658. if (iob == NULL) {
  659. DBG("atl1e %s: dropping packet under memory pressure\n",
  660. netdev->name);
  661. goto skip_pkt;
  662. }
  663. iob_reserve(iob, NET_IP_ALIGN);
  664. memcpy(iob->data, (u8 *)(prrs + 1), packet_size);
  665. iob_put(iob, packet_size);
  666. netdev_rx(netdev, iob);
  667. skip_pkt:
  668. /* skip current packet whether it's ok or not. */
  669. rx_page->read_offset +=
  670. (((u32)((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
  671. RRS_PKT_SIZE_MASK) +
  672. sizeof(struct atl1e_recv_ret_status) + 31) &
  673. 0xFFFFFFE0);
  674. if (rx_page->read_offset >= rx_ring->page_size) {
  675. /* mark this page clean */
  676. u16 reg_addr;
  677. u8 rx_using;
  678. rx_page->read_offset =
  679. *(rx_page->write_offset_addr) = 0;
  680. rx_using = rx_page_desc->rx_using;
  681. reg_addr =
  682. atl1e_rx_page_vld_regs[rx_using];
  683. AT_WRITE_REGB(&adapter->hw, reg_addr, 1);
  684. rx_page_desc->rx_using ^= 1;
  685. rx_page = atl1e_get_rx_page(adapter);
  686. }
  687. write_offset = *(rx_page->write_offset_addr);
  688. } while (rx_page->read_offset < write_offset);
  689. return;
  690. fatal_err:
  691. if (!netdev_link_ok(adapter->netdev))
  692. atl1e_reset(adapter);
  693. }
  694. /*
  695. * atl1e_poll - poll for completed transmissions and received packets
  696. * @netdev: network device
  697. */
  698. static void atl1e_poll(struct net_device *netdev)
  699. {
  700. struct atl1e_adapter *adapter = netdev_priv(netdev);
  701. struct atl1e_hw *hw = &adapter->hw;
  702. int max_ints = 64;
  703. u32 status;
  704. do {
  705. status = AT_READ_REG(hw, REG_ISR);
  706. if ((status & IMR_NORMAL_MASK) == 0)
  707. break;
  708. /* link event */
  709. if (status & ISR_GPHY)
  710. atl1e_clear_phy_int(adapter);
  711. /* Ack ISR */
  712. AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
  713. /* check if PCIE PHY Link down */
  714. if (status & ISR_PHY_LINKDOWN) {
  715. DBG("atl1e: PCI-E PHY link down: %x\n", status);
  716. if (netdev_link_ok(adapter->netdev)) {
  717. /* reset MAC */
  718. atl1e_irq_reset(adapter);
  719. atl1e_reset(adapter);
  720. break;
  721. }
  722. }
  723. /* check if DMA read/write error */
  724. if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
  725. DBG("atl1e: PCI-E DMA RW error: %x\n", status);
  726. atl1e_irq_reset(adapter);
  727. atl1e_reset(adapter);
  728. break;
  729. }
  730. /* link event */
  731. if (status & (ISR_GPHY | ISR_MANUAL)) {
  732. atl1e_check_link(adapter);
  733. break;
  734. }
  735. /* transmit event */
  736. if (status & ISR_TX_EVENT)
  737. atl1e_clean_tx_irq(adapter);
  738. if (status & ISR_RX_EVENT)
  739. atl1e_clean_rx_irq(adapter);
  740. } while (--max_ints > 0);
  741. /* re-enable interrupts */
  742. AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
  743. return;
  744. }
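/*
 * atl1e_tpd_avail - return the number of transmit descriptors available
 * @adapter: board private structure
 */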
  745. static inline u16 atl1e_tpd_avail(struct atl1e_adapter *adapter)
  746. {
  747. struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
  748. u16 next_to_use = 0;
  749. u16 next_to_clean = 0;
  750. next_to_clean = tx_ring->next_to_clean;
  751. next_to_use = tx_ring->next_to_use;
  752. return (u16)(next_to_clean > next_to_use) ?
  753. (next_to_clean - next_to_use - 1) :
  754. (tx_ring->count + next_to_clean - next_to_use - 1);
  755. }
  756. /*
  757. * Get the next usable TPD.
  758. * Note: the caller should use atl1e_tpd_avail to make sure
  759. * there are enough TPDs available.
  760. */
  761. static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter)
  762. {
  763. struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
  764. u16 next_to_use = 0;
  765. next_to_use = tx_ring->next_to_use;
  766. if (++tx_ring->next_to_use == tx_ring->count)
  767. tx_ring->next_to_use = 0;
  768. memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc));
  769. return (struct atl1e_tpd_desc *)&tx_ring->desc[next_to_use];
  770. }
  771. static struct atl1e_tx_buffer *
  772. atl1e_get_tx_buffer(struct atl1e_adapter *adapter, struct atl1e_tpd_desc *tpd)
  773. {
  774. struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
  775. return &tx_ring->tx_buffer[tpd - tx_ring->desc];
  776. }
  777. static void atl1e_tx_map(struct atl1e_adapter *adapter,
  778. struct io_buffer *iob, struct atl1e_tpd_desc *tpd)
  779. {
  780. struct atl1e_tx_buffer *tx_buffer = NULL;
  781. u16 buf_len = iob_len(iob);
  782. tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
  783. tx_buffer->iob = iob;
  784. tx_buffer->length = buf_len;
  785. tx_buffer->dma = virt_to_bus(iob->data);
  786. tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
  787. tpd->word2 = ((tpd->word2 & ~TPD_BUFLEN_MASK) |
  788. ((cpu_to_le32(buf_len) & TPD_BUFLEN_MASK) <<
  789. TPD_BUFLEN_SHIFT));
  790. tpd->word3 |= 1 << TPD_EOP_SHIFT;
  791. }
  792. static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count __unused,
  793. struct atl1e_tpd_desc *tpd __unused)
  794. {
  795. struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
  796. wmb();
  797. AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use);
  798. }
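/*
 * atl1e_xmit_frame - transmit an I/O buffer
 * @netdev: network device
 * @iob: I/O buffer to transmit
 *
 * Returns -EINVAL if the link is down and -EBUSY if no transmit
 * descriptor is available.
 */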
  799. static int atl1e_xmit_frame(struct net_device *netdev, struct io_buffer *iob)
  800. {
  801. struct atl1e_adapter *adapter = netdev_priv(netdev);
  802. u16 tpd_req = 1;
  803. struct atl1e_tpd_desc *tpd;
  804. if (!netdev_link_ok(netdev)) {
  805. return -EINVAL;
  806. }
  807. if (atl1e_tpd_avail(adapter) < tpd_req) {
  808. return -EBUSY;
  809. }
  810. tpd = atl1e_get_tpd(adapter);
  811. atl1e_tx_map(adapter, iob, tpd);
  812. atl1e_tx_queue(adapter, tpd_req, tpd);
  813. return 0;
  814. }
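/*
 * atl1e_up - bring the adapter up after a reset
 * @adapter: board private structure
 *
 * Reinitialises the hardware, ring pointers and MAC address, and
 * reconfigures the MAC, TXQ, RXQ and DMA engines.
 */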
  815. int atl1e_up(struct atl1e_adapter *adapter)
  816. {
  817. struct net_device *netdev = adapter->netdev;
  818. int err = 0;
  819. u32 val;
  820. /* hardware has been reset, we need to reload some things */
  821. err = atl1e_init_hw(&adapter->hw);
  822. if (err) {
  823. return -EIO;
  824. }
  825. atl1e_init_ring_ptrs(adapter);
  826. memcpy(adapter->hw.mac_addr, netdev->ll_addr, ETH_ALEN);
  827. if (atl1e_configure(adapter) != 0) {
  828. return -EIO;
  829. }
  830. atl1e_irq_disable(adapter);
  831. val = AT_READ_REG(&adapter->hw, REG_MASTER_CTRL);
  832. AT_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
  833. val | MASTER_CTRL_MANUAL_INT);
  834. return err;
  835. }
  836. void atl1e_irq(struct net_device *netdev, int enable)
  837. {
  838. struct atl1e_adapter *adapter = netdev_priv(netdev);
  839. if (enable)
  840. atl1e_irq_enable(adapter);
  841. else
  842. atl1e_irq_disable(adapter);
  843. }
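/*
 * atl1e_down - take the adapter down
 * @adapter: board private structure
 *
 * Resets the MAC to stop all RX/TX, marks the link down and cleans the
 * TX and RX rings.
 */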
  844. void atl1e_down(struct atl1e_adapter *adapter)
  845. {
  846. struct net_device *netdev = adapter->netdev;
  847. /* reset MAC to disable all RX/TX */
  848. atl1e_reset_hw(&adapter->hw);
  849. mdelay(1);
  850. netdev_link_down(netdev);
  851. adapter->link_speed = SPEED_0;
  852. adapter->link_duplex = -1;
  853. atl1e_clean_tx_ring(adapter);
  854. atl1e_clean_rx_ring(adapter);
  855. }
  856. /*
  857. * atl1e_open - Called when a network interface is made active
  858. * @netdev: network interface device structure
  859. *
  860. * Returns 0 on success, negative value on failure
  861. *
  862. * The open entry point is called when a network interface is made
  863. * active by the system (IFF_UP). At this point all resources needed
  864. * for transmit and receive operations are allocated, the interrupt
  865. * handler is registered with the OS, the watchdog timer is started,
  866. * and the stack is notified that the interface is ready.
  867. */
  868. static int atl1e_open(struct net_device *netdev)
  869. {
  870. struct atl1e_adapter *adapter = netdev_priv(netdev);
  871. int err;
  872. /* allocate rx/tx dma buffer & descriptors */
  873. atl1e_init_ring_resources(adapter);
  874. err = atl1e_setup_ring_resources(adapter);
  875. if (err)
  876. return err;
  877. err = atl1e_up(adapter);
  878. if (err)
  879. goto err_up;
  880. return 0;
  881. err_up:
  882. atl1e_free_ring_resources(adapter);
  883. atl1e_reset_hw(&adapter->hw);
  884. return err;
  885. }
  886. /*
  887. * atl1e_close - Disables a network interface
  888. * @netdev: network interface device structure
  889. *
  890. * Returns 0, this is not allowed to fail
  891. *
  892. * The close entry point is called when an interface is de-activated
  893. * by the OS. The hardware is still under the drivers control, but
  894. * needs to be disabled. A global MAC reset is issued to stop the
  895. * hardware, and all transmit and receive resources are freed.
  896. */
  897. static void atl1e_close(struct net_device *netdev)
  898. {
  899. struct atl1e_adapter *adapter = netdev_priv(netdev);
  900. atl1e_down(adapter);
  901. atl1e_free_ring_resources(adapter);
  902. }
  903. static struct net_device_operations atl1e_netdev_ops = {
  904. .open = atl1e_open,
  905. .close = atl1e_close,
  906. .transmit = atl1e_xmit_frame,
  907. .poll = atl1e_poll,
  908. .irq = atl1e_irq,
  909. };
  910. static void atl1e_init_netdev(struct net_device *netdev, struct pci_device *pdev)
  911. {
  912. netdev_init(netdev, &atl1e_netdev_ops);
  913. netdev->dev = &pdev->dev;
  914. pci_set_drvdata(pdev, netdev);
  915. }
  916. /*
  917. * atl1e_probe - Device Initialization Routine
  918. * @pdev: PCI device information struct
  919. * @ent: entry in atl1e_pci_tbl
  920. *
  921. * Returns 0 on success, negative on failure
  922. *
  923. * atl1e_probe initializes an adapter identified by a pci_device structure.
  924. * The OS initialization, configuring of the adapter private structure,
  925. * and a hardware reset occur.
  926. */
  927. static int atl1e_probe(struct pci_device *pdev)
  928. {
  929. struct net_device *netdev;
  930. struct atl1e_adapter *adapter = NULL;
  931. static int cards_found;
  932. int err = 0;
  933. adjust_pci_device(pdev);
  934. netdev = alloc_etherdev(sizeof(struct atl1e_adapter));
  935. if (netdev == NULL) {
  936. err = -ENOMEM;
  937. DBG("atl1e: out of memory allocating net_device\n");
  938. goto err;
  939. }
  940. atl1e_init_netdev(netdev, pdev);
  941. adapter = netdev_priv(netdev);
  942. adapter->bd_number = cards_found;
  943. adapter->netdev = netdev;
  944. adapter->pdev = pdev;
  945. adapter->hw.adapter = adapter;
  946. if (!pdev->membase) {
  947. err = -EIO;
  948. DBG("atl1e: cannot map device registers\n");
  949. goto err_free_netdev;
  950. }
  951. adapter->hw.hw_addr = bus_to_virt(pdev->membase);
  952. /* init mii data */
  953. adapter->mii.dev = netdev;
  954. adapter->mii.mdio_read = atl1e_mdio_read;
  955. adapter->mii.mdio_write = atl1e_mdio_write;
  956. adapter->mii.phy_id_mask = 0x1f;
  957. adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
  958. /* get user settings */
  959. adapter->tx_ring.count = TX_DESC_COUNT;
  960. adapter->rx_ring.page_size = RX_MEM_SIZE;
  961. atl1e_setup_pcicmd(pdev);
  962. /* setup the private structure */
  963. err = atl1e_sw_init(adapter);
  964. if (err) {
  965. DBG("atl1e: private data init failed\n");
  966. goto err_free_netdev;
  967. }
  968. /* Init GPHY as early as possible due to power saving issue */
  969. atl1e_phy_init(&adapter->hw);
  970. /* reset the controller to
  971. * put the device in a known good starting state */
  972. err = atl1e_reset_hw(&adapter->hw);
  973. if (err) {
  974. err = -EIO;
  975. goto err_free_netdev;
  976. }
  977. /* This may have been run by a zero-wait timer around
  978. now... unclear. */
  979. atl1e_restart_autoneg(&adapter->hw);
  980. if (atl1e_read_mac_addr(&adapter->hw) != 0) {
  981. DBG("atl1e: cannot read MAC address from EEPROM\n");
  982. err = -EIO;
  983. goto err_free_netdev;
  984. }
  985. memcpy(netdev->hw_addr, adapter->hw.perm_mac_addr, ETH_ALEN);
  986. memcpy(netdev->ll_addr, adapter->hw.mac_addr, ETH_ALEN);
  987. DBG("atl1e: Attansic L1E Ethernet controller on %s, "
  988. "%02x:%02x:%02x:%02x:%02x:%02x\n", adapter->netdev->name,
  989. adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
  990. adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
  991. adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
  992. err = register_netdev(netdev);
  993. if (err) {
  994. DBG("atl1e: cannot register network device\n");
  995. goto err_free_netdev;
  996. }
  997. cards_found++;
  998. return 0;
  999. err_free_netdev:
  1000. netdev_nullify(netdev);
  1001. netdev_put(netdev);
  1002. err:
  1003. return err;
  1004. }
  1005. /*
  1006. * atl1e_remove - Device Removal Routine
  1007. * @pdev: PCI device information struct
  1008. *
  1009. * atl1e_remove is called by the PCI subsystem to alert the driver
  1010. * that it should release a PCI device. This could be caused by a
  1011. * Hot-Plug event, or because the driver is going to be removed from
  1012. * memory.
  1013. */
  1014. static void atl1e_remove(struct pci_device *pdev)
  1015. {
  1016. struct net_device *netdev = pci_get_drvdata(pdev);
  1017. struct atl1e_adapter *adapter = netdev_priv(netdev);
  1018. unregister_netdev(netdev);
  1019. atl1e_free_ring_resources(adapter);
  1020. atl1e_force_ps(&adapter->hw);
  1021. netdev_nullify(netdev);
  1022. netdev_put(netdev);
  1023. }
  1024. struct pci_driver atl1e_driver __pci_driver = {
  1025. .ids = atl1e_pci_tbl,
  1026. .id_count = (sizeof(atl1e_pci_tbl) / sizeof(atl1e_pci_tbl[0])),
  1027. .probe = atl1e_probe,
  1028. .remove = atl1e_remove,
  1029. };
  1030. /********** Hardware-level functions: **********/
  1031. /*
  1032. * atl1e_check_eeprom_exist
  1033. * return 0 if the EEPROM exists
  1034. */
  1035. int atl1e_check_eeprom_exist(struct atl1e_hw *hw)
  1036. {
  1037. u32 value;
  1038. value = AT_READ_REG(hw, REG_SPI_FLASH_CTRL);
  1039. if (value & SPI_FLASH_CTRL_EN_VPD) {
  1040. value &= ~SPI_FLASH_CTRL_EN_VPD;
  1041. AT_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
  1042. }
  1043. value = AT_READ_REGW(hw, REG_PCIE_CAP_LIST);
  1044. return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
  1045. }
  1046. void atl1e_hw_set_mac_addr(struct atl1e_hw *hw)
  1047. {
  1048. u32 value;
  1049. /*
  1050. * 00-0B-6A-F6-00-DC
  1051. * 0: 6AF600DC 1: 000B
  1052. * low dword
  1053. */
  1054. value = (((u32)hw->mac_addr[2]) << 24) |
  1055. (((u32)hw->mac_addr[3]) << 16) |
  1056. (((u32)hw->mac_addr[4]) << 8) |
  1057. (((u32)hw->mac_addr[5])) ;
  1058. AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
  1059. /* high dword */
  1060. value = (((u32)hw->mac_addr[0]) << 8) |
  1061. (((u32)hw->mac_addr[1])) ;
  1062. AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
  1063. }
  1064. /*
  1065. * atl1e_get_permanent_address
  1066. * return 0 if a valid MAC address was obtained,
  1067. */
  1068. static int atl1e_get_permanent_address(struct atl1e_hw *hw)
  1069. {
  1070. union {
  1071. u32 dword[2];
  1072. u8 byte[8];
  1073. } hw_addr;
  1074. u32 i;
  1075. u32 twsi_ctrl_data;
  1076. u8 eth_addr[ETH_ALEN];
  1077. if (!atl1e_check_eeprom_exist(hw)) {
  1078. /* eeprom exist */
  1079. twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL);
  1080. twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
  1081. AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data);
  1082. for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) {
  1083. mdelay(10);
  1084. twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL);
  1085. if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0)
  1086. break;
  1087. }
  1088. if (i >= AT_TWSI_EEPROM_TIMEOUT)
  1089. return AT_ERR_TIMEOUT;
  1090. }
  1091. /* maybe MAC-address is from BIOS */
  1092. hw_addr.dword[0] = AT_READ_REG(hw, REG_MAC_STA_ADDR);
  1093. hw_addr.dword[1] = AT_READ_REG(hw, REG_MAC_STA_ADDR + 4);
  1094. for (i = 0; i < ETH_ALEN; i++) {
  1095. eth_addr[ETH_ALEN - i - 1] = hw_addr.byte[i];
  1096. }
  1097. memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
  1098. return 0;
  1099. }
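/*
 * Forces the PHY into its power-saving state (used at device removal)
 *
 * hw - Struct containing variables accessed by shared code
 */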
  1100. void atl1e_force_ps(struct atl1e_hw *hw)
  1101. {
  1102. AT_WRITE_REGW(hw, REG_GPHY_CTRL,
  1103. GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET);
  1104. }
  1105. /*
  1106. * Reads the adapter's MAC address from the EEPROM
  1107. *
  1108. * hw - Struct containing variables accessed by shared code
  1109. */
  1110. int atl1e_read_mac_addr(struct atl1e_hw *hw)
  1111. {
  1112. int err = 0;
  1113. err = atl1e_get_permanent_address(hw);
  1114. if (err)
  1115. return AT_ERR_EEPROM;
  1116. memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr));
  1117. return 0;
  1118. }
  1119. /*
  1120. * Reads the value from a PHY register
  1121. * hw - Struct containing variables accessed by shared code
  1122. * reg_addr - address of the PHY register to read
  1123. */
  1124. int atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data)
  1125. {
  1126. u32 val;
  1127. int i;
  1128. val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
  1129. MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW |
  1130. MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
  1131. AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
  1132. wmb();
  1133. for (i = 0; i < MDIO_WAIT_TIMES; i++) {
  1134. udelay(2);
  1135. val = AT_READ_REG(hw, REG_MDIO_CTRL);
  1136. if (!(val & (MDIO_START | MDIO_BUSY)))
  1137. break;
  1138. wmb();
  1139. }
  1140. if (!(val & (MDIO_START | MDIO_BUSY))) {
  1141. *phy_data = (u16)val;
  1142. return 0;
  1143. }
  1144. return AT_ERR_PHY;
  1145. }
  1146. /*
  1147. * Writes a value to a PHY register
  1148. * hw - Struct containing variables accessed by shared code
  1149. * reg_addr - address of the PHY register to write
  1150. * data - data to write to the PHY
  1151. */
  1152. int atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data)
  1153. {
  1154. int i;
  1155. u32 val;
  1156. val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
  1157. (reg_addr&MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
  1158. MDIO_SUP_PREAMBLE |
  1159. MDIO_START |
  1160. MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
  1161. AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
  1162. wmb();
  1163. for (i = 0; i < MDIO_WAIT_TIMES; i++) {
  1164. udelay(2);
  1165. val = AT_READ_REG(hw, REG_MDIO_CTRL);
  1166. if (!(val & (MDIO_START | MDIO_BUSY)))
  1167. break;
  1168. wmb();
  1169. }
  1170. if (!(val & (MDIO_START | MDIO_BUSY)))
  1171. return 0;
  1172. return AT_ERR_PHY;
  1173. }
  1174. /*
  1175. * atl1e_init_pcie - init PCIE module
  1176. */
  1177. static void atl1e_init_pcie(struct atl1e_hw *hw)
  1178. {
  1179. u32 value;
  1180. /* the 2 lines below are commented out to save more power when suspended:
  1181. value = LTSSM_TEST_MODE_DEF;
  1182. AT_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value);
  1183. */
  1184. /* pcie flow control mode change */
  1185. value = AT_READ_REG(hw, 0x1008);
  1186. value |= 0x8000;
  1187. AT_WRITE_REG(hw, 0x1008, value);
  1188. }
  1189. /*
  1190. * Configures PHY autoneg and flow control advertisement settings
  1191. *
  1192. * hw - Struct containing variables accessed by shared code
  1193. */
  1194. static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
  1195. {
  1196. s32 ret_val;
  1197. u16 mii_autoneg_adv_reg;
  1198. u16 mii_1000t_ctrl_reg;
  1199. if (0 != hw->mii_autoneg_adv_reg)
  1200. return 0;
  1201. /* Read the MII Auto-Neg Advertisement Register (Address 4/9). */
  1202. mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
  1203. mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK;
  1204. /*
  1205. * First we clear all the 10/100 mb speed bits in the Auto-Neg
  1206. * Advertisement Register (Address 4) and the 1000 mb speed bits in
  1207. * the 1000Base-T control Register (Address 9).
  1208. */
  1209. mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
  1210. mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
  1211. /* Assume auto-detect media type */
  1212. mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
  1213. MII_AR_10T_FD_CAPS |
  1214. MII_AR_100TX_HD_CAPS |
  1215. MII_AR_100TX_FD_CAPS);
  1216. if (hw->nic_type == athr_l1e) {
  1217. mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS;
  1218. }
  1219. /* flow control fixed to enable all */
  1220. mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
  1221. hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
  1222. hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
  1223. ret_val = atl1e_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
  1224. if (ret_val)
  1225. return ret_val;
  1226. if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
  1227. ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR,
  1228. mii_1000t_ctrl_reg);
  1229. if (ret_val)
  1230. return ret_val;
  1231. }
  1232. return 0;
  1233. }
  1234. /*
  1235. * Resets the PHY and makes all configuration take effect
  1236. *
  1237. * hw - Struct containing variables accessed by shared code
  1238. *
  1239. * Sets bits 15 and 12 of the MII control register (for F001 bug)
  1240. */
  1241. int atl1e_phy_commit(struct atl1e_hw *hw)
  1242. {
  1243. int ret_val;
  1244. u16 phy_data;
  1245. phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
  1246. ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
  1247. if (ret_val) {
  1248. u32 val;
  1249. int i;
  1250. /**************************************
  1251. * pcie serdes link may be down !
  1252. **************************************/
  1253. for (i = 0; i < 25; i++) {
  1254. mdelay(1);
  1255. val = AT_READ_REG(hw, REG_MDIO_CTRL);
  1256. if (!(val & (MDIO_START | MDIO_BUSY)))
  1257. break;
  1258. }
  1259. if (0 != (val & (MDIO_START | MDIO_BUSY))) {
  1260. DBG("atl1e: PCI-E link down for at least 25ms\n");
  1261. return ret_val;
  1262. }
  1263. DBG("atl1e: PCI-E link up after %d ms\n", i);
  1264. }
  1265. return 0;
  1266. }
  1267. int atl1e_phy_init(struct atl1e_hw *hw)
  1268. {
  1269. s32 ret_val;
  1270. u16 phy_val;
  1271. if (hw->phy_configured) {
  1272. if (hw->re_autoneg) {
  1273. hw->re_autoneg = 0;
  1274. return atl1e_restart_autoneg(hw);
  1275. }
  1276. return 0;
  1277. }
  1278. /* RESET GPHY Core */
  1279. AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT);
  1280. mdelay(2);
  1281. AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
  1282. GPHY_CTRL_EXT_RESET);
  1283. mdelay(2);
  1284. /* patches */
  1285. /* p1. enable hibernation mode */
  1286. ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0xB);
  1287. if (ret_val)
  1288. return ret_val;
  1289. ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0xBC00);
  1290. if (ret_val)
  1291. return ret_val;
  1292. /* p2. set Class A/B for all modes */
  1293. ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0);
  1294. if (ret_val)
  1295. return ret_val;
  1296. phy_val = 0x02ef;
  1297. /* remove Class AB */
  1298. /* phy_val = hw->emi_ca ? 0x02ef : 0x02df; */
  1299. ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, phy_val);
  1300. if (ret_val)
  1301. return ret_val;
  1302. /* p3. 10B ??? */
  1303. ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x12);
  1304. if (ret_val)
  1305. return ret_val;
  1306. ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x4C04);
  1307. if (ret_val)
  1308. return ret_val;
  1309. /* p4. 1000T power */
  1310. ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x4);
  1311. if (ret_val)
  1312. return ret_val;
  1313. ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x8BBB);
  1314. if (ret_val)
  1315. return ret_val;
  1316. ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x5);
  1317. if (ret_val)
  1318. return ret_val;
  1319. ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x2C46);
  1320. if (ret_val)
  1321. return ret_val;
  1322. mdelay(1);
  1323. /* Enable PHY LinkChange Interrupt */
  1324. ret_val = atl1e_write_phy_reg(hw, MII_INT_CTRL, 0xC00);
  1325. if (ret_val) {
  1326. DBG("atl1e: Error enable PHY linkChange Interrupt\n");
  1327. return ret_val;
  1328. }
  1329. /* setup AutoNeg parameters */
  1330. ret_val = atl1e_phy_setup_autoneg_adv(hw);
  1331. if (ret_val) {
  1332. DBG("atl1e: Error Setting up Auto-Negotiation\n");
  1333. return ret_val;
  1334. }
  1335. /* SW Reset & Enable Auto-Neg to restart Auto-Neg */
  1336. DBG("atl1e: Restarting Auto-Neg");
  1337. ret_val = atl1e_phy_commit(hw);
  1338. if (ret_val) {
  1339. DBG("atl1e: Error Resetting the phy");
  1340. return ret_val;
  1341. }
  1342. hw->phy_configured = 1;
  1343. return 0;
  1344. }
  1345. /*
  1346. * Reset the transmit and receive units; mask and clear all interrupts.
  1347. * hw - Struct containing variables accessed by shared code
  1348. * return : 0 or idle status (if error)
  1349. */
  1350. int atl1e_reset_hw(struct atl1e_hw *hw)
  1351. {
  1352. struct atl1e_adapter *adapter = hw->adapter;
  1353. struct pci_device *pdev = adapter->pdev;
  1354. int timeout = 0;
  1355. u32 idle_status_data = 0;
  1356. u16 pci_cfg_cmd_word = 0;
  1357. /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
  1358. pci_read_config_word(pdev, PCI_COMMAND, &pci_cfg_cmd_word);
  1359. if ((pci_cfg_cmd_word & (PCI_COMMAND_IO | PCI_COMMAND_MEM |
  1360. PCI_COMMAND_MASTER))
  1361. != (PCI_COMMAND_IO | PCI_COMMAND_MEM |
  1362. PCI_COMMAND_MASTER)) {
  1363. pci_cfg_cmd_word |= (PCI_COMMAND_IO | PCI_COMMAND_MEM |
  1364. PCI_COMMAND_MASTER);
  1365. pci_write_config_word(pdev, PCI_COMMAND, pci_cfg_cmd_word);
  1366. }
  1367. /*
  1368. * Issue Soft Reset to the MAC. This will reset the chip's
  1369. * transmit, receive and DMA units. It will not affect
  1370. * the current PCI configuration. The global reset bit is self-
  1371. * clearing, and should clear within a microsecond.
  1372. */
  1373. AT_WRITE_REG(hw, REG_MASTER_CTRL,
  1374. MASTER_CTRL_LED_MODE | MASTER_CTRL_SOFT_RST);
  1375. wmb();
  1376. mdelay(1);
  1377. /* Wait at least 10ms for All module to be Idle */
  1378. for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
  1379. idle_status_data = AT_READ_REG(hw, REG_IDLE_STATUS);
  1380. if (idle_status_data == 0)
  1381. break;
  1382. mdelay(1);
  1383. }
  1384. if (timeout >= AT_HW_MAX_IDLE_DELAY) {
  1385. DBG("atl1e: MAC reset timeout\n");
  1386. return AT_ERR_TIMEOUT;
  1387. }
  1388. return 0;
  1389. }
  1390. /*
  1391. * Performs basic configuration of the adapter.
  1392. *
  1393. * hw - Struct containing variables accessed by shared code
  1394. * Assumes that the controller has previously been reset and is in a
  1395. * post-reset uninitialized state. Initializes the multicast table
  1396. * and calls routines to set up the link.
  1397. * Leaves the transmit and receive units disabled and uninitialized.
  1398. */
  1399. int atl1e_init_hw(struct atl1e_hw *hw)
  1400. {
  1401. s32 ret_val = 0;
  1402. atl1e_init_pcie(hw);
  1403. /* Zero out the Multicast HASH table */
  1404. /* clear the old settings from the multicast hash table */
  1405. AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
  1406. AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
  1407. ret_val = atl1e_phy_init(hw);
  1408. return ret_val;
  1409. }
  1410. /*
  1411. * Detects the current speed and duplex settings of the hardware.
  1412. *
  1413. * hw - Struct containing variables accessed by shared code
  1414. * speed - Speed of the connection
  1415. * duplex - Duplex setting of the connection
  1416. */
  1417. int atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex)
  1418. {
  1419. int err;
  1420. u16 phy_data;
  1421. /* Read PHY Specific Status Register (17) */
  1422. err = atl1e_read_phy_reg(hw, MII_AT001_PSSR, &phy_data);
  1423. if (err)
  1424. return err;
  1425. if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED))
  1426. return AT_ERR_PHY_RES;
  1427. switch (phy_data & MII_AT001_PSSR_SPEED) {
  1428. case MII_AT001_PSSR_1000MBS:
  1429. *speed = SPEED_1000;
  1430. break;
  1431. case MII_AT001_PSSR_100MBS:
  1432. *speed = SPEED_100;
  1433. break;
  1434. case MII_AT001_PSSR_10MBS:
  1435. *speed = SPEED_10;
  1436. break;
  1437. default:
  1438. return AT_ERR_PHY_SPEED;
  1439. break;
  1440. }
  1441. if (phy_data & MII_AT001_PSSR_DPLX)
  1442. *duplex = FULL_DUPLEX;
  1443. else
  1444. *duplex = HALF_DUPLEX;
  1445. return 0;
  1446. }
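/*
 * Rewrites the autonegotiation advertisement registers and restarts
 * PHY autonegotiation
 *
 * hw - Struct containing variables accessed by shared code
 */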
  1447. int atl1e_restart_autoneg(struct atl1e_hw *hw)
  1448. {
  1449. int err = 0;
  1450. err = atl1e_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
  1451. if (err)
  1452. return err;
  1453. if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
  1454. err = atl1e_write_phy_reg(hw, MII_AT001_CR,
  1455. hw->mii_1000t_ctrl_reg);
  1456. if (err)
  1457. return err;
  1458. }
  1459. err = atl1e_write_phy_reg(hw, MII_BMCR,
  1460. MII_CR_RESET | MII_CR_AUTO_NEG_EN |
  1461. MII_CR_RESTART_AUTO_NEG);
  1462. return err;
  1463. }