
atl1e.c 45KB

  1. /*
  2. * Copyright(c) 2007 Atheros Corporation. All rights reserved.
  3. *
  4. * Derived from Intel e1000 driver
  5. * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
  6. *
  7. * Modified for gPXE, October 2009 by Joshua Oreman <oremanj@rwcr.net>.
  8. *
  9. * This program is free software; you can redistribute it and/or modify it
  10. * under the terms of the GNU General Public License as published by the Free
  11. * Software Foundation; either version 2 of the License, or (at your option)
  12. * any later version.
  13. *
  14. * This program is distributed in the hope that it will be useful, but WITHOUT
  15. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  16. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  17. * more details.
  18. *
  19. * You should have received a copy of the GNU General Public License along with
  20. * this program; if not, write to the Free Software Foundation, Inc., 59
  21. * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  22. */
  23. FILE_LICENCE ( GPL2_OR_LATER );
  24. #include "atl1e.h"
  25. /* User-tweakable parameters: */
  26. #define TX_DESC_COUNT 32 /* TX descriptors, minimum 32 */
  27. #define RX_MEM_SIZE 8192 /* RX area size, minimum 8 KB */
  28. #define MAX_FRAME_SIZE 1500 /* Maximum MTU supported, minimum 1500 */
  29. /* Arcane parameters: */
  30. #define PREAMBLE_LEN 7
  31. #define RX_JUMBO_THRESH ((MAX_FRAME_SIZE + ETH_HLEN + \
  32. VLAN_HLEN + ETH_FCS_LEN + 7) >> 3)
  33. #define IMT_VAL 100 /* interrupt moderator timer, us */
  34. #define ICT_VAL 50000 /* interrupt clear timer, us */
  35. #define SMB_TIMER 200000
  36. #define RRD_THRESH 1 /* packets to queue before interrupt */
  37. #define TPD_BURST 5
  38. #define TPD_THRESH (TX_DESC_COUNT / 2)
  39. #define RX_COUNT_DOWN 4
  40. #define TX_COUNT_DOWN (IMT_VAL * 4 / 3)
  41. #define DMAR_DLY_CNT 15
  42. #define DMAW_DLY_CNT 4
  43. #define PCI_DEVICE_ID_ATTANSIC_L1E 0x1026
  44. /*
  45. * atl1e_pci_tbl - PCI Device ID Table
  46. *
  47. * Wildcard entries (PCI_ANY_ID) should come last
  48. * Last entry must be all 0s
  49. *
  50. * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
  51. * Class, Class Mask, private data (not used) }
  52. */
  53. static struct pci_device_id atl1e_pci_tbl[] = {
  54. PCI_ROM(0x1969, 0x1026, "atl1e_26", "Attansic L1E 0x1026", 0),
  55. PCI_ROM(0x1969, 0x1066, "atl1e_66", "Attansic L1E 0x1066", 0),
  56. };
  57. static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter);
  58. static const u16
  59. atl1e_rx_page_vld_regs[AT_PAGE_NUM_PER_QUEUE] =
  60. {
  61. REG_HOST_RXF0_PAGE0_VLD, REG_HOST_RXF0_PAGE1_VLD
  62. };
  63. static const u16
  64. atl1e_rx_page_lo_addr_regs[AT_PAGE_NUM_PER_QUEUE] =
  65. {
  66. REG_HOST_RXF0_PAGE0_LO, REG_HOST_RXF0_PAGE1_LO
  67. };
  68. static const u16
  69. atl1e_rx_page_write_offset_regs[AT_PAGE_NUM_PER_QUEUE] =
  70. {
  71. REG_HOST_RXF0_MB0_LO, REG_HOST_RXF0_MB1_LO
  72. };
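/* DMA payload sizes in bytes, indexed by the read-request burst-length code
 * kept in hw->dmar_block (e.g. atl1e_dma_req_1024); used when programming the
 * TXQ control register in atl1e_configure_tx(). */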
  73. static const u16 atl1e_pay_load_size[] = {
  74. 128, 256, 512, 1024, 2048, 4096,
  75. };
  76. /*
  77. * atl1e_irq_enable - Enable default interrupt generation settings
  78. * @adapter: board private structure
  79. */
  80. static inline void atl1e_irq_enable(struct atl1e_adapter *adapter)
  81. {
  82. AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
  83. AT_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
  84. AT_WRITE_FLUSH(&adapter->hw);
  85. }
  86. /*
  87. * atl1e_irq_disable - Mask off interrupt generation on the NIC
  88. * @adapter: board private structure
  89. */
  90. static inline void atl1e_irq_disable(struct atl1e_adapter *adapter)
  91. {
  92. AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
  93. AT_WRITE_FLUSH(&adapter->hw);
  94. }
  95. /*
  96. * atl1e_irq_reset - reset interrupt configuration on the NIC
  97. * @adapter: board private structure
  98. */
  99. static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
  100. {
  101. AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
  102. AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
  103. AT_WRITE_FLUSH(&adapter->hw);
  104. }
  105. static void atl1e_reset(struct atl1e_adapter *adapter)
  106. {
  107. atl1e_down(adapter);
  108. atl1e_up(adapter);
  109. }
  110. static int atl1e_check_link(struct atl1e_adapter *adapter)
  111. {
  112. struct atl1e_hw *hw = &adapter->hw;
  113. struct net_device *netdev = adapter->netdev;
  114. int err = 0;
  115. u16 speed, duplex, phy_data;
  116. /* MII_BMSR must be read twice */
  117. atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
  118. atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
  119. if ((phy_data & BMSR_LSTATUS) == 0) {
  120. /* link down */
  121. if (netdev_link_ok(netdev)) { /* old link state: Up */
  122. u32 value;
  123. /* disable rx */
  124. value = AT_READ_REG(hw, REG_MAC_CTRL);
  125. value &= ~MAC_CTRL_RX_EN;
  126. AT_WRITE_REG(hw, REG_MAC_CTRL, value);
  127. adapter->link_speed = SPEED_0;
  128. DBG("atl1e: %s link is down\n", netdev->name);
  129. netdev_link_down(netdev);
  130. }
  131. } else {
  132. /* Link Up */
  133. err = atl1e_get_speed_and_duplex(hw, &speed, &duplex);
  134. if (err)
  135. return err;
  136. /* link result is our setting */
  137. if (adapter->link_speed != speed ||
  138. adapter->link_duplex != duplex) {
  139. adapter->link_speed = speed;
  140. adapter->link_duplex = duplex;
  141. atl1e_setup_mac_ctrl(adapter);
  142. DBG("atl1e: %s link is up, %d Mbps, %s duplex\n",
  143. netdev->name, adapter->link_speed,
  144. adapter->link_duplex == FULL_DUPLEX ?
  145. "full" : "half");
  146. netdev_link_up(netdev);
  147. }
  148. }
  149. return 0;
  150. }
  151. static int atl1e_mdio_read(struct net_device *netdev, int phy_id __unused,
  152. int reg_num)
  153. {
  154. struct atl1e_adapter *adapter = netdev_priv(netdev);
  155. u16 result;
  156. atl1e_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result);
  157. return result;
  158. }
  159. static void atl1e_mdio_write(struct net_device *netdev, int phy_id __unused,
  160. int reg_num, int val)
  161. {
  162. struct atl1e_adapter *adapter = netdev_priv(netdev);
  163. atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
  164. }
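/*
 * atl1e_setup_pcicmd - enable PCI memory space and bus mastering, and clear
 * any stale power-management (PME) status left behind by the BIOS.
 */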
  165. static void atl1e_setup_pcicmd(struct pci_device *pdev)
  166. {
  167. u16 cmd;
  168. pci_read_config_word(pdev, PCI_COMMAND, &cmd);
  169. cmd |= (PCI_COMMAND_MEM | PCI_COMMAND_MASTER);
  170. pci_write_config_word(pdev, PCI_COMMAND, cmd);
  171. /*
  172. * Some motherboard BIOS (PXE/EFI) drivers may set PME
  173. * while transferring control to the OS (Windows/Linux),
  174. * so we must clear this bit before the NIC can work normally.
  175. */
  176. pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
  177. mdelay(1);
  178. }
  179. /*
  180. * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter)
  181. * @adapter: board private structure to initialize
  182. *
  183. * atl1e_sw_init initializes the Adapter private data structure.
  184. * Fields are initialized based on PCI device information and
  185. * OS network device settings (MTU size).
  186. */
  187. static int atl1e_sw_init(struct atl1e_adapter *adapter)
  188. {
  189. struct atl1e_hw *hw = &adapter->hw;
  190. struct pci_device *pdev = adapter->pdev;
  191. u32 phy_status_data = 0;
  192. u8 rev_id = 0;
  193. adapter->link_speed = SPEED_0; /* hardware init */
  194. adapter->link_duplex = FULL_DUPLEX;
  195. /* PCI config space info */
  196. pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
  197. phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
  198. /* nic type */
  199. if (rev_id >= 0xF0) {
  200. hw->nic_type = athr_l2e_revB;
  201. } else {
  202. if (phy_status_data & PHY_STATUS_100M)
  203. hw->nic_type = athr_l1e;
  204. else
  205. hw->nic_type = athr_l2e_revA;
  206. }
  207. phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
  208. hw->emi_ca = !!(phy_status_data & PHY_STATUS_EMI_CA);
  209. hw->phy_configured = 0;
  210. /* needs confirmation */
  211. hw->dmar_block = atl1e_dma_req_1024;
  212. hw->dmaw_block = atl1e_dma_req_1024;
  213. netdev_link_down(adapter->netdev);
  214. return 0;
  215. }
  216. /*
  217. * atl1e_clean_tx_ring - free all Tx buffers for device close
  218. * @adapter: board private structure
  219. */
  220. static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
  221. {
  222. struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
  223. &adapter->tx_ring;
  224. struct atl1e_tx_buffer *tx_buffer = NULL;
  225. u16 index, ring_count = tx_ring->count;
  226. if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL)
  227. return;
  228. for (index = 0; index < ring_count; index++) {
  229. tx_buffer = &tx_ring->tx_buffer[index];
  230. if (tx_buffer->iob) {
  231. netdev_tx_complete(adapter->netdev, tx_buffer->iob);
  232. tx_buffer->dma = 0;
  233. tx_buffer->iob = NULL;
  234. }
  235. }
  236. /* Zero out Tx-buffers */
  237. memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) *
  238. ring_count);
  239. memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) *
  240. ring_count);
  241. }
  242. /*
  243. * atl1e_clean_rx_ring - Free rx-reservation iobs
  244. * @adapter: board private structure
  245. */
  246. static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
  247. {
  248. struct atl1e_rx_ring *rx_ring =
  249. (struct atl1e_rx_ring *)&adapter->rx_ring;
  250. struct atl1e_rx_page_desc *rx_page_desc = &rx_ring->rx_page_desc;
  251. u16 j;
  252. if (adapter->ring_vir_addr == NULL)
  253. return;
  254. /* Zero out the descriptor ring */
  255. for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
  256. if (rx_page_desc->rx_page[j].addr != NULL) {
  257. memset(rx_page_desc->rx_page[j].addr, 0,
  258. rx_ring->real_page_size);
  259. }
  260. }
  261. }
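/*
 * atl1e_cal_ring_size - compute the size of the single DMA block that holds
 * the TPD ring, the RXF pages and the CMB/write-offset dwords, including the
 * padding needed for 8-byte and 32-byte alignment.
 */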
  262. static void atl1e_cal_ring_size(struct atl1e_adapter *adapter, u32 *ring_size)
  263. {
  264. *ring_size = ((u32)(adapter->tx_ring.count *
  265. sizeof(struct atl1e_tpd_desc) + 7
  266. /* tx ring, qword align */
  267. + adapter->rx_ring.real_page_size * AT_PAGE_NUM_PER_QUEUE
  268. + 31
  269. /* rx ring, 32 bytes align */
  270. + (1 + AT_PAGE_NUM_PER_QUEUE) *
  271. sizeof(u32) + 3));
  272. /* tx, rx cmd, dword align */
  273. }
  274. static void atl1e_init_ring_resources(struct atl1e_adapter *adapter)
  275. {
  276. struct atl1e_tx_ring *tx_ring = NULL;
  277. struct atl1e_rx_ring *rx_ring = NULL;
  278. tx_ring = &adapter->tx_ring;
  279. rx_ring = &adapter->rx_ring;
  280. rx_ring->real_page_size = adapter->rx_ring.page_size
  281. + MAX_FRAME_SIZE
  282. + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
  283. rx_ring->real_page_size = (rx_ring->real_page_size + 31) & ~31;
  284. atl1e_cal_ring_size(adapter, &adapter->ring_size);
  285. adapter->ring_vir_addr = NULL;
  286. adapter->rx_ring.desc = NULL;
  287. return;
  288. }
  289. /*
  290. * Read / Write Ptr Initialize:
  291. */
  292. static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter)
  293. {
  294. struct atl1e_tx_ring *tx_ring = NULL;
  295. struct atl1e_rx_ring *rx_ring = NULL;
  296. struct atl1e_rx_page_desc *rx_page_desc = NULL;
  297. int j;
  298. tx_ring = &adapter->tx_ring;
  299. rx_ring = &adapter->rx_ring;
  300. rx_page_desc = &rx_ring->rx_page_desc;
  301. tx_ring->next_to_use = 0;
  302. tx_ring->next_to_clean = 0;
  303. rx_page_desc->rx_using = 0;
  304. rx_page_desc->rx_nxseq = 0;
  305. for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
  306. *rx_page_desc->rx_page[j].write_offset_addr = 0;
  307. rx_page_desc->rx_page[j].read_offset = 0;
  308. }
  309. }
  310. /*
  311. * atl1e_free_ring_resources - Free Tx / RX descriptor Resources
  312. * @adapter: board private structure
  313. *
  314. * Free all transmit software resources
  315. */
  316. static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
  317. {
  318. atl1e_clean_tx_ring(adapter);
  319. atl1e_clean_rx_ring(adapter);
  320. if (adapter->ring_vir_addr) {
  321. free_dma(adapter->ring_vir_addr, adapter->ring_size);
  322. adapter->ring_vir_addr = NULL;
  323. adapter->ring_dma = 0;
  324. }
  325. if (adapter->tx_ring.tx_buffer) {
  326. free(adapter->tx_ring.tx_buffer);
  327. adapter->tx_ring.tx_buffer = NULL;
  328. }
  329. }
  330. /*
  331. * atl1e_setup_mem_resources - allocate Tx / RX descriptor resources
  332. * @adapter: board private structure
  333. *
  334. * Return 0 on success, negative on failure
  335. */
  336. static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
  337. {
  338. struct atl1e_tx_ring *tx_ring;
  339. struct atl1e_rx_ring *rx_ring;
  340. struct atl1e_rx_page_desc *rx_page_desc;
  341. int size, j;
  342. u32 offset = 0;
  343. int err = 0;
  344. if (adapter->ring_vir_addr != NULL)
  345. return 0; /* already allocated */
  346. tx_ring = &adapter->tx_ring;
  347. rx_ring = &adapter->rx_ring;
  348. /* real ring DMA buffer */
  349. size = adapter->ring_size;
  350. adapter->ring_vir_addr = malloc_dma(adapter->ring_size, 32);
  351. if (adapter->ring_vir_addr == NULL) {
  352. DBG("atl1e: out of memory allocating %d bytes for %s ring\n",
  353. adapter->ring_size, adapter->netdev->name);
  354. return -ENOMEM;
  355. }
  356. adapter->ring_dma = virt_to_bus(adapter->ring_vir_addr);
  357. memset(adapter->ring_vir_addr, 0, adapter->ring_size);
  358. rx_page_desc = &rx_ring->rx_page_desc;
  359. /* Init TPD Ring */
  360. tx_ring->dma = (adapter->ring_dma + 7) & ~7;
  361. offset = tx_ring->dma - adapter->ring_dma;
  362. tx_ring->desc = (struct atl1e_tpd_desc *)
  363. (adapter->ring_vir_addr + offset);
  364. size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
  365. tx_ring->tx_buffer = zalloc(size);
  366. if (tx_ring->tx_buffer == NULL) {
  367. DBG("atl1e: out of memory allocating %d bytes for %s txbuf\n",
  368. size, adapter->netdev->name);
  369. err = -ENOMEM;
  370. goto failed;
  371. }
  372. /* Init RXF-Pages */
  373. offset += (sizeof(struct atl1e_tpd_desc) * tx_ring->count);
  374. offset = (offset + 31) & ~31;
  375. for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
  376. rx_page_desc->rx_page[j].dma =
  377. adapter->ring_dma + offset;
  378. rx_page_desc->rx_page[j].addr =
  379. adapter->ring_vir_addr + offset;
  380. offset += rx_ring->real_page_size;
  381. }
  382. /* Init CMB dma address */
  383. tx_ring->cmb_dma = adapter->ring_dma + offset;
  384. tx_ring->cmb = (u32 *)(adapter->ring_vir_addr + offset);
  385. offset += sizeof(u32);
  386. for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
  387. rx_page_desc->rx_page[j].write_offset_dma =
  388. adapter->ring_dma + offset;
  389. rx_page_desc->rx_page[j].write_offset_addr =
  390. adapter->ring_vir_addr + offset;
  391. offset += sizeof(u32);
  392. }
  393. if (offset > adapter->ring_size) {
  394. DBG("atl1e: ring miscalculation! need %d > %d bytes\n",
  395. offset, adapter->ring_size);
  396. err = -EINVAL;
  397. goto failed;
  398. }
  399. return 0;
  400. failed:
  401. atl1e_free_ring_resources(adapter);
  402. return err;
  403. }
  404. static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
  405. {
  406. struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
  407. struct atl1e_rx_ring *rx_ring =
  408. (struct atl1e_rx_ring *)&adapter->rx_ring;
  409. struct atl1e_tx_ring *tx_ring =
  410. (struct atl1e_tx_ring *)&adapter->tx_ring;
  411. struct atl1e_rx_page_desc *rx_page_desc = NULL;
  412. int j;
  413. AT_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI, 0);
  414. AT_WRITE_REG(hw, REG_TPD_BASE_ADDR_LO, tx_ring->dma);
  415. AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u16)(tx_ring->count));
  416. AT_WRITE_REG(hw, REG_HOST_TX_CMB_LO, tx_ring->cmb_dma);
  417. rx_page_desc = &rx_ring->rx_page_desc;
  418. /* RXF Page Physical address / Page Length */
  419. AT_WRITE_REG(hw, REG_RXF0_BASE_ADDR_HI, 0);
  420. for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
  421. u32 page_phy_addr;
  422. u32 offset_phy_addr;
  423. page_phy_addr = rx_page_desc->rx_page[j].dma;
  424. offset_phy_addr = rx_page_desc->rx_page[j].write_offset_dma;
  425. AT_WRITE_REG(hw, atl1e_rx_page_lo_addr_regs[j], page_phy_addr);
  426. AT_WRITE_REG(hw, atl1e_rx_page_write_offset_regs[j],
  427. offset_phy_addr);
  428. AT_WRITE_REGB(hw, atl1e_rx_page_vld_regs[j], 1);
  429. }
  430. /* Page Length */
  431. AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size);
  432. /* Load all of base address above */
  433. AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
  434. return;
  435. }
  436. static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
  437. {
  438. struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
  439. u32 dev_ctrl_data = 0;
  440. u32 max_pay_load = 0;
  441. u32 jumbo_thresh = 0;
  442. u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */
  443. /* configure TXQ param */
  444. if (hw->nic_type != athr_l2e_revB) {
  445. extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
  446. jumbo_thresh = MAX_FRAME_SIZE + extra_size;
  447. AT_WRITE_REG(hw, REG_TX_EARLY_TH, (jumbo_thresh + 7) >> 3);
  448. }
  449. dev_ctrl_data = AT_READ_REG(hw, REG_DEVICE_CTRL);
  450. max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) &
  451. DEVICE_CTRL_MAX_PAYLOAD_MASK;
  452. if (max_pay_load < hw->dmaw_block)
  453. hw->dmaw_block = max_pay_load;
  454. max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) &
  455. DEVICE_CTRL_MAX_RREQ_SZ_MASK;
  456. if (max_pay_load < hw->dmar_block)
  457. hw->dmar_block = max_pay_load;
  458. if (hw->nic_type != athr_l2e_revB)
  459. AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2,
  460. atl1e_pay_load_size[hw->dmar_block]);
  461. /* enable TXQ */
  462. AT_WRITE_REGW(hw, REG_TXQ_CTRL,
  463. ((TPD_BURST & TXQ_CTRL_NUM_TPD_BURST_MASK)
  464. << TXQ_CTRL_NUM_TPD_BURST_SHIFT)
  465. | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN);
  466. return;
  467. }
  468. static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
  469. {
  470. struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
  471. u32 rxf_len = 0;
  472. u32 rxf_low = 0;
  473. u32 rxf_high = 0;
  474. u32 rxf_thresh_data = 0;
  475. u32 rxq_ctrl_data = 0;
  476. if (hw->nic_type != athr_l2e_revB) {
  477. AT_WRITE_REGW(hw, REG_RXQ_JMBOSZ_RRDTIM,
  478. (u16)((RX_JUMBO_THRESH & RXQ_JMBOSZ_TH_MASK) <<
  479. RXQ_JMBOSZ_TH_SHIFT |
  480. (1 & RXQ_JMBO_LKAH_MASK) <<
  481. RXQ_JMBO_LKAH_SHIFT));
  482. rxf_len = AT_READ_REG(hw, REG_SRAM_RXF_LEN);
  483. rxf_high = rxf_len * 4 / 5;
  484. rxf_low = rxf_len / 5;
  485. rxf_thresh_data = ((rxf_high & RXQ_RXF_PAUSE_TH_HI_MASK)
  486. << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
  487. ((rxf_low & RXQ_RXF_PAUSE_TH_LO_MASK)
  488. << RXQ_RXF_PAUSE_TH_LO_SHIFT);
  489. AT_WRITE_REG(hw, REG_RXQ_RXF_PAUSE_THRESH, rxf_thresh_data);
  490. }
  491. /* RRS */
  492. AT_WRITE_REG(hw, REG_IDT_TABLE, 0);
  493. AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, 0);
  494. rxq_ctrl_data |= RXQ_CTRL_PBA_ALIGN_32 |
  495. RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;
  496. AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
  497. return;
  498. }
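/*
 * atl1e_configure_dma - program the DMA engine: read/write burst lengths
 * (hw->dmar_block / hw->dmaw_block), request priority and ordering, and the
 * read/write delay counts.
 */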
  499. static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
  500. {
  501. struct atl1e_hw *hw = &adapter->hw;
  502. u32 dma_ctrl_data = 0;
  503. dma_ctrl_data = DMA_CTRL_RXCMB_EN;
  504. dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
  505. << DMA_CTRL_DMAR_BURST_LEN_SHIFT;
  506. dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
  507. << DMA_CTRL_DMAW_BURST_LEN_SHIFT;
  508. dma_ctrl_data |= DMA_CTRL_DMAR_REQ_PRI | DMA_CTRL_DMAR_OUT_ORDER;
  509. dma_ctrl_data |= (DMAR_DLY_CNT & DMA_CTRL_DMAR_DLY_CNT_MASK)
  510. << DMA_CTRL_DMAR_DLY_CNT_SHIFT;
  511. dma_ctrl_data |= (DMAW_DLY_CNT & DMA_CTRL_DMAW_DLY_CNT_MASK)
  512. << DMA_CTRL_DMAW_DLY_CNT_SHIFT;
  513. AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
  514. return;
  515. }
  516. static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
  517. {
  518. u32 value;
  519. struct atl1e_hw *hw = &adapter->hw;
  520. /* Config MAC CTRL Register */
  521. value = MAC_CTRL_TX_EN |
  522. MAC_CTRL_RX_EN ;
  523. if (FULL_DUPLEX == adapter->link_duplex)
  524. value |= MAC_CTRL_DUPLX;
  525. value |= ((u32)((SPEED_1000 == adapter->link_speed) ?
  526. MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
  527. MAC_CTRL_SPEED_SHIFT);
  528. value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
  529. value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
  530. value |= ((PREAMBLE_LEN & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
  531. value |= MAC_CTRL_BC_EN;
  532. value |= MAC_CTRL_MC_ALL_EN;
  533. AT_WRITE_REG(hw, REG_MAC_CTRL, value);
  534. }
  535. /*
  536. * atl1e_configure - Configure Transmit&Receive Unit after Reset
  537. * @adapter: board private structure
  538. *
  539. * Configure the Tx /Rx unit of the MAC after a reset.
  540. */
  541. static int atl1e_configure(struct atl1e_adapter *adapter)
  542. {
  543. struct atl1e_hw *hw = &adapter->hw;
  544. u32 intr_status_data = 0;
  545. /* clear interrupt status */
  546. AT_WRITE_REG(hw, REG_ISR, ~0);
  547. /* 1. set MAC Address */
  548. atl1e_hw_set_mac_addr(hw);
  549. /* 2. Init the Multicast HASH table (clear) */
  550. AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
  551. AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
  552. /* 3. Clear any WOL status */
  553. AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
  554. /* 4. Descriptor ring base address/length/read ptr/write ptr:
  555. * the TPD ring, SMB and RXF0 page CMBs all share the same
  556. * high 32 bits of memory */
  557. atl1e_configure_des_ring(adapter);
  558. /* 5. set Interrupt Moderator Timer */
  559. AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, IMT_VAL);
  560. AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER2_INIT, IMT_VAL);
  561. AT_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_LED_MODE |
  562. MASTER_CTRL_ITIMER_EN | MASTER_CTRL_ITIMER2_EN);
  563. /* 6. rx/tx threshold to trigger interrupt */
  564. AT_WRITE_REGW(hw, REG_TRIG_RRD_THRESH, RRD_THRESH);
  565. AT_WRITE_REGW(hw, REG_TRIG_TPD_THRESH, TPD_THRESH);
  566. AT_WRITE_REGW(hw, REG_TRIG_RXTIMER, RX_COUNT_DOWN);
  567. AT_WRITE_REGW(hw, REG_TRIG_TXTIMER, TX_COUNT_DOWN);
  568. /* 7. set Interrupt Clear Timer */
  569. AT_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, ICT_VAL);
  570. /* 8. set MTU */
  571. AT_WRITE_REG(hw, REG_MTU, MAX_FRAME_SIZE + ETH_HLEN +
  572. VLAN_HLEN + ETH_FCS_LEN);
  573. /* 9. config TXQ early tx threshold */
  574. atl1e_configure_tx(adapter);
  575. /* 10. config RXQ */
  576. atl1e_configure_rx(adapter);
  577. /* 11. config DMA Engine */
  578. atl1e_configure_dma(adapter);
  579. /* 12. SMB timer to trigger interrupt */
  580. AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, SMB_TIMER);
  581. intr_status_data = AT_READ_REG(hw, REG_ISR);
  582. if ((intr_status_data & ISR_PHY_LINKDOWN) != 0) {
  583. DBG("atl1e: configure failed, PCIE phy link down\n");
  584. return -1;
  585. }
  586. AT_WRITE_REG(hw, REG_ISR, 0x7fffffff);
  587. return 0;
  588. }
  589. static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter)
  590. {
  591. u16 phy_data;
  592. atl1e_read_phy_reg(&adapter->hw, MII_INT_STATUS, &phy_data);
  593. }
  594. static int atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
  595. {
  596. struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
  597. &adapter->tx_ring;
  598. struct atl1e_tx_buffer *tx_buffer = NULL;
  599. u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX);
  600. u16 next_to_clean = tx_ring->next_to_clean;
  601. while (next_to_clean != hw_next_to_clean) {
  602. tx_buffer = &tx_ring->tx_buffer[next_to_clean];
  603. tx_buffer->dma = 0;
  604. if (tx_buffer->iob) {
  605. netdev_tx_complete(adapter->netdev, tx_buffer->iob);
  606. tx_buffer->iob = NULL;
  607. }
  608. if (++next_to_clean == tx_ring->count)
  609. next_to_clean = 0;
  610. }
  611. tx_ring->next_to_clean = next_to_clean;
  612. return 1;
  613. }
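/*
 * atl1e_get_rx_page - return the RXF page currently being filled by the
 * hardware; the two pages per queue are used alternately.
 */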
  614. static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter)
  615. {
  616. struct atl1e_rx_page_desc *rx_page_desc =
  617. (struct atl1e_rx_page_desc *) &adapter->rx_ring.rx_page_desc;
  618. u8 rx_using = rx_page_desc->rx_using;
  619. return (struct atl1e_rx_page *)&(rx_page_desc->rx_page[rx_using]);
  620. }
  621. static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter)
  622. {
  623. struct net_device *netdev = adapter->netdev;
  624. struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *)
  625. &adapter->rx_ring;
  626. struct atl1e_rx_page_desc *rx_page_desc =
  627. (struct atl1e_rx_page_desc *) &rx_ring->rx_page_desc;
  628. struct io_buffer *iob = NULL;
  629. struct atl1e_rx_page *rx_page = atl1e_get_rx_page(adapter);
  630. u32 packet_size, write_offset;
  631. struct atl1e_recv_ret_status *prrs;
  632. write_offset = *(rx_page->write_offset_addr);
  633. if (rx_page->read_offset >= write_offset)
  634. return;
  635. do {
  636. /* get new packet's rrs */
  637. prrs = (struct atl1e_recv_ret_status *) (rx_page->addr +
  638. rx_page->read_offset);
  639. /* check sequence number */
  640. if (prrs->seq_num != rx_page_desc->rx_nxseq) {
  641. DBG("atl1e %s: RX sequence number error (%d != %d)\n",
  642. netdev->name, prrs->seq_num,
  643. rx_page_desc->rx_nxseq);
  644. rx_page_desc->rx_nxseq++;
  645. goto fatal_err;
  646. }
  647. rx_page_desc->rx_nxseq++;
  648. /* error packet */
  649. if (prrs->pkt_flag & RRS_IS_ERR_FRAME) {
  650. if (prrs->err_flag & (RRS_ERR_BAD_CRC |
  651. RRS_ERR_DRIBBLE | RRS_ERR_CODE |
  652. RRS_ERR_TRUNC)) {
  653. /* hardware error, discard this
  654. packet */
  655. netdev_rx_err(netdev, NULL, EIO);
  656. goto skip_pkt;
  657. }
  658. }
  659. packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
  660. RRS_PKT_SIZE_MASK) - ETH_FCS_LEN;
  661. iob = alloc_iob(packet_size + NET_IP_ALIGN);
  662. if (iob == NULL) {
  663. DBG("atl1e %s: dropping packet under memory pressure\n",
  664. netdev->name);
  665. goto skip_pkt;
  666. }
  667. iob_reserve(iob, NET_IP_ALIGN);
  668. memcpy(iob->data, (u8 *)(prrs + 1), packet_size);
  669. iob_put(iob, packet_size);
  670. netdev_rx(netdev, iob);
  671. skip_pkt:
  672. /* skip current packet whether it's ok or not. */
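/* The RRS header plus frame data is padded to a 32-byte boundary
 * within the page, hence the rounding below. */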
  673. rx_page->read_offset +=
  674. (((u32)((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
  675. RRS_PKT_SIZE_MASK) +
  676. sizeof(struct atl1e_recv_ret_status) + 31) &
  677. 0xFFFFFFE0);
  678. if (rx_page->read_offset >= rx_ring->page_size) {
  679. /* mark this page clean */
  680. u16 reg_addr;
  681. u8 rx_using;
  682. rx_page->read_offset =
  683. *(rx_page->write_offset_addr) = 0;
  684. rx_using = rx_page_desc->rx_using;
  685. reg_addr =
  686. atl1e_rx_page_vld_regs[rx_using];
  687. AT_WRITE_REGB(&adapter->hw, reg_addr, 1);
  688. rx_page_desc->rx_using ^= 1;
  689. rx_page = atl1e_get_rx_page(adapter);
  690. }
  691. write_offset = *(rx_page->write_offset_addr);
  692. } while (rx_page->read_offset < write_offset);
  693. return;
  694. fatal_err:
  695. if (!netdev_link_ok(adapter->netdev))
  696. atl1e_reset(adapter);
  697. }
  698. /*
  699. * atl1e_poll - poll for completed transmissions and received packets
  700. * @netdev: network device
  701. */
  702. static void atl1e_poll(struct net_device *netdev)
  703. {
  704. struct atl1e_adapter *adapter = netdev_priv(netdev);
  705. struct atl1e_hw *hw = &adapter->hw;
  706. int max_ints = 64;
  707. u32 status;
  708. do {
  709. status = AT_READ_REG(hw, REG_ISR);
  710. if ((status & IMR_NORMAL_MASK) == 0)
  711. break;
  712. /* link event */
  713. if (status & ISR_GPHY)
  714. atl1e_clear_phy_int(adapter);
  715. /* Ack ISR */
  716. AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
  717. /* check if PCIE PHY Link down */
  718. if (status & ISR_PHY_LINKDOWN) {
  719. DBG("atl1e: PCI-E PHY link down: %x\n", status);
  720. if (netdev_link_ok(adapter->netdev)) {
  721. /* reset MAC */
  722. atl1e_irq_reset(adapter);
  723. atl1e_reset(adapter);
  724. break;
  725. }
  726. }
  727. /* check if DMA read/write error */
  728. if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
  729. DBG("atl1e: PCI-E DMA RW error: %x\n", status);
  730. atl1e_irq_reset(adapter);
  731. atl1e_reset(adapter);
  732. break;
  733. }
  734. /* link event */
  735. if (status & (ISR_GPHY | ISR_MANUAL)) {
  736. atl1e_check_link(adapter);
  737. break;
  738. }
  739. /* transmit event */
  740. if (status & ISR_TX_EVENT)
  741. atl1e_clean_tx_irq(adapter);
  742. if (status & ISR_RX_EVENT)
  743. atl1e_clean_rx_irq(adapter);
  744. } while (--max_ints > 0);
  745. /* re-enable Interrupt*/
  746. AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
  747. return;
  748. }
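/*
 * atl1e_tpd_avail - number of free transmit descriptors in the ring; one
 * slot is always left unused so that a full ring can be distinguished from
 * an empty one.
 */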
  749. static inline u16 atl1e_tpd_avail(struct atl1e_adapter *adapter)
  750. {
  751. struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
  752. u16 next_to_use = 0;
  753. u16 next_to_clean = 0;
  754. next_to_clean = tx_ring->next_to_clean;
  755. next_to_use = tx_ring->next_to_use;
  756. return (u16)(next_to_clean > next_to_use) ?
  757. (next_to_clean - next_to_use - 1) :
  758. (tx_ring->count + next_to_clean - next_to_use - 1);
  759. }
  760. /*
  761. * Get the next usable TPD.
  762. * Note: the caller should call atl1e_tpd_avail() to make sure
  763. * there are enough TPDs to use.
  764. */
  765. static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter)
  766. {
  767. struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
  768. u16 next_to_use = 0;
  769. next_to_use = tx_ring->next_to_use;
  770. if (++tx_ring->next_to_use == tx_ring->count)
  771. tx_ring->next_to_use = 0;
  772. memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc));
  773. return (struct atl1e_tpd_desc *)&tx_ring->desc[next_to_use];
  774. }
  775. static struct atl1e_tx_buffer *
  776. atl1e_get_tx_buffer(struct atl1e_adapter *adapter, struct atl1e_tpd_desc *tpd)
  777. {
  778. struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
  779. return &tx_ring->tx_buffer[tpd - tx_ring->desc];
  780. }
  781. static void atl1e_tx_map(struct atl1e_adapter *adapter,
  782. struct io_buffer *iob, struct atl1e_tpd_desc *tpd)
  783. {
  784. struct atl1e_tx_buffer *tx_buffer = NULL;
  785. u16 buf_len = iob_len(iob);
  786. tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
  787. tx_buffer->iob = iob;
  788. tx_buffer->length = buf_len;
  789. tx_buffer->dma = virt_to_bus(iob->data);
  790. tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
  791. tpd->word2 = ((tpd->word2 & ~TPD_BUFLEN_MASK) |
  792. ((cpu_to_le32(buf_len) & TPD_BUFLEN_MASK) <<
  793. TPD_BUFLEN_SHIFT));
  794. tpd->word3 |= 1 << TPD_EOP_SHIFT;
  795. }
  796. static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count __unused,
  797. struct atl1e_tpd_desc *tpd __unused)
  798. {
  799. struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
  800. wmb();
  801. AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use);
  802. }
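/*
 * atl1e_xmit_frame - transmit entry point: reserve one TPD, map the I/O
 * buffer for DMA and notify the hardware via the TPD producer index.
 */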
  803. static int atl1e_xmit_frame(struct net_device *netdev, struct io_buffer *iob)
  804. {
  805. struct atl1e_adapter *adapter = netdev_priv(netdev);
  806. u16 tpd_req = 1;
  807. struct atl1e_tpd_desc *tpd;
  808. if (!netdev_link_ok(netdev)) {
  809. return -EINVAL;
  810. }
  811. if (atl1e_tpd_avail(adapter) < tpd_req) {
  812. return -EBUSY;
  813. }
  814. tpd = atl1e_get_tpd(adapter);
  815. atl1e_tx_map(adapter, iob, tpd);
  816. atl1e_tx_queue(adapter, tpd_req, tpd);
  817. return 0;
  818. }
  819. int atl1e_up(struct atl1e_adapter *adapter)
  820. {
  821. struct net_device *netdev = adapter->netdev;
  822. int err = 0;
  823. u32 val;
  824. /* hardware has been reset, we need to reload some things */
  825. err = atl1e_init_hw(&adapter->hw);
  826. if (err) {
  827. return -EIO;
  828. }
  829. atl1e_init_ring_ptrs(adapter);
  830. memcpy(adapter->hw.mac_addr, netdev->ll_addr, ETH_ALEN);
  831. if (atl1e_configure(adapter) != 0) {
  832. return -EIO;
  833. }
  834. atl1e_irq_disable(adapter);
  835. val = AT_READ_REG(&adapter->hw, REG_MASTER_CTRL);
  836. AT_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
  837. val | MASTER_CTRL_MANUAL_INT);
  838. return err;
  839. }
  840. void atl1e_irq(struct net_device *netdev, int enable)
  841. {
  842. struct atl1e_adapter *adapter = netdev_priv(netdev);
  843. if (enable)
  844. atl1e_irq_enable(adapter);
  845. else
  846. atl1e_irq_disable(adapter);
  847. }
  848. void atl1e_down(struct atl1e_adapter *adapter)
  849. {
  850. struct net_device *netdev = adapter->netdev;
  851. /* reset MAC to disable all RX/TX */
  852. atl1e_reset_hw(&adapter->hw);
  853. mdelay(1);
  854. netdev_link_down(netdev);
  855. adapter->link_speed = SPEED_0;
  856. adapter->link_duplex = -1;
  857. atl1e_clean_tx_ring(adapter);
  858. atl1e_clean_rx_ring(adapter);
  859. }
  860. /*
  861. * atl1e_open - Called when a network interface is made active
  862. * @netdev: network interface device structure
  863. *
  864. * Returns 0 on success, negative value on failure
  865. *
  866. * The open entry point is called when a network interface is made
  867. * active by the system (IFF_UP). At this point all resources needed
  868. * for transmit and receive operations are allocated, the interrupt
  869. * handler is registered with the OS, the watchdog timer is started,
  870. * and the stack is notified that the interface is ready.
  871. */
  872. static int atl1e_open(struct net_device *netdev)
  873. {
  874. struct atl1e_adapter *adapter = netdev_priv(netdev);
  875. int err;
  876. /* allocate rx/tx dma buffer & descriptors */
  877. atl1e_init_ring_resources(adapter);
  878. err = atl1e_setup_ring_resources(adapter);
  879. if (err)
  880. return err;
  881. err = atl1e_up(adapter);
  882. if (err)
  883. goto err_up;
  884. return 0;
  885. err_up:
  886. atl1e_free_ring_resources(adapter);
  887. atl1e_reset_hw(&adapter->hw);
  888. return err;
  889. }
  890. /*
  891. * atl1e_close - Disables a network interface
  892. * @netdev: network interface device structure
  893. *
  894. * This routine is not allowed to fail.
  895. *
  896. * The close entry point is called when an interface is de-activated
  897. * by the OS. The hardware is still under the drivers control, but
  898. * needs to be disabled. A global MAC reset is issued to stop the
  899. * hardware, and all transmit and receive resources are freed.
  900. */
  901. static void atl1e_close(struct net_device *netdev)
  902. {
  903. struct atl1e_adapter *adapter = netdev_priv(netdev);
  904. atl1e_down(adapter);
  905. atl1e_free_ring_resources(adapter);
  906. }
  907. static struct net_device_operations atl1e_netdev_ops = {
  908. .open = atl1e_open,
  909. .close = atl1e_close,
  910. .transmit = atl1e_xmit_frame,
  911. .poll = atl1e_poll,
  912. .irq = atl1e_irq,
  913. };
  914. static void atl1e_init_netdev(struct net_device *netdev, struct pci_device *pdev)
  915. {
  916. netdev_init(netdev, &atl1e_netdev_ops);
  917. netdev->dev = &pdev->dev;
  918. pci_set_drvdata(pdev, netdev);
  919. }
  920. /*
  921. * atl1e_probe - Device Initialization Routine
  922. * @pdev: PCI device information struct
  923. * @ent: entry in atl1e_pci_tbl
  924. *
  925. * Returns 0 on success, negative on failure
  926. *
  927. * atl1e_probe initializes an adapter identified by a pci_device structure.
  928. * The OS initialization, configuring of the adapter private structure,
  929. * and a hardware reset occur.
  930. */
  931. static int atl1e_probe(struct pci_device *pdev,
  932. const struct pci_device_id *ent __unused)
  933. {
  934. struct net_device *netdev;
  935. struct atl1e_adapter *adapter = NULL;
  936. static int cards_found;
  937. int err = 0;
  938. adjust_pci_device(pdev);
  939. netdev = alloc_etherdev(sizeof(struct atl1e_adapter));
  940. if (netdev == NULL) {
  941. err = -ENOMEM;
  942. DBG("atl1e: out of memory allocating net_device\n");
  943. goto err;
  944. }
  945. atl1e_init_netdev(netdev, pdev);
  946. adapter = netdev_priv(netdev);
  947. adapter->bd_number = cards_found;
  948. adapter->netdev = netdev;
  949. adapter->pdev = pdev;
  950. adapter->hw.adapter = adapter;
  951. if (!pdev->membase) {
  952. err = -EIO;
  953. DBG("atl1e: cannot map device registers\n");
  954. goto err_free_netdev;
  955. }
  956. adapter->hw.hw_addr = bus_to_virt(pdev->membase);
  957. /* init mii data */
  958. adapter->mii.dev = netdev;
  959. adapter->mii.mdio_read = atl1e_mdio_read;
  960. adapter->mii.mdio_write = atl1e_mdio_write;
  961. adapter->mii.phy_id_mask = 0x1f;
  962. adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
  963. /* get user settings */
  964. adapter->tx_ring.count = TX_DESC_COUNT;
  965. adapter->rx_ring.page_size = RX_MEM_SIZE;
  966. atl1e_setup_pcicmd(pdev);
  967. /* setup the private structure */
  968. err = atl1e_sw_init(adapter);
  969. if (err) {
  970. DBG("atl1e: private data init failed\n");
  971. goto err_free_netdev;
  972. }
  973. /* Init GPHY as early as possible due to power saving issue */
  974. atl1e_phy_init(&adapter->hw);
  975. /* reset the controller to
  976. * put the device in a known good starting state */
  977. err = atl1e_reset_hw(&adapter->hw);
  978. if (err) {
  979. err = -EIO;
  980. goto err_free_netdev;
  981. }
  982. /* This may have been run by a zero-wait timer around
  983. now... unclear. */
  984. atl1e_restart_autoneg(&adapter->hw);
  985. if (atl1e_read_mac_addr(&adapter->hw) != 0) {
  986. DBG("atl1e: cannot read MAC address from EEPROM\n");
  987. err = -EIO;
  988. goto err_free_netdev;
  989. }
  990. memcpy(netdev->hw_addr, adapter->hw.perm_mac_addr, ETH_ALEN);
  991. memcpy(netdev->ll_addr, adapter->hw.mac_addr, ETH_ALEN);
  992. DBG("atl1e: Attansic L1E Ethernet controller on %s, "
  993. "%02x:%02x:%02x:%02x:%02x:%02x\n", adapter->netdev->name,
  994. adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
  995. adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
  996. adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
  997. err = register_netdev(netdev);
  998. if (err) {
  999. DBG("atl1e: cannot register network device\n");
  1000. goto err_free_netdev;
  1001. }
  1002. netdev_link_down(netdev);
  1003. cards_found++;
  1004. return 0;
  1005. err_free_netdev:
  1006. netdev_nullify(netdev);
  1007. netdev_put(netdev);
  1008. err:
  1009. return err;
  1010. }
  1011. /*
  1012. * atl1e_remove - Device Removal Routine
  1013. * @pdev: PCI device information struct
  1014. *
  1015. * atl1e_remove is called by the PCI subsystem to alert the driver
  1016. * that it should release a PCI device. This could be caused by a
  1017. * Hot-Plug event, or because the driver is going to be removed from
  1018. * memory.
  1019. */
  1020. static void atl1e_remove(struct pci_device *pdev)
  1021. {
  1022. struct net_device *netdev = pci_get_drvdata(pdev);
  1023. struct atl1e_adapter *adapter = netdev_priv(netdev);
  1024. unregister_netdev(netdev);
  1025. atl1e_free_ring_resources(adapter);
  1026. atl1e_force_ps(&adapter->hw);
  1027. netdev_nullify(netdev);
  1028. netdev_put(netdev);
  1029. }
  1030. struct pci_driver atl1e_driver __pci_driver = {
  1031. .ids = atl1e_pci_tbl,
  1032. .id_count = (sizeof(atl1e_pci_tbl) / sizeof(atl1e_pci_tbl[0])),
  1033. .probe = atl1e_probe,
  1034. .remove = atl1e_remove,
  1035. };
  1036. /********** Hardware-level functions: **********/
  1037. /*
  1038. * atl1e_check_eeprom_exist
  1039. * Returns 0 if an EEPROM is present.
  1040. */
  1041. int atl1e_check_eeprom_exist(struct atl1e_hw *hw)
  1042. {
  1043. u32 value;
  1044. value = AT_READ_REG(hw, REG_SPI_FLASH_CTRL);
  1045. if (value & SPI_FLASH_CTRL_EN_VPD) {
  1046. value &= ~SPI_FLASH_CTRL_EN_VPD;
  1047. AT_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
  1048. }
  1049. value = AT_READ_REGW(hw, REG_PCIE_CAP_LIST);
  1050. return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
  1051. }
  1052. void atl1e_hw_set_mac_addr(struct atl1e_hw *hw)
  1053. {
  1054. u32 value;
  1055. /*
  1056. * 00-0B-6A-F6-00-DC
  1057. * 0: 6AF600DC 1: 000B
  1058. * low dword
  1059. */
  1060. value = (((u32)hw->mac_addr[2]) << 24) |
  1061. (((u32)hw->mac_addr[3]) << 16) |
  1062. (((u32)hw->mac_addr[4]) << 8) |
  1063. (((u32)hw->mac_addr[5])) ;
  1064. AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
  1065. /* high dword */
  1066. value = (((u32)hw->mac_addr[0]) << 8) |
  1067. (((u32)hw->mac_addr[1])) ;
  1068. AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
  1069. }
  1070. /*
  1071. * atl1e_get_permanent_address
  1072. * Returns 0 if a valid MAC address was obtained.
  1073. */
  1074. static int atl1e_get_permanent_address(struct atl1e_hw *hw)
  1075. {
  1076. u32 addr[2];
  1077. u32 i;
  1078. u32 twsi_ctrl_data;
  1079. u8 eth_addr[ETH_ALEN];
  1080. /* init */
  1081. addr[0] = addr[1] = 0;
  1082. if (!atl1e_check_eeprom_exist(hw)) {
  1083. /* eeprom exist */
  1084. twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL);
  1085. twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
  1086. AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data);
  1087. for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) {
  1088. mdelay(10);
  1089. twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL);
  1090. if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0)
  1091. break;
  1092. }
  1093. if (i >= AT_TWSI_EEPROM_TIMEOUT)
  1094. return AT_ERR_TIMEOUT;
  1095. }
  1096. /* otherwise the MAC address may have been programmed by the BIOS */
  1097. addr[0] = AT_READ_REG(hw, REG_MAC_STA_ADDR);
  1098. addr[1] = AT_READ_REG(hw, REG_MAC_STA_ADDR + 4);
  1099. *(u32 *) &eth_addr[2] = swap32(addr[0]);
  1100. *(u16 *) &eth_addr[0] = swap16(*(u16 *)&addr[1]);
  1101. memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
  1102. return 0;
  1103. }
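/*
 * atl1e_force_ps - force the PHY into its power-saving state (WOL power
 * disabled, external reset asserted).
 */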
  1104. void atl1e_force_ps(struct atl1e_hw *hw)
  1105. {
  1106. AT_WRITE_REGW(hw, REG_GPHY_CTRL,
  1107. GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET);
  1108. }
  1109. /*
  1110. * Reads the adapter's MAC address from the EEPROM
  1111. *
  1112. * hw - Struct containing variables accessed by shared code
  1113. */
  1114. int atl1e_read_mac_addr(struct atl1e_hw *hw)
  1115. {
  1116. int err = 0;
  1117. err = atl1e_get_permanent_address(hw);
  1118. if (err)
  1119. return AT_ERR_EEPROM;
  1120. memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr));
  1121. return 0;
  1122. }
  1123. /*
  1124. * Reads the value from a PHY register
  1125. * hw - Struct containing variables accessed by shared code
  1126. * reg_addr - address of the PHY register to read
  1127. */
  1128. int atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data)
  1129. {
  1130. u32 val;
  1131. int i;
  1132. val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
  1133. MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW |
  1134. MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
  1135. AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
  1136. wmb();
  1137. for (i = 0; i < MDIO_WAIT_TIMES; i++) {
  1138. udelay(2);
  1139. val = AT_READ_REG(hw, REG_MDIO_CTRL);
  1140. if (!(val & (MDIO_START | MDIO_BUSY)))
  1141. break;
  1142. wmb();
  1143. }
  1144. if (!(val & (MDIO_START | MDIO_BUSY))) {
  1145. *phy_data = (u16)val;
  1146. return 0;
  1147. }
  1148. return AT_ERR_PHY;
  1149. }
  1150. /*
  1151. * Writes a value to a PHY register
  1152. * hw - Struct containing variables accessed by shared code
  1153. * reg_addr - address of the PHY register to write
  1154. * data - data to write to the PHY
  1155. */
  1156. int atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data)
  1157. {
  1158. int i;
  1159. u32 val;
  1160. val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
  1161. (reg_addr&MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
  1162. MDIO_SUP_PREAMBLE |
  1163. MDIO_START |
  1164. MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
  1165. AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
  1166. wmb();
  1167. for (i = 0; i < MDIO_WAIT_TIMES; i++) {
  1168. udelay(2);
  1169. val = AT_READ_REG(hw, REG_MDIO_CTRL);
  1170. if (!(val & (MDIO_START | MDIO_BUSY)))
  1171. break;
  1172. wmb();
  1173. }
  1174. if (!(val & (MDIO_START | MDIO_BUSY)))
  1175. return 0;
  1176. return AT_ERR_PHY;
  1177. }
  1178. /*
  1179. * atl1e_init_pcie - init PCIE module
  1180. */
  1181. static void atl1e_init_pcie(struct atl1e_hw *hw)
  1182. {
  1183. u32 value;
  1184. /* comment out the two lines below to save more power when suspended
  1185. value = LTSSM_TEST_MODE_DEF;
  1186. AT_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value);
  1187. */
  1188. /* pcie flow control mode change */
  1189. value = AT_READ_REG(hw, 0x1008);
  1190. value |= 0x8000;
  1191. AT_WRITE_REG(hw, 0x1008, value);
  1192. }
  1193. /*
  1194. * Configures PHY autoneg and flow control advertisement settings
  1195. *
  1196. * hw - Struct containing variables accessed by shared code
  1197. */
  1198. static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
  1199. {
  1200. s32 ret_val;
  1201. u16 mii_autoneg_adv_reg;
  1202. u16 mii_1000t_ctrl_reg;
  1203. if (0 != hw->mii_autoneg_adv_reg)
  1204. return 0;
  1205. /* Read the MII Auto-Neg Advertisement Register (Address 4/9). */
  1206. mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
  1207. mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK;
  1208. /*
  1209. * First we clear all the 10/100 mb speed bits in the Auto-Neg
  1210. * Advertisement Register (Address 4) and the 1000 mb speed bits in
  1211. * the 1000Base-T control Register (Address 9).
  1212. */
  1213. mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
  1214. mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
  1215. /* Assume auto-detect media type */
  1216. mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
  1217. MII_AR_10T_FD_CAPS |
  1218. MII_AR_100TX_HD_CAPS |
  1219. MII_AR_100TX_FD_CAPS);
  1220. if (hw->nic_type == athr_l1e) {
  1221. mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS;
  1222. }
  1223. /* flow control fixed to enable all */
  1224. mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
  1225. hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
  1226. hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
  1227. ret_val = atl1e_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
  1228. if (ret_val)
  1229. return ret_val;
  1230. if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
  1231. ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR,
  1232. mii_1000t_ctrl_reg);
  1233. if (ret_val)
  1234. return ret_val;
  1235. }
  1236. return 0;
  1237. }
  1238. /*
  1239. * Resets the PHY and makes all configuration take effect.
  1240. *
  1241. * hw - Struct containing variables accessed by shared code
  1242. *
  1243. * Sets bits 15 and 12 of the MII control register (for F001 bug)
  1244. */
  1245. int atl1e_phy_commit(struct atl1e_hw *hw)
  1246. {
  1247. int ret_val;
  1248. u16 phy_data;
  1249. phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
  1250. ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
  1251. if (ret_val) {
  1252. u32 val;
  1253. int i;
  1254. /**************************************
  1255. * pcie serdes link may be down !
  1256. **************************************/
  1257. for (i = 0; i < 25; i++) {
  1258. mdelay(1);
  1259. val = AT_READ_REG(hw, REG_MDIO_CTRL);
  1260. if (!(val & (MDIO_START | MDIO_BUSY)))
  1261. break;
  1262. }
  1263. if (0 != (val & (MDIO_START | MDIO_BUSY))) {
  1264. DBG("atl1e: PCI-E link down for at least 25ms\n");
  1265. return ret_val;
  1266. }
  1267. DBG("atl1e: PCI-E link up after %d ms\n", i);
  1268. }
  1269. return 0;
  1270. }
  1271. int atl1e_phy_init(struct atl1e_hw *hw)
  1272. {
  1273. s32 ret_val;
  1274. u16 phy_val;
  1275. if (hw->phy_configured) {
  1276. if (hw->re_autoneg) {
  1277. hw->re_autoneg = 0;
  1278. return atl1e_restart_autoneg(hw);
  1279. }
  1280. return 0;
  1281. }
  1282. /* RESET GPHY Core */
  1283. AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT);
  1284. mdelay(2);
  1285. AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
  1286. GPHY_CTRL_EXT_RESET);
  1287. mdelay(2);
  1288. /* patches */
  1289. /* p1. enable hibernation mode */
  1290. ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0xB);
  1291. if (ret_val)
  1292. return ret_val;
  1293. ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0xBC00);
  1294. if (ret_val)
  1295. return ret_val;
  1296. /* p2. set Class A/B for all modes */
  1297. ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0);
  1298. if (ret_val)
  1299. return ret_val;
  1300. phy_val = 0x02ef;
  1301. /* remove Class AB */
  1302. /* phy_val = hw->emi_ca ? 0x02ef : 0x02df; */
  1303. ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, phy_val);
  1304. if (ret_val)
  1305. return ret_val;
  1306. /* p3. 10B ??? */
  1307. ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x12);
  1308. if (ret_val)
  1309. return ret_val;
  1310. ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x4C04);
  1311. if (ret_val)
  1312. return ret_val;
  1313. /* p4. 1000T power */
  1314. ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x4);
  1315. if (ret_val)
  1316. return ret_val;
  1317. ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x8BBB);
  1318. if (ret_val)
  1319. return ret_val;
  1320. ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x5);
  1321. if (ret_val)
  1322. return ret_val;
  1323. ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x2C46);
  1324. if (ret_val)
  1325. return ret_val;
  1326. mdelay(1);
  1327. /* Enable PHY LinkChange interrupt */
  1328. ret_val = atl1e_write_phy_reg(hw, MII_INT_CTRL, 0xC00);
  1329. if (ret_val) {
  1330. DBG("atl1e: Error enable PHY linkChange Interrupt\n");
  1331. return ret_val;
  1332. }
  1333. /* setup AutoNeg parameters */
  1334. ret_val = atl1e_phy_setup_autoneg_adv(hw);
  1335. if (ret_val) {
  1336. DBG("atl1e: Error Setting up Auto-Negotiation\n");
  1337. return ret_val;
  1338. }
  1339. /* SW.Reset & En-Auto-Neg to restart Auto-Neg*/
  1340. DBG("atl1e: Restarting Auto-Neg");
  1341. ret_val = atl1e_phy_commit(hw);
  1342. if (ret_val) {
  1343. DBG("atl1e: Error Resetting the phy");
  1344. return ret_val;
  1345. }
  1346. hw->phy_configured = 1;
  1347. return 0;
  1348. }
  1349. /*
  1350. * Reset the transmit and receive units; mask and clear all interrupts.
  1351. * hw - Struct containing variables accessed by shared code
  1352. * Returns 0 on success, or AT_ERR_TIMEOUT if the MAC fails to go idle.
  1353. */
  1354. int atl1e_reset_hw(struct atl1e_hw *hw)
  1355. {
  1356. struct atl1e_adapter *adapter = hw->adapter;
  1357. struct pci_device *pdev = adapter->pdev;
  1358. int timeout = 0;
  1359. u32 idle_status_data = 0;
  1360. u16 pci_cfg_cmd_word = 0;
  1361. /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
  1362. pci_read_config_word(pdev, PCI_COMMAND, &pci_cfg_cmd_word);
  1363. if ((pci_cfg_cmd_word & (PCI_COMMAND_IO | PCI_COMMAND_MEM |
  1364. PCI_COMMAND_MASTER))
  1365. != (PCI_COMMAND_IO | PCI_COMMAND_MEM |
  1366. PCI_COMMAND_MASTER)) {
  1367. pci_cfg_cmd_word |= (PCI_COMMAND_IO | PCI_COMMAND_MEM |
  1368. PCI_COMMAND_MASTER);
  1369. pci_write_config_word(pdev, PCI_COMMAND, pci_cfg_cmd_word);
  1370. }
  1371. /*
  1372. * Issue Soft Reset to the MAC. This will reset the chip's
  1373. * transmit, receive and DMA engines. It will not affect
  1374. * the current PCI configuration. The global reset bit is self-
  1375. * clearing, and should clear within a microsecond.
  1376. */
  1377. AT_WRITE_REG(hw, REG_MASTER_CTRL,
  1378. MASTER_CTRL_LED_MODE | MASTER_CTRL_SOFT_RST);
  1379. wmb();
  1380. mdelay(1);
  1381. /* Wait at least 10 ms for all modules to be idle */
  1382. for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
  1383. idle_status_data = AT_READ_REG(hw, REG_IDLE_STATUS);
  1384. if (idle_status_data == 0)
  1385. break;
  1386. mdelay(1);
  1387. }
  1388. if (timeout >= AT_HW_MAX_IDLE_DELAY) {
  1389. DBG("atl1e: MAC reset timeout\n");
  1390. return AT_ERR_TIMEOUT;
  1391. }
  1392. return 0;
  1393. }
  1394. /*
  1395. * Performs basic configuration of the adapter.
  1396. *
  1397. * hw - Struct containing variables accessed by shared code
  1398. * Assumes that the controller has previously been reset and is in a
  1399. * post-reset uninitialized state. Initializes the multicast table
  1400. * and calls routines to set up the link.
  1401. * Leaves the transmit and receive units disabled and uninitialized.
  1402. */
  1403. int atl1e_init_hw(struct atl1e_hw *hw)
  1404. {
  1405. s32 ret_val = 0;
  1406. atl1e_init_pcie(hw);
  1407. /* Zero out the Multicast HASH table */
  1408. /* clear the old settings from the multicast hash table */
  1409. AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
  1410. AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
  1411. ret_val = atl1e_phy_init(hw);
  1412. return ret_val;
  1413. }
  1414. /*
  1415. * Detects the current speed and duplex settings of the hardware.
  1416. *
  1417. * hw - Struct containing variables accessed by shared code
  1418. * speed - Speed of the connection
  1419. * duplex - Duplex setting of the connection
  1420. */
  1421. int atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex)
  1422. {
  1423. int err;
  1424. u16 phy_data;
  1425. /* Read PHY Specific Status Register (17) */
  1426. err = atl1e_read_phy_reg(hw, MII_AT001_PSSR, &phy_data);
  1427. if (err)
  1428. return err;
  1429. if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED))
  1430. return AT_ERR_PHY_RES;
  1431. switch (phy_data & MII_AT001_PSSR_SPEED) {
  1432. case MII_AT001_PSSR_1000MBS:
  1433. *speed = SPEED_1000;
  1434. break;
  1435. case MII_AT001_PSSR_100MBS:
  1436. *speed = SPEED_100;
  1437. break;
  1438. case MII_AT001_PSSR_10MBS:
  1439. *speed = SPEED_10;
  1440. break;
  1441. default:
  1442. return AT_ERR_PHY_SPEED;
  1443. break;
  1444. }
  1445. if (phy_data & MII_AT001_PSSR_DPLX)
  1446. *duplex = FULL_DUPLEX;
  1447. else
  1448. *duplex = HALF_DUPLEX;
  1449. return 0;
  1450. }
  1451. int atl1e_restart_autoneg(struct atl1e_hw *hw)
  1452. {
  1453. int err = 0;
  1454. err = atl1e_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
  1455. if (err)
  1456. return err;
  1457. if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
  1458. err = atl1e_write_phy_reg(hw, MII_AT001_CR,
  1459. hw->mii_1000t_ctrl_reg);
  1460. if (err)
  1461. return err;
  1462. }
  1463. err = atl1e_write_phy_reg(hw, MII_BMCR,
  1464. MII_CR_RESET | MII_CR_AUTO_NEG_EN |
  1465. MII_CR_RESTART_AUTO_NEG);
  1466. return err;
  1467. }