
sky2.c 64KB

  1. /*
  2. * iPXE driver for Marvell Yukon 2 chipset. Derived from Linux sky2 driver
  3. * (v1.22), which was based on earlier sk98lin and skge drivers.
  4. *
  5. * This driver intentionally does not support all the features
  6. * of the original driver such as link fail-over and link management because
  7. * those should be done at higher levels.
  8. *
  9. * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
  10. *
  11. * Modified for iPXE, April 2009 by Joshua Oreman
  12. *
  13. * This program is free software; you can redistribute it and/or modify
  14. * it under the terms of the GNU General Public License as published by
  15. * the Free Software Foundation; either version 2 of the License.
  16. *
  17. * This program is distributed in the hope that it will be useful,
  18. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  20. * GNU General Public License for more details.
  21. *
  22. * You should have received a copy of the GNU General Public License
  23. * along with this program; if not, write to the Free Software
  24. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  25. */
  26. FILE_LICENCE ( GPL2_ONLY );
  27. #include <stdint.h>
  28. #include <errno.h>
  29. #include <stdio.h>
  30. #include <unistd.h>
  31. #include <ipxe/ethernet.h>
  32. #include <ipxe/if_ether.h>
  33. #include <ipxe/iobuf.h>
  34. #include <ipxe/malloc.h>
  35. #include <ipxe/pci.h>
  36. #include <byteswap.h>
  37. #include <mii.h>
  38. #include "sky2.h"
  39. #define DRV_NAME "sky2"
  40. #define DRV_VERSION "1.22"
  41. #define PFX DRV_NAME " "
  42. /*
  43. * The Yukon II chipset takes 64 bit command blocks (called list elements)
  44. * that are organized into three different rings (receive, transmit, status),
  45. * similar to Tigon3.
  46. *
  47. * Each ring start must be aligned to a 4k boundary. You will get mysterious
  48. * "invalid LE" errors if they're not.
  49. *
  50. * The card silently forces each ring size to be at least 128. If you
  51. * act as though one of them is smaller (by setting the below
  52. * #defines) you'll get bad bugs.
  53. */
  54. #define RX_LE_SIZE 128
  55. #define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
  56. #define RX_RING_ALIGN 4096
  57. #define RX_PENDING (RX_LE_SIZE/6 - 2)
  58. #define TX_RING_SIZE 128
  59. #define TX_PENDING (TX_RING_SIZE - 1)
  60. #define TX_RING_ALIGN 4096
  61. #define MAX_SKB_TX_LE 4
  62. #define STATUS_RING_SIZE 512 /* 2 ports * (TX + RX) */
  63. #define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
  64. #define STATUS_RING_ALIGN 4096
  65. #define PHY_RETRIES 1000
  66. #define SKY2_EEPROM_MAGIC 0x9955aabb
  67. #define RING_NEXT(x,s) (((x)+1) & ((s)-1))
  68. static struct pci_device_id sky2_id_table[] = {
  69. PCI_ROM(0x1148, 0x9000, "sk9sxx", "Syskonnect SK-9Sxx", 0),
  70. PCI_ROM(0x1148, 0x9e00, "sk9exx", "Syskonnect SK-9Exx", 0),
  71. PCI_ROM(0x1186, 0x4b00, "dge560t", "D-Link DGE-560T", 0),
  72. PCI_ROM(0x1186, 0x4001, "dge550sx", "D-Link DGE-550SX", 0),
  73. PCI_ROM(0x1186, 0x4b02, "dge560sx", "D-Link DGE-560SX", 0),
  74. PCI_ROM(0x1186, 0x4b03, "dge550t", "D-Link DGE-550T", 0),
  75. PCI_ROM(0x11ab, 0x4340, "m88e8021", "Marvell 88E8021", 0),
  76. PCI_ROM(0x11ab, 0x4341, "m88e8022", "Marvell 88E8022", 0),
  77. PCI_ROM(0x11ab, 0x4342, "m88e8061", "Marvell 88E8061", 0),
  78. PCI_ROM(0x11ab, 0x4343, "m88e8062", "Marvell 88E8062", 0),
  79. PCI_ROM(0x11ab, 0x4344, "m88e8021b", "Marvell 88E8021", 0),
  80. PCI_ROM(0x11ab, 0x4345, "m88e8022b", "Marvell 88E8022", 0),
  81. PCI_ROM(0x11ab, 0x4346, "m88e8061b", "Marvell 88E8061", 0),
  82. PCI_ROM(0x11ab, 0x4347, "m88e8062b", "Marvell 88E8062", 0),
  83. PCI_ROM(0x11ab, 0x4350, "m88e8035", "Marvell 88E8035", 0),
  84. PCI_ROM(0x11ab, 0x4351, "m88e8036", "Marvell 88E8036", 0),
  85. PCI_ROM(0x11ab, 0x4352, "m88e8038", "Marvell 88E8038", 0),
  86. PCI_ROM(0x11ab, 0x4353, "m88e8039", "Marvell 88E8039", 0),
  87. PCI_ROM(0x11ab, 0x4354, "m88e8040", "Marvell 88E8040", 0),
  88. PCI_ROM(0x11ab, 0x4355, "m88e8040t", "Marvell 88E8040T", 0),
  89. PCI_ROM(0x11ab, 0x4356, "m88ec033", "Marvell 88EC033", 0),
  90. PCI_ROM(0x11ab, 0x4357, "m88e8042", "Marvell 88E8042", 0),
  91. PCI_ROM(0x11ab, 0x435a, "m88e8048", "Marvell 88E8048", 0),
  92. PCI_ROM(0x11ab, 0x4360, "m88e8052", "Marvell 88E8052", 0),
  93. PCI_ROM(0x11ab, 0x4361, "m88e8050", "Marvell 88E8050", 0),
  94. PCI_ROM(0x11ab, 0x4362, "m88e8053", "Marvell 88E8053", 0),
  95. PCI_ROM(0x11ab, 0x4363, "m88e8055", "Marvell 88E8055", 0),
  96. PCI_ROM(0x11ab, 0x4364, "m88e8056", "Marvell 88E8056", 0),
  97. PCI_ROM(0x11ab, 0x4365, "m88e8070", "Marvell 88E8070", 0),
  98. PCI_ROM(0x11ab, 0x4366, "m88ec036", "Marvell 88EC036", 0),
  99. PCI_ROM(0x11ab, 0x4367, "m88ec032", "Marvell 88EC032", 0),
  100. PCI_ROM(0x11ab, 0x4368, "m88ec034", "Marvell 88EC034", 0),
  101. PCI_ROM(0x11ab, 0x4369, "m88ec042", "Marvell 88EC042", 0),
  102. PCI_ROM(0x11ab, 0x436a, "m88e8058", "Marvell 88E8058", 0),
  103. PCI_ROM(0x11ab, 0x436b, "m88e8071", "Marvell 88E8071", 0),
  104. PCI_ROM(0x11ab, 0x436c, "m88e8072", "Marvell 88E8072", 0),
  105. PCI_ROM(0x11ab, 0x436d, "m88e8055b", "Marvell 88E8055", 0),
  106. PCI_ROM(0x11ab, 0x4370, "m88e8075", "Marvell 88E8075", 0),
  107. PCI_ROM(0x11ab, 0x4380, "m88e8057", "Marvell 88E8057", 0)
  108. };
  109. /* Avoid conditionals by using array */
  110. static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
  111. static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
  112. static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
  113. static void sky2_set_multicast(struct net_device *dev);
  114. /* Access to PHY via serial interconnect */
  115. static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
  116. {
  117. int i;
  118. gma_write16(hw, port, GM_SMI_DATA, val);
  119. gma_write16(hw, port, GM_SMI_CTRL,
  120. GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));
  121. for (i = 0; i < PHY_RETRIES; i++) {
  122. u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
  123. if (ctrl == 0xffff)
  124. goto io_error;
  125. if (!(ctrl & GM_SMI_CT_BUSY))
  126. return 0;
  127. udelay(10);
  128. }
  129. DBG(PFX "%s: phy write timeout\n", hw->dev[port]->name);
  130. return -ETIMEDOUT;
  131. io_error:
  132. DBG(PFX "%s: phy I/O error\n", hw->dev[port]->name);
  133. return -EIO;
  134. }
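  /* Read a PHY register via the serial interconnect; the result is stored in *val */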
  135. static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val)
  136. {
  137. int i;
  138. gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
  139. | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
  140. for (i = 0; i < PHY_RETRIES; i++) {
  141. u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
  142. if (ctrl == 0xffff)
  143. goto io_error;
  144. if (ctrl & GM_SMI_CT_RD_VAL) {
  145. *val = gma_read16(hw, port, GM_SMI_DATA);
  146. return 0;
  147. }
  148. udelay(10);
  149. }
  150. DBG(PFX "%s: phy read timeout\n", hw->dev[port]->name);
  151. return -ETIMEDOUT;
  152. io_error:
  153. DBG(PFX "%s: phy I/O error\n", hw->dev[port]->name);
  154. return -EIO;
  155. }
  156. static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
  157. {
  158. u16 v = 0;
  159. __gm_phy_read(hw, port, reg, &v);
  160. return v;
  161. }
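  /* Switch chip power to VCC and configure clock gating, with chip-specific workarounds */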
  162. static void sky2_power_on(struct sky2_hw *hw)
  163. {
  164. /* switch power to VCC (WA for VAUX problem) */
  165. sky2_write8(hw, B0_POWER_CTRL,
  166. PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
  167. /* disable Core Clock Division */
  168. sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
  169. if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
  170. /* enable bits are inverted */
  171. sky2_write8(hw, B2_Y2_CLK_GATE,
  172. Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
  173. Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
  174. Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
  175. else
  176. sky2_write8(hw, B2_Y2_CLK_GATE, 0);
  177. if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
  178. u32 reg;
  179. sky2_pci_write32(hw, PCI_DEV_REG3, 0);
  180. reg = sky2_pci_read32(hw, PCI_DEV_REG4);
  181. /* set all bits to 0 except bits 15..12 and 8 */
  182. reg &= P_ASPM_CONTROL_MSK;
  183. sky2_pci_write32(hw, PCI_DEV_REG4, reg);
  184. reg = sky2_pci_read32(hw, PCI_DEV_REG5);
  185. /* set all bits to 0 except bits 28 & 27 */
  186. reg &= P_CTL_TIM_VMAIN_AV_MSK;
  187. sky2_pci_write32(hw, PCI_DEV_REG5, reg);
  188. sky2_pci_write32(hw, PCI_CFG_REG_1, 0);
  189. /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
  190. reg = sky2_read32(hw, B2_GP_IO);
  191. reg |= GLB_GPIO_STAT_RACE_DIS;
  192. sky2_write32(hw, B2_GP_IO, reg);
  193. sky2_read32(hw, B2_GP_IO);
  194. }
  195. }
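  /* Prepare clock gating and switch chip power to VAUX if available */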
  196. static void sky2_power_aux(struct sky2_hw *hw)
  197. {
  198. if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
  199. sky2_write8(hw, B2_Y2_CLK_GATE, 0);
  200. else
  201. /* enable bits are inverted */
  202. sky2_write8(hw, B2_Y2_CLK_GATE,
  203. Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
  204. Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
  205. Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
  206. /* switch power to VAUX */
  207. if (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL)
  208. sky2_write8(hw, B0_POWER_CTRL,
  209. (PC_VAUX_ENA | PC_VCC_ENA |
  210. PC_VAUX_ON | PC_VCC_OFF));
  211. }
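  /* Quiesce the GMAC: mask its interrupts, clear the multicast hash and re-enable unicast/multicast filtering */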
  212. static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
  213. {
  214. u16 reg;
  215. /* disable all GMAC IRQ's */
  216. sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
  217. gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
  218. gma_write16(hw, port, GM_MC_ADDR_H2, 0);
  219. gma_write16(hw, port, GM_MC_ADDR_H3, 0);
  220. gma_write16(hw, port, GM_MC_ADDR_H4, 0);
  221. reg = gma_read16(hw, port, GM_RX_CTRL);
  222. reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
  223. gma_write16(hw, port, GM_RX_CTRL, reg);
  224. }
  225. /* flow control to advertise bits */
  226. static const u16 copper_fc_adv[] = {
  227. [FC_NONE] = 0,
  228. [FC_TX] = PHY_M_AN_ASP,
  229. [FC_RX] = PHY_M_AN_PC,
  230. [FC_BOTH] = PHY_M_AN_PC | PHY_M_AN_ASP,
  231. };
  232. /* flow control to advertise bits when using 1000BaseX */
  233. static const u16 fiber_fc_adv[] = {
  234. [FC_NONE] = PHY_M_P_NO_PAUSE_X,
  235. [FC_TX] = PHY_M_P_ASYM_MD_X,
  236. [FC_RX] = PHY_M_P_SYM_MD_X,
  237. [FC_BOTH] = PHY_M_P_BOTH_MD_X,
  238. };
  239. /* flow control to GMA disable bits */
  240. static const u16 gm_fc_disable[] = {
  241. [FC_NONE] = GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS,
  242. [FC_TX] = GM_GPCR_FC_RX_DIS,
  243. [FC_RX] = GM_GPCR_FC_TX_DIS,
  244. [FC_BOTH] = 0,
  245. };
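  /* Initialise the PHY: autonegotiation or forced speed/duplex, flow control and LED setup */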
  246. static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
  247. {
  248. struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
  249. u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;
  250. if (sky2->autoneg == AUTONEG_ENABLE &&
  251. !(hw->flags & SKY2_HW_NEWER_PHY)) {
  252. u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
  253. ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
  254. PHY_M_EC_MAC_S_MSK);
  255. ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
  256. /* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */
  257. if (hw->chip_id == CHIP_ID_YUKON_EC)
  258. /* set downshift counter to 3x and enable downshift */
  259. ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
  260. else
  261. /* set master & slave downshift counter to 1x */
  262. ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
  263. gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
  264. }
  265. ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
  266. if (sky2_is_copper(hw)) {
  267. if (!(hw->flags & SKY2_HW_GIGABIT)) {
  268. /* enable automatic crossover */
  269. ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
  270. if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
  271. hw->chip_rev == CHIP_REV_YU_FE2_A0) {
  272. u16 spec;
  273. /* Enable Class A driver for FE+ A0 */
  274. spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
  275. spec |= PHY_M_FESC_SEL_CL_A;
  276. gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
  277. }
  278. } else {
  279. /* disable energy detect */
  280. ctrl &= ~PHY_M_PC_EN_DET_MSK;
  281. /* enable automatic crossover */
  282. ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
  283. /* downshift on PHY 88E1112 and 88E1149 is changed */
  284. if (sky2->autoneg == AUTONEG_ENABLE
  285. && (hw->flags & SKY2_HW_NEWER_PHY)) {
  286. /* set downshift counter to 3x and enable downshift */
  287. ctrl &= ~PHY_M_PC_DSC_MSK;
  288. ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
  289. }
  290. }
  291. } else {
  292. /* workaround for deviation #4.88 (CRC errors) */
  293. /* disable Automatic Crossover */
  294. ctrl &= ~PHY_M_PC_MDIX_MSK;
  295. }
  296. gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
  297. /* special setup for PHY 88E1112 Fiber */
  298. if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) {
  299. pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
  300. /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
  301. gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
  302. ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
  303. ctrl &= ~PHY_M_MAC_MD_MSK;
  304. ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
  305. gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
  306. if (hw->pmd_type == 'P') {
  307. /* select page 1 to access Fiber registers */
  308. gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
  309. /* for SFP-module set SIGDET polarity to low */
  310. ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
  311. ctrl |= PHY_M_FIB_SIGD_POL;
  312. gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
  313. }
  314. gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
  315. }
  316. ctrl = PHY_CT_RESET;
  317. ct1000 = 0;
  318. adv = PHY_AN_CSMA;
  319. reg = 0;
  320. if (sky2->autoneg == AUTONEG_ENABLE) {
  321. if (sky2_is_copper(hw)) {
  322. if (sky2->advertising & ADVERTISED_1000baseT_Full)
  323. ct1000 |= PHY_M_1000C_AFD;
  324. if (sky2->advertising & ADVERTISED_1000baseT_Half)
  325. ct1000 |= PHY_M_1000C_AHD;
  326. if (sky2->advertising & ADVERTISED_100baseT_Full)
  327. adv |= PHY_M_AN_100_FD;
  328. if (sky2->advertising & ADVERTISED_100baseT_Half)
  329. adv |= PHY_M_AN_100_HD;
  330. if (sky2->advertising & ADVERTISED_10baseT_Full)
  331. adv |= PHY_M_AN_10_FD;
  332. if (sky2->advertising & ADVERTISED_10baseT_Half)
  333. adv |= PHY_M_AN_10_HD;
  334. adv |= copper_fc_adv[sky2->flow_mode];
  335. } else { /* special defines for FIBER (88E1040S only) */
  336. if (sky2->advertising & ADVERTISED_1000baseT_Full)
  337. adv |= PHY_M_AN_1000X_AFD;
  338. if (sky2->advertising & ADVERTISED_1000baseT_Half)
  339. adv |= PHY_M_AN_1000X_AHD;
  340. adv |= fiber_fc_adv[sky2->flow_mode];
  341. }
  342. /* Restart Auto-negotiation */
  343. ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
  344. } else {
  345. /* forced speed/duplex settings */
  346. ct1000 = PHY_M_1000C_MSE;
  347. /* Disable auto update for duplex flow control and speed */
  348. reg |= GM_GPCR_AU_ALL_DIS;
  349. switch (sky2->speed) {
  350. case SPEED_1000:
  351. ctrl |= PHY_CT_SP1000;
  352. reg |= GM_GPCR_SPEED_1000;
  353. break;
  354. case SPEED_100:
  355. ctrl |= PHY_CT_SP100;
  356. reg |= GM_GPCR_SPEED_100;
  357. break;
  358. }
  359. if (sky2->duplex == DUPLEX_FULL) {
  360. reg |= GM_GPCR_DUP_FULL;
  361. ctrl |= PHY_CT_DUP_MD;
  362. } else if (sky2->speed < SPEED_1000)
  363. sky2->flow_mode = FC_NONE;
  364. reg |= gm_fc_disable[sky2->flow_mode];
  365. /* Forward pause packets to GMAC? */
  366. if (sky2->flow_mode & FC_RX)
  367. sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
  368. else
  369. sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
  370. }
  371. gma_write16(hw, port, GM_GP_CTRL, reg);
  372. if (hw->flags & SKY2_HW_GIGABIT)
  373. gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
  374. gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
  375. gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
  376. /* Set up PHY LEDs */
  377. ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
  378. ledover = 0;
  379. switch (hw->chip_id) {
  380. case CHIP_ID_YUKON_FE:
  381. /* on 88E3082 these bits are at 11..9 (shifted left) */
  382. ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
  383. ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);
  384. /* delete ACT LED control bits */
  385. ctrl &= ~PHY_M_FELP_LED1_MSK;
  386. /* change ACT LED control to blink mode */
  387. ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
  388. gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
  389. break;
  390. case CHIP_ID_YUKON_FE_P:
  391. /* Enable Link Partner Next Page */
  392. ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
  393. ctrl |= PHY_M_PC_ENA_LIP_NP;
  394. /* disable Energy Detect and enable scrambler */
  395. ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB);
  396. gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
  397. /* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */
  398. ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) |
  399. PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) |
  400. PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED);
  401. gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
  402. break;
  403. case CHIP_ID_YUKON_XL:
  404. pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
  405. /* select page 3 to access LED control register */
  406. gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
  407. /* set LED Function Control register */
  408. gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
  409. (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
  410. PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
  411. PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
  412. PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
  413. /* set Polarity Control register */
  414. gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
  415. (PHY_M_POLC_LS1_P_MIX(4) |
  416. PHY_M_POLC_IS0_P_MIX(4) |
  417. PHY_M_POLC_LOS_CTRL(2) |
  418. PHY_M_POLC_INIT_CTRL(2) |
  419. PHY_M_POLC_STA1_CTRL(2) |
  420. PHY_M_POLC_STA0_CTRL(2)));
  421. /* restore page register */
  422. gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
  423. break;
  424. case CHIP_ID_YUKON_EC_U:
  425. case CHIP_ID_YUKON_EX:
  426. case CHIP_ID_YUKON_SUPR:
  427. pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
  428. /* select page 3 to access LED control register */
  429. gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
  430. /* set LED Function Control register */
  431. gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
  432. (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
  433. PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */
  434. PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
  435. PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */
  436. /* set Blink Rate in LED Timer Control Register */
  437. gm_phy_write(hw, port, PHY_MARV_INT_MASK,
  438. ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
  439. /* restore page register */
  440. gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
  441. break;
  442. default:
  443. /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
  444. ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
  445. /* turn off the Rx LED (LED_RX) */
  446. ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
  447. }
  448. if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_UL_2) {
  449. /* apply fixes in PHY AFE */
  450. gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);
  451. /* increase differential signal amplitude in 10BASE-T */
  452. gm_phy_write(hw, port, 0x18, 0xaa99);
  453. gm_phy_write(hw, port, 0x17, 0x2011);
  454. if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
  455. /* fix for IEEE A/B Symmetry failure in 1000BASE-T */
  456. gm_phy_write(hw, port, 0x18, 0xa204);
  457. gm_phy_write(hw, port, 0x17, 0x2002);
  458. }
  459. /* set page register to 0 */
  460. gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
  461. } else if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
  462. hw->chip_rev == CHIP_REV_YU_FE2_A0) {
  463. /* apply workaround for integrated resistors calibration */
  464. gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
  465. gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
  466. } else if (hw->chip_id != CHIP_ID_YUKON_EX &&
  467. hw->chip_id < CHIP_ID_YUKON_SUPR) {
  468. /* no effect on Yukon-XL */
  469. gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
  470. if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
  471. /* turn on 100 Mbps LED (LED_LINK100) */
  472. ledover |= PHY_M_LED_MO_100(MO_LED_ON);
  473. }
  474. if (ledover)
  475. gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
  476. }
  477. /* Enable phy interrupt on auto-negotiation complete (or link up) */
  478. if (sky2->autoneg == AUTONEG_ENABLE)
  479. gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
  480. else
  481. gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
  482. }
  483. static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
  484. static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
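  /* Power up the PHY for a port */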
  485. static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
  486. {
  487. u32 reg1;
  488. sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
  489. reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
  490. reg1 &= ~phy_power[port];
  491. if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
  492. reg1 |= coma_mode[port];
  493. sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
  494. sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
  495. sky2_pci_read32(hw, PCI_DEV_REG1);
  496. if (hw->chip_id == CHIP_ID_YUKON_FE)
  497. gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE);
  498. else if (hw->flags & SKY2_HW_ADV_POWER_CTL)
  499. sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
  500. }
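  /* Power down the PHY for a port (IEEE power-down / COMA mode) */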
  501. static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
  502. {
  503. u32 reg1;
  504. u16 ctrl;
  505. /* release GPHY Control reset */
  506. sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
  507. /* release GMAC reset */
  508. sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
  509. if (hw->flags & SKY2_HW_NEWER_PHY) {
  510. /* select page 2 to access MAC control register */
  511. gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
  512. ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
  513. /* allow GMII Power Down */
  514. ctrl &= ~PHY_M_MAC_GMIF_PUP;
  515. gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
  516. /* set page register back to 0 */
  517. gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
  518. }
  519. /* setup General Purpose Control Register */
  520. gma_write16(hw, port, GM_GP_CTRL,
  521. GM_GPCR_FL_PASS | GM_GPCR_SPEED_100 | GM_GPCR_AU_ALL_DIS);
  522. if (hw->chip_id != CHIP_ID_YUKON_EC) {
  523. if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
  524. /* select page 2 to access MAC control register */
  525. gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
  526. ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
  527. /* enable Power Down */
  528. ctrl |= PHY_M_PC_POW_D_ENA;
  529. gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
  530. /* set page register back to 0 */
  531. gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
  532. }
  533. /* set IEEE compatible Power Down Mode (dev. #4.99) */
  534. gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
  535. }
  536. sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
  537. reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
  538. reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */
  539. sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
  540. sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
  541. }
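  /* Enable Tx store-and-forward (jumbo frames are disabled on chips that support them) */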
  542. static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
  543. {
  544. if ( (hw->chip_id == CHIP_ID_YUKON_EX &&
  545. hw->chip_rev != CHIP_REV_YU_EX_A0) ||
  546. hw->chip_id == CHIP_ID_YUKON_FE_P ||
  547. hw->chip_id == CHIP_ID_YUKON_SUPR) {
  548. /* disable jumbo frames on devices that support them */
  549. sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
  550. TX_JUMBO_DIS | TX_STFW_ENA);
  551. } else {
  552. sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
  553. }
  554. }
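  /* Initialise the MAC for a port: reset the GMAC, bring up the PHY, and set up the MAC address, FIFOs and thresholds */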
  555. static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
  556. {
  557. u16 reg;
  558. u32 rx_reg;
  559. int i;
  560. const u8 *addr = hw->dev[port]->ll_addr;
  561. sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
  562. sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
  563. sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
  564. if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) {
  565. /* WA DEV_472 -- looks like crossed wires on port 2 */
  566. /* clear GMAC 1 Control reset */
  567. sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
  568. do {
  569. sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
  570. sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
  571. } while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
  572. gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
  573. gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
  574. }
  575. sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
  576. /* Enable Transmit FIFO Underrun */
  577. sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
  578. sky2_phy_power_up(hw, port);
  579. sky2_phy_init(hw, port);
  580. /* MIB clear */
  581. reg = gma_read16(hw, port, GM_PHY_ADDR);
  582. gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
  583. for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4)
  584. gma_read16(hw, port, i);
  585. gma_write16(hw, port, GM_PHY_ADDR, reg);
  586. /* transmit control */
  587. gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
  588. /* receive control reg: unicast + multicast + no FCS */
  589. gma_write16(hw, port, GM_RX_CTRL,
  590. GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
  591. /* transmit flow control */
  592. gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
  593. /* transmit parameter */
  594. gma_write16(hw, port, GM_TX_PARAM,
  595. TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
  596. TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
  597. TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
  598. TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
  599. /* serial mode register */
  600. reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
  601. GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
  602. gma_write16(hw, port, GM_SERIAL_MODE, reg);
  603. /* virtual address for data */
  604. gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
  605. /* physical address: used for pause frames */
  606. gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
  607. /* ignore counter overflows */
  608. gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
  609. gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
  610. gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
  611. /* Configure Rx MAC FIFO */
  612. sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
  613. rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
  614. if (hw->chip_id == CHIP_ID_YUKON_EX ||
  615. hw->chip_id == CHIP_ID_YUKON_FE_P)
  616. rx_reg |= GMF_RX_OVER_ON;
  617. sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg);
  618. if (hw->chip_id == CHIP_ID_YUKON_XL) {
  619. /* Hardware errata - clear flush mask */
  620. sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0);
  621. } else {
  622. /* Flush Rx MAC FIFO on any flow control or error */
  623. sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
  624. }
  625. /* Set threshold to 0xa (64 bytes) + 1 to work around pause bug */
  626. reg = RX_GMF_FL_THR_DEF + 1;
  627. /* Another magic mystery workaround from sk98lin */
  628. if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
  629. hw->chip_rev == CHIP_REV_YU_FE2_A0)
  630. reg = 0x178;
  631. sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg);
  632. /* Configure Tx MAC FIFO */
  633. sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
  634. sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
  635. /* On chips without a ram buffer, pause is controlled at the MAC level */
  636. if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
  637. sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
  638. sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
  639. sky2_set_tx_stfwd(hw, port);
  640. }
  641. if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
  642. hw->chip_rev == CHIP_REV_YU_FE2_A0) {
  643. /* disable dynamic watermark */
  644. reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA));
  645. reg &= ~TX_DYN_WM_ENA;
  646. sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg);
  647. }
  648. }
  649. /* Assign Ram Buffer allocation to queue */
  650. static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
  651. {
  652. u32 end;
  653. /* convert from K bytes to qwords used for hw register */
  654. start *= 1024/8;
  655. space *= 1024/8;
  656. end = start + space - 1;
  657. sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
  658. sky2_write32(hw, RB_ADDR(q, RB_START), start);
  659. sky2_write32(hw, RB_ADDR(q, RB_END), end);
  660. sky2_write32(hw, RB_ADDR(q, RB_WP), start);
  661. sky2_write32(hw, RB_ADDR(q, RB_RP), start);
  662. if (q == Q_R1 || q == Q_R2) {
  663. u32 tp = space - space/4;
  664. /* On receive queues set the thresholds:
  665. * give receiver priority when > 3/4 full,
  666. * send pause when down to 2K
  667. */
  668. sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
  669. sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
  670. tp = space - 2048/8;
  671. sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
  672. sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
  673. } else {
  674. /* Enable store & forward on Tx queues because
  675. * Tx FIFO is only 1K on Yukon
  676. */
  677. sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
  678. }
  679. sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
  680. sky2_read8(hw, RB_ADDR(q, RB_CTRL));
  681. }
  682. /* Setup Bus Memory Interface */
  683. static void sky2_qset(struct sky2_hw *hw, u16 q)
  684. {
  685. sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
  686. sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
  687. sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
  688. sky2_write32(hw, Q_ADDR(q, Q_WM), BMU_WM_DEFAULT);
  689. }
  690. /* Setup prefetch unit registers. This is the interface between
  691. * hardware and driver list elements
  692. */
  693. static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
  694. u64 addr, u32 last)
  695. {
  696. sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
  697. sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
  698. sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), addr >> 32);
  699. sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), (u32) addr);
  700. sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
  701. sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);
  702. sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
  703. }
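  /* Get the next free transmit list element and advance the producer index */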
  704. static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
  705. {
  706. struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;
  707. sky2->tx_prod = RING_NEXT(sky2->tx_prod, TX_RING_SIZE);
  708. le->ctrl = 0;
  709. return le;
  710. }
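  /* Initialise the transmit ring indices and prime the ring with an OP_ADDR64 element */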
  711. static void tx_init(struct sky2_port *sky2)
  712. {
  713. struct sky2_tx_le *le;
  714. sky2->tx_prod = sky2->tx_cons = 0;
  715. le = get_tx_le(sky2);
  716. le->addr = 0;
  717. le->opcode = OP_ADDR64 | HW_OWNER;
  718. }
  719. static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2,
  720. struct sky2_tx_le *le)
  721. {
  722. return sky2->tx_ring + (le - sky2->tx_le);
  723. }
  724. /* Update chip's next pointer */
  725. static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
  726. {
  727. /* Make sure writes to descriptors are complete before we tell hardware */
  728. wmb();
  729. sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
  730. DBGIO(PFX "queue %#x idx <- %d\n", q, idx);
  731. }
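  /* Get the next receive list element and advance the put index */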
  732. static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
  733. {
  734. struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
  735. sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
  736. le->ctrl = 0;
  737. return le;
  738. }
  739. /* Build description to hardware for one receive segment */
  740. static void sky2_rx_add(struct sky2_port *sky2, u8 op,
  741. u32 map, unsigned len)
  742. {
  743. struct sky2_rx_le *le;
  744. le = sky2_next_rx(sky2);
  745. le->addr = cpu_to_le32(map);
  746. le->length = cpu_to_le16(len);
  747. le->opcode = op | HW_OWNER;
  748. }
  749. /* Build description to hardware for one possibly fragmented skb */
  750. static void sky2_rx_submit(struct sky2_port *sky2,
  751. const struct rx_ring_info *re)
  752. {
  753. sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size);
  754. }
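  /* Record the bus address of an I/O buffer's data for use in the receive ring */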
  755. static void sky2_rx_map_iob(struct pci_device *pdev __unused,
  756. struct rx_ring_info *re,
  757. unsigned size __unused)
  758. {
  759. struct io_buffer *iob = re->iob;
  760. re->data_addr = virt_to_bus(iob->data);
  761. }
  762. /* Disable checksum offloading.
  763. */
  764. static void rx_set_checksum(struct sky2_port *sky2)
  765. {
  766. struct sky2_rx_le *le = sky2_next_rx(sky2);
  767. le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
  768. le->ctrl = 0;
  769. le->opcode = OP_TCPSTART | HW_OWNER;
  770. sky2_write32(sky2->hw,
  771. Q_ADDR(rxqaddr[sky2->port], Q_CSR),
  772. BMU_DIS_RX_CHKSUM);
  773. }
  774. /*
  775. * The RX Stop command will not work for Yukon-2 if the BMU does not
  776. * reach the end of packet and since we can't make sure that we have
  777. * incoming data, we must reset the BMU while it is not doing a DMA
  778. * transfer. Since it is possible that the RX path is still active,
  779. * the RX RAM buffer will be stopped first, so any possible incoming
  780. * data will not trigger a DMA. After the RAM buffer is stopped, the
  781. * BMU is polled until any DMA in progress is ended and only then it
  782. * will be reset.
  783. */
  784. static void sky2_rx_stop(struct sky2_port *sky2)
  785. {
  786. struct sky2_hw *hw = sky2->hw;
  787. unsigned rxq = rxqaddr[sky2->port];
  788. int i;
  789. /* disable the RAM Buffer receive queue */
  790. sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
  791. for (i = 0; i < 0xffff; i++)
  792. if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
  793. == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
  794. goto stopped;
  795. DBG(PFX "%s: receiver stop failed\n", sky2->netdev->name);
  796. stopped:
  797. sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
  798. /* reset the Rx prefetch unit */
  799. sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
  800. wmb();
  801. }
  802. /* Clean out receive buffer area, assumes receiver hardware stopped */
  803. static void sky2_rx_clean(struct sky2_port *sky2)
  804. {
  805. unsigned i;
  806. memset(sky2->rx_le, 0, RX_LE_BYTES);
  807. for (i = 0; i < RX_PENDING; i++) {
  808. struct rx_ring_info *re = sky2->rx_ring + i;
  809. if (re->iob) {
  810. free_iob(re->iob);
  811. re->iob = NULL;
  812. }
  813. }
  814. }
  815. /*
  816. * Allocate an iob for receiving.
  817. */
  818. static struct io_buffer *sky2_rx_alloc(struct sky2_port *sky2)
  819. {
  820. struct io_buffer *iob;
  821. iob = alloc_iob(sky2->rx_data_size + ETH_DATA_ALIGN);
  822. if (!iob)
  823. return NULL;
  824. /*
  825. * Cards with a RAM buffer hang in the rx FIFO if the
  826. * receive buffer isn't aligned to (Linux module comments say
  827. * 64 bytes, Linux module code says 8 bytes). Since io_buffers
  828. * are always 2kb-aligned under iPXE, just leave it be
  829. * without ETH_DATA_ALIGN in those cases.
  830. *
  831. * XXX This causes unaligned access to the IP header,
  832. * which is undesirable, but it's less undesirable than the
  833. * card hanging.
  834. */
  835. if (!(sky2->hw->flags & SKY2_HW_RAM_BUFFER)) {
  836. iob_reserve(iob, ETH_DATA_ALIGN);
  837. }
  838. return iob;
  839. }
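  /* Tell the hardware about the receive buffers made available so far */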
  840. static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
  841. {
  842. sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
  843. }
  844. /*
  845. * Allocate and set up the receiver buffer pool.
  846. * In the normal case this ends up creating one list element per receive
  847. * buffer in the receive ring. One element is used for checksum
  848. * enable/disable, and one extra to avoid wrap.
  849. */
  850. static int sky2_rx_start(struct sky2_port *sky2)
  851. {
  852. struct sky2_hw *hw = sky2->hw;
  853. struct rx_ring_info *re;
  854. unsigned rxq = rxqaddr[sky2->port];
  855. unsigned i, size, thresh;
  856. sky2->rx_put = sky2->rx_next = 0;
  857. sky2_qset(hw, rxq);
  858. /* On PCI express lowering the watermark gives better performance */
  859. if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
  860. sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);
  861. /* These chips have no ram buffer?
  862. * MAC Rx RAM Read is controlled by hardware */
  863. if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
  864. (hw->chip_rev == CHIP_REV_YU_EC_U_A1
  865. || hw->chip_rev == CHIP_REV_YU_EC_U_B0))
  866. sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
  867. sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
  868. if (!(hw->flags & SKY2_HW_NEW_LE))
  869. rx_set_checksum(sky2);
  870. /* Space needed for frame data + headers rounded up */
  871. size = (ETH_FRAME_LEN + 8) & ~7;
  872. /* Stopping point for hardware truncation */
  873. thresh = (size - 8) / sizeof(u32);
  874. sky2->rx_data_size = size;
  875. /* Fill Rx ring */
  876. for (i = 0; i < RX_PENDING; i++) {
  877. re = sky2->rx_ring + i;
  878. re->iob = sky2_rx_alloc(sky2);
  879. if (!re->iob)
  880. goto nomem;
  881. sky2_rx_map_iob(hw->pdev, re, sky2->rx_data_size);
  882. sky2_rx_submit(sky2, re);
  883. }
  884. /*
  885. * The receiver hangs if it receives frames larger than the
  886. * packet buffer. As a workaround, truncate oversize frames, but
  887. * the register is limited to 9 bits, so if you do frames > 2052
  888. * you better get the MTU right!
  889. */
  890. if (thresh > 0x1ff)
  891. sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
  892. else {
  893. sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh);
  894. sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
  895. }
  896. /* Tell chip about available buffers */
  897. sky2_rx_update(sky2, rxq);
  898. return 0;
  899. nomem:
  900. sky2_rx_clean(sky2);
  901. return -ENOMEM;
  902. }
  903. /* Free the le and ring buffers */
  904. static void sky2_free_rings(struct sky2_port *sky2)
  905. {
  906. free_dma(sky2->rx_le, RX_LE_BYTES);
  907. free(sky2->rx_ring);
  908. free_dma(sky2->tx_le, TX_RING_SIZE * sizeof(struct sky2_tx_le));
  909. free(sky2->tx_ring);
  910. sky2->tx_le = NULL;
  911. sky2->rx_le = NULL;
  912. sky2->rx_ring = NULL;
  913. sky2->tx_ring = NULL;
  914. }
  915. /* Bring up network interface. */
  916. static int sky2_up(struct net_device *dev)
  917. {
  918. struct sky2_port *sky2 = netdev_priv(dev);
  919. struct sky2_hw *hw = sky2->hw;
  920. unsigned port = sky2->port;
  921. u32 imask, ramsize;
  922. int err = -ENOMEM;
  923. netdev_link_down(dev);
  924. /* must be power of 2 */
  925. sky2->tx_le = malloc_dma(TX_RING_SIZE * sizeof(struct sky2_tx_le), TX_RING_ALIGN);
  926. sky2->tx_le_map = virt_to_bus(sky2->tx_le);
  927. if (!sky2->tx_le)
  928. goto err_out;
  929. memset(sky2->tx_le, 0, TX_RING_SIZE * sizeof(struct sky2_tx_le));
  930. sky2->tx_ring = zalloc(TX_RING_SIZE * sizeof(struct tx_ring_info));
  931. if (!sky2->tx_ring)
  932. goto err_out;
  933. tx_init(sky2);
  934. sky2->rx_le = malloc_dma(RX_LE_BYTES, RX_RING_ALIGN);
  935. sky2->rx_le_map = virt_to_bus(sky2->rx_le);
  936. if (!sky2->rx_le)
  937. goto err_out;
  938. memset(sky2->rx_le, 0, RX_LE_BYTES);
  939. sky2->rx_ring = zalloc(RX_PENDING * sizeof(struct rx_ring_info));
  940. if (!sky2->rx_ring)
  941. goto err_out;
  942. sky2_mac_init(hw, port);
  943. /* Register is number of 4K blocks on internal RAM buffer. */
  944. ramsize = sky2_read8(hw, B2_E_0) * 4;
  945. if (ramsize > 0) {
  946. u32 rxspace;
  947. hw->flags |= SKY2_HW_RAM_BUFFER;
  948. DBG2(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
  949. if (ramsize < 16)
  950. rxspace = ramsize / 2;
  951. else
  952. rxspace = 8 + (2*(ramsize - 16))/3;
  953. sky2_ramset(hw, rxqaddr[port], 0, rxspace);
  954. sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
  955. /* Make sure SyncQ is disabled */
  956. sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
  957. RB_RST_SET);
  958. }
  959. sky2_qset(hw, txqaddr[port]);
  960. /* This is copied from sk98lin 10.0.5.3; no one tells me about errata */
  961. if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0)
  962. sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF);
  963. /* Set almost empty threshold */
  964. if (hw->chip_id == CHIP_ID_YUKON_EC_U
  965. && hw->chip_rev == CHIP_REV_YU_EC_U_A0)
  966. sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV);
  967. sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
  968. TX_RING_SIZE - 1);
  969. err = sky2_rx_start(sky2);
  970. if (err)
  971. goto err_out;
  972. /* Enable interrupts from phy/mac for port */
  973. imask = sky2_read32(hw, B0_IMSK);
  974. imask |= portirq_msk[port];
  975. sky2_write32(hw, B0_IMSK, imask);
  976. DBGIO(PFX "%s: le bases: st %p [%x], rx %p [%x], tx %p [%x]\n",
  977. dev->name, hw->st_le, hw->st_dma, sky2->rx_le, sky2->rx_le_map,
  978. sky2->tx_le, sky2->tx_le_map);
  979. sky2_set_multicast(dev);
  980. return 0;
  981. err_out:
  982. sky2_free_rings(sky2);
  983. return err;
  984. }
  985. /* Modular subtraction in ring */
  986. static inline int tx_dist(unsigned tail, unsigned head)
  987. {
  988. return (head - tail) & (TX_RING_SIZE - 1);
  989. }
  990. /* Number of list elements available for next tx */
  991. static inline int tx_avail(const struct sky2_port *sky2)
  992. {
  993. return TX_PENDING - tx_dist(sky2->tx_cons, sky2->tx_prod);
  994. }
  995. /*
  996. * Put one packet in ring for transmit.
  997. * A single packet can generate multiple list elements, and
  998. * the number of ring elements will probably be less than the number
  999. * of list elements used.
  1000. */
  1001. static int sky2_xmit_frame(struct net_device *dev, struct io_buffer *iob)
  1002. {
  1003. struct sky2_port *sky2 = netdev_priv(dev);
  1004. struct sky2_hw *hw = sky2->hw;
  1005. struct sky2_tx_le *le = NULL;
  1006. struct tx_ring_info *re;
  1007. unsigned len;
  1008. u32 mapping;
  1009. u8 ctrl;
  1010. if (tx_avail(sky2) < 1)
  1011. return -EBUSY;
  1012. len = iob_len(iob);
  1013. mapping = virt_to_bus(iob->data);
  1014. DBGIO(PFX "%s: tx queued, slot %d, len %d\n", dev->name,
  1015. sky2->tx_prod, len);
  1016. ctrl = 0;
  1017. le = get_tx_le(sky2);
  1018. le->addr = cpu_to_le32((u32) mapping);
  1019. le->length = cpu_to_le16(len);
  1020. le->ctrl = ctrl;
  1021. le->opcode = (OP_PACKET | HW_OWNER);
  1022. re = tx_le_re(sky2, le);
  1023. re->iob = iob;
  1024. le->ctrl |= EOP;
  1025. sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
  1026. return 0;
  1027. }
  1028. /*
  1029. * Free ring elements starting at tx_cons until "done"
  1030. *
  1031. * NB: the hardware will tell us about partial completion of multi-part
  1032. * buffers so make sure not to free iob too early.
  1033. */
  1034. static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
  1035. {
  1036. struct net_device *dev = sky2->netdev;
  1037. unsigned idx;
  1038. assert(done < TX_RING_SIZE);
  1039. for (idx = sky2->tx_cons; idx != done;
  1040. idx = RING_NEXT(idx, TX_RING_SIZE)) {
  1041. struct sky2_tx_le *le = sky2->tx_le + idx;
  1042. struct tx_ring_info *re = sky2->tx_ring + idx;
  1043. if (le->ctrl & EOP) {
  1044. DBGIO(PFX "%s: tx done %d\n", dev->name, idx);
  1045. netdev_tx_complete(dev, re->iob);
  1046. }
  1047. }
  1048. sky2->tx_cons = idx;
  1049. mb();
  1050. }
  1051. /* Cleanup all untransmitted buffers, assume transmitter not running */
  1052. static void sky2_tx_clean(struct net_device *dev)
  1053. {
  1054. struct sky2_port *sky2 = netdev_priv(dev);
  1055. sky2_tx_complete(sky2, sky2->tx_prod);
  1056. }
  1057. /* Network shutdown */
  1058. static void sky2_down(struct net_device *dev)
  1059. {
  1060. struct sky2_port *sky2 = netdev_priv(dev);
  1061. struct sky2_hw *hw = sky2->hw;
  1062. unsigned port = sky2->port;
  1063. u16 ctrl;
  1064. u32 imask;
  1065. /* Never really got started! */
  1066. if (!sky2->tx_le)
  1067. return;
  1068. DBG2(PFX "%s: disabling interface\n", dev->name);
  1069. /* Disable port IRQ */
  1070. imask = sky2_read32(hw, B0_IMSK);
  1071. imask &= ~portirq_msk[port];
  1072. sky2_write32(hw, B0_IMSK, imask);
  1073. sky2_gmac_reset(hw, port);
  1074. /* Stop transmitter */
  1075. sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
  1076. sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));
  1077. sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
  1078. RB_RST_SET | RB_DIS_OP_MD);
  1079. ctrl = gma_read16(hw, port, GM_GP_CTRL);
  1080. ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
  1081. gma_write16(hw, port, GM_GP_CTRL, ctrl);
  1082. sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
  1083. /* Workaround shared GMAC reset */
  1084. if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0
  1085. && port == 0 && hw->dev[1]))
  1086. sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
  1087. /* Disable Force Sync bit and Enable Alloc bit */
  1088. sky2_write8(hw, SK_REG(port, TXA_CTRL),
  1089. TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
  1090. /* Stop Interval Timer and Limit Counter of Tx Arbiter */
  1091. sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
  1092. sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
  1093. /* Reset the PCI FIFO of the async Tx queue */
  1094. sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
  1095. BMU_RST_SET | BMU_FIFO_RST);
  1096. /* Reset the Tx prefetch units */
  1097. sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
  1098. PREF_UNIT_RST_SET);
  1099. sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
  1100. sky2_rx_stop(sky2);
  1101. sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
  1102. sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
  1103. sky2_phy_power_down(hw, port);
  1104. /* turn off LEDs */
  1105. sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
  1106. sky2_tx_clean(dev);
  1107. sky2_rx_clean(sky2);
  1108. sky2_free_rings(sky2);
  1109. return;
  1110. }
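  /* Decode the link speed from the PHY auxiliary status word */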
  1111. static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
  1112. {
  1113. if (hw->flags & SKY2_HW_FIBRE_PHY)
  1114. return SPEED_1000;
  1115. if (!(hw->flags & SKY2_HW_GIGABIT)) {
  1116. if (aux & PHY_M_PS_SPEED_100)
  1117. return SPEED_100;
  1118. else
  1119. return SPEED_10;
  1120. }
  1121. switch (aux & PHY_M_PS_SPEED_MSK) {
  1122. case PHY_M_PS_SPEED_1000:
  1123. return SPEED_1000;
  1124. case PHY_M_PS_SPEED_100:
  1125. return SPEED_100;
  1126. default:
  1127. return SPEED_10;
  1128. }
  1129. }
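  /* Handle link up: enable Rx/Tx, unmask PHY interrupts and turn on the link LED */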
  1130. static void sky2_link_up(struct sky2_port *sky2)
  1131. {
  1132. struct sky2_hw *hw = sky2->hw;
  1133. unsigned port = sky2->port;
  1134. u16 reg;
  1135. static const char *fc_name[] = {
  1136. [FC_NONE] = "none",
  1137. [FC_TX] = "tx",
  1138. [FC_RX] = "rx",
  1139. [FC_BOTH] = "both",
  1140. };
  1141. /* enable Rx/Tx */
  1142. reg = gma_read16(hw, port, GM_GP_CTRL);
  1143. reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
  1144. gma_write16(hw, port, GM_GP_CTRL, reg);
  1145. gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
  1146. netdev_link_up(sky2->netdev);
  1147. /* Turn on link LED */
  1148. sky2_write8(hw, SK_REG(port, LNK_LED_REG),
  1149. LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
  1150. DBG(PFX "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
  1151. sky2->netdev->name, sky2->speed,
  1152. sky2->duplex == DUPLEX_FULL ? "full" : "half",
  1153. fc_name[sky2->flow_status]);
  1154. }
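  /* Handle link down: disable Rx/Tx, turn off the link LED and re-initialise the PHY */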
  1155. static void sky2_link_down(struct sky2_port *sky2)
  1156. {
  1157. struct sky2_hw *hw = sky2->hw;
  1158. unsigned port = sky2->port;
  1159. u16 reg;
  1160. gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
  1161. reg = gma_read16(hw, port, GM_GP_CTRL);
  1162. reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
  1163. gma_write16(hw, port, GM_GP_CTRL, reg);
  1164. netdev_link_down(sky2->netdev);
1165. /* Turn off link LED */
  1166. sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
  1167. DBG(PFX "%s: Link is down.\n", sky2->netdev->name);
  1168. sky2_phy_init(hw, port);
  1169. }
  1170. static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
  1171. {
  1172. struct sky2_hw *hw = sky2->hw;
  1173. unsigned port = sky2->port;
  1174. u16 advert, lpa;
  1175. advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
  1176. lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
  1177. if (lpa & PHY_M_AN_RF) {
  1178. DBG(PFX "%s: remote fault\n", sky2->netdev->name);
  1179. return -1;
  1180. }
  1181. if (!(aux & PHY_M_PS_SPDUP_RES)) {
  1182. DBG(PFX "%s: speed/duplex mismatch\n", sky2->netdev->name);
  1183. return -1;
  1184. }
  1185. sky2->speed = sky2_phy_speed(hw, aux);
  1186. sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1187. /* Since the pause result bits seem to be in different positions on
1188. * different chips, look at the registers.
  1189. */
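/* Resolve flow control from our advertisement (advert) and the link
 * partner's ability (lpa), roughly following the 802.3 pause
 * resolution rules: symmetric pause on both sides gives FC_BOTH,
 * asymmetric combinations give FC_RX or FC_TX, otherwise FC_NONE. */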
  1190. sky2->flow_status = FC_NONE;
  1191. if (advert & ADVERTISE_PAUSE_CAP) {
  1192. if (lpa & LPA_PAUSE_CAP)
  1193. sky2->flow_status = FC_BOTH;
  1194. else if (advert & ADVERTISE_PAUSE_ASYM)
  1195. sky2->flow_status = FC_RX;
  1196. } else if (advert & ADVERTISE_PAUSE_ASYM) {
  1197. if ((lpa & LPA_PAUSE_CAP) && (lpa & LPA_PAUSE_ASYM))
  1198. sky2->flow_status = FC_TX;
  1199. }
  1200. if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000
  1201. && !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX))
  1202. sky2->flow_status = FC_NONE;
  1203. if (sky2->flow_status & FC_TX)
  1204. sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
  1205. else
  1206. sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
  1207. return 0;
  1208. }
  1209. /* Interrupt from PHY */
  1210. static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
  1211. {
  1212. struct net_device *dev = hw->dev[port];
  1213. struct sky2_port *sky2 = netdev_priv(dev);
  1214. u16 istatus, phystat;
  1215. istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
  1216. phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
  1217. DBGIO(PFX "%s: phy interrupt status 0x%x 0x%x\n",
  1218. sky2->netdev->name, istatus, phystat);
  1219. if (sky2->autoneg == AUTONEG_ENABLE && (istatus & PHY_M_IS_AN_COMPL)) {
  1220. if (sky2_autoneg_done(sky2, phystat) == 0)
  1221. sky2_link_up(sky2);
  1222. return;
  1223. }
  1224. if (istatus & PHY_M_IS_LSP_CHANGE)
  1225. sky2->speed = sky2_phy_speed(hw, phystat);
  1226. if (istatus & PHY_M_IS_DUP_CHANGE)
  1227. sky2->duplex =
  1228. (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
  1229. if (istatus & PHY_M_IS_LST_CHANGE) {
  1230. if (phystat & PHY_M_PS_LINK_UP)
  1231. sky2_link_up(sky2);
  1232. else
  1233. sky2_link_down(sky2);
  1234. }
  1235. }
1236. /* Normal packet - take the iob from the ring element and put a new one in its place */
  1237. static struct io_buffer *receive_new(struct sky2_port *sky2,
  1238. struct rx_ring_info *re,
  1239. unsigned int length)
  1240. {
  1241. struct io_buffer *iob, *niob;
  1242. unsigned hdr_space = sky2->rx_data_size;
  1243. /* Don't be tricky about reusing pages (yet) */
  1244. niob = sky2_rx_alloc(sky2);
  1245. if (!niob)
  1246. return NULL;
  1247. iob = re->iob;
  1248. re->iob = niob;
  1249. sky2_rx_map_iob(sky2->hw->pdev, re, hdr_space);
  1250. iob_put(iob, length);
  1251. return iob;
  1252. }
  1253. /*
  1254. * Receive one packet.
1255. * On success the iob is handed up and its ring slot is refilled with a new buffer.
  1256. */
  1257. static struct io_buffer *sky2_receive(struct net_device *dev,
  1258. u16 length, u32 status)
  1259. {
  1260. struct sky2_port *sky2 = netdev_priv(dev);
  1261. struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
  1262. struct io_buffer *iob = NULL;
  1263. u16 count = (status & GMR_FS_LEN) >> 16;
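/* 'count' is the frame length taken from the GMAC receive status word;
 * 'length' is the length reported in the DMA status list entry. The two
 * are compared below to detect truncated frames. */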
  1264. DBGIO(PFX "%s: rx slot %d status 0x%x len %d\n",
  1265. dev->name, sky2->rx_next, status, length);
  1266. sky2->rx_next = (sky2->rx_next + 1) % RX_PENDING;
1267. /* This chip has hardware problems that generate bogus status,
1268. * so do only marginal checking and expect higher-level protocols
1269. * to handle bad frames.
  1270. */
  1271. if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
  1272. sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 &&
  1273. length == count)
  1274. goto okay;
  1275. if (status & GMR_FS_ANY_ERR)
  1276. goto error;
  1277. if (!(status & GMR_FS_RX_OK))
  1278. goto resubmit;
1279. /* If the length reported by DMA does not match the PHY length, the packet was truncated */
  1280. if (length != count)
  1281. goto len_error;
  1282. okay:
  1283. iob = receive_new(sky2, re, length);
  1284. resubmit:
  1285. sky2_rx_submit(sky2, re);
  1286. return iob;
  1287. len_error:
  1288. /* Truncation of overlength packets
  1289. causes PHY length to not match MAC length */
  1290. DBG2(PFX "%s: rx length error: status %#x length %d\n",
  1291. dev->name, status, length);
  1292. /* Pass NULL as iob because we want to keep our iob in the
  1293. ring for the next packet. */
  1294. netdev_rx_err(dev, NULL, -EINVAL);
  1295. goto resubmit;
  1296. error:
  1297. if (status & GMR_FS_RX_FF_OV) {
  1298. DBG2(PFX "%s: FIFO overflow error\n", dev->name);
  1299. netdev_rx_err(dev, NULL, -EBUSY);
  1300. goto resubmit;
  1301. }
  1302. DBG2(PFX "%s: rx error, status 0x%x length %d\n",
  1303. dev->name, status, length);
  1304. netdev_rx_err(dev, NULL, -EIO);
  1305. goto resubmit;
  1306. }
  1307. /* Transmit complete */
  1308. static inline void sky2_tx_done(struct net_device *dev, u16 last)
  1309. {
  1310. struct sky2_port *sky2 = netdev_priv(dev);
  1311. sky2_tx_complete(sky2, last);
  1312. }
  1313. /* Process status response ring */
  1314. static void sky2_status_intr(struct sky2_hw *hw, u16 idx)
  1315. {
  1316. unsigned rx[2] = { 0, 0 };
  1317. rmb();
  1318. do {
  1319. struct sky2_port *sky2;
  1320. struct sky2_status_le *le = hw->st_le + hw->st_idx;
  1321. unsigned port;
  1322. struct net_device *dev;
  1323. struct io_buffer *iob;
  1324. u32 status;
  1325. u16 length;
  1326. u8 opcode = le->opcode;
  1327. if (!(opcode & HW_OWNER))
  1328. break;
  1329. port = le->css & CSS_LINK_BIT;
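/* The CSS link bit selects which of the two ports this status entry
 * belongs to. */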
  1330. dev = hw->dev[port];
  1331. sky2 = netdev_priv(dev);
  1332. length = le16_to_cpu(le->length);
  1333. status = le32_to_cpu(le->status);
  1334. hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE);
  1335. le->opcode = 0;
  1336. switch (opcode & ~HW_OWNER) {
  1337. case OP_RXSTAT:
  1338. ++rx[port];
  1339. iob = sky2_receive(dev, length, status);
  1340. if (!iob) {
  1341. netdev_rx_err(dev, NULL, -ENOMEM);
  1342. break;
  1343. }
  1344. netdev_rx(dev, iob);
  1345. break;
  1346. case OP_RXCHKS:
  1347. DBG2(PFX "status OP_RXCHKS but checksum offloading disabled\n");
  1348. break;
  1349. case OP_TXINDEXLE:
  1350. /* TX index reports status for both ports */
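/* A single OP_TXINDEXLE entry carries the last completed descriptor
 * index for both ports: port 0 in the low 12 bits of 'status', and
 * port 1 with its low byte in status bits 24-31 and its top nibble in
 * the low 4 bits of 'length', unpacked below. */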
  1351. assert(TX_RING_SIZE <= 0x1000);
  1352. sky2_tx_done(hw->dev[0], status & 0xfff);
  1353. if (hw->dev[1])
  1354. sky2_tx_done(hw->dev[1],
  1355. ((status >> 24) & 0xff)
  1356. | (u16)(length & 0xf) << 8);
  1357. break;
  1358. default:
  1359. DBG(PFX "unknown status opcode 0x%x\n", opcode);
  1360. }
  1361. } while (hw->st_idx != idx);
  1362. /* Fully processed status ring so clear irq */
  1363. sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
  1364. if (rx[0])
  1365. sky2_rx_update(netdev_priv(hw->dev[0]), Q_R1);
  1366. if (rx[1])
  1367. sky2_rx_update(netdev_priv(hw->dev[1]), Q_R2);
  1368. }
  1369. static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
  1370. {
  1371. struct net_device *dev = hw->dev[port];
  1372. DBGIO(PFX "%s: hw error interrupt status 0x%x\n", dev->name, status);
  1373. if (status & Y2_IS_PAR_RD1) {
  1374. DBG(PFX "%s: ram data read parity error\n", dev->name);
  1375. /* Clear IRQ */
  1376. sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
  1377. }
  1378. if (status & Y2_IS_PAR_WR1) {
  1379. DBG(PFX "%s: ram data write parity error\n", dev->name);
  1380. sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
  1381. }
  1382. if (status & Y2_IS_PAR_MAC1) {
  1383. DBG(PFX "%s: MAC parity error\n", dev->name);
  1384. sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
  1385. }
  1386. if (status & Y2_IS_PAR_RX1) {
  1387. DBG(PFX "%s: RX parity error\n", dev->name);
  1388. sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
  1389. }
  1390. if (status & Y2_IS_TCP_TXA1) {
  1391. DBG(PFX "%s: TCP segmentation error\n", dev->name);
  1392. sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
  1393. }
  1394. }
  1395. static void sky2_hw_intr(struct sky2_hw *hw)
  1396. {
  1397. u32 status = sky2_read32(hw, B0_HWE_ISRC);
  1398. u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
  1399. status &= hwmsk;
  1400. if (status & Y2_IS_TIST_OV)
  1401. sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
  1402. if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
  1403. u16 pci_err;
  1404. sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
  1405. pci_err = sky2_pci_read16(hw, PCI_STATUS);
  1406. DBG(PFX "PCI hardware error (0x%x)\n", pci_err);
  1407. sky2_pci_write16(hw, PCI_STATUS,
  1408. pci_err | PCI_STATUS_ERROR_BITS);
  1409. sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
  1410. }
  1411. if (status & Y2_IS_PCI_EXP) {
1412. /* A PCI Express uncorrectable error occurred */
  1413. u32 err;
  1414. sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
  1415. err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
  1416. sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
  1417. 0xfffffffful);
  1418. DBG(PFX "PCI-Express error (0x%x)\n", err);
  1419. sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
  1420. sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
  1421. }
  1422. if (status & Y2_HWE_L1_MASK)
  1423. sky2_hw_error(hw, 0, status);
  1424. status >>= 8;
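/* Port 1's hardware error bits sit 8 bits above port 0's, so shift and
 * reuse the same per-port mask. */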
  1425. if (status & Y2_HWE_L1_MASK)
  1426. sky2_hw_error(hw, 1, status);
  1427. }
  1428. static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
  1429. {
  1430. struct net_device *dev = hw->dev[port];
  1431. u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
  1432. DBGIO(PFX "%s: mac interrupt status 0x%x\n", dev->name, status);
  1433. if (status & GM_IS_RX_CO_OV)
  1434. gma_read16(hw, port, GM_RX_IRQ_SRC);
  1435. if (status & GM_IS_TX_CO_OV)
  1436. gma_read16(hw, port, GM_TX_IRQ_SRC);
  1437. if (status & GM_IS_RX_FF_OR) {
  1438. sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
  1439. }
  1440. if (status & GM_IS_TX_FF_UR) {
  1441. sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
  1442. }
  1443. }
1444. /* This should never happen; it is a bug. */
  1445. static void sky2_le_error(struct sky2_hw *hw, unsigned port,
  1446. u16 q, unsigned ring_size __unused)
  1447. {
  1448. struct net_device *dev = hw->dev[port];
  1449. struct sky2_port *sky2 = netdev_priv(dev);
  1450. int idx;
  1451. const u64 *le = (q == Q_R1 || q == Q_R2)
  1452. ? (u64 *) sky2->rx_le : (u64 *) sky2->tx_le;
  1453. idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
  1454. DBG(PFX "%s: descriptor error q=%#x get=%d [%llx] last=%d put=%d should be %d\n",
  1455. dev->name, (unsigned) q, idx, (unsigned long long) le[idx],
  1456. (int) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_LAST_IDX)),
  1457. (int) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)),
  1458. le == (u64 *)sky2->rx_le? sky2->rx_put : sky2->tx_prod);
  1459. sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
  1460. }
  1461. /* Hardware/software error handling */
  1462. static void sky2_err_intr(struct sky2_hw *hw, u32 status)
  1463. {
  1464. DBG(PFX "error interrupt status=%#x\n", status);
  1465. if (status & Y2_IS_HW_ERR)
  1466. sky2_hw_intr(hw);
  1467. if (status & Y2_IS_IRQ_MAC1)
  1468. sky2_mac_intr(hw, 0);
  1469. if (status & Y2_IS_IRQ_MAC2)
  1470. sky2_mac_intr(hw, 1);
  1471. if (status & Y2_IS_CHK_RX1)
  1472. sky2_le_error(hw, 0, Q_R1, RX_LE_SIZE);
  1473. if (status & Y2_IS_CHK_RX2)
  1474. sky2_le_error(hw, 1, Q_R2, RX_LE_SIZE);
  1475. if (status & Y2_IS_CHK_TXA1)
  1476. sky2_le_error(hw, 0, Q_XA1, TX_RING_SIZE);
  1477. if (status & Y2_IS_CHK_TXA2)
  1478. sky2_le_error(hw, 1, Q_XA2, TX_RING_SIZE);
  1479. }
  1480. static void sky2_poll(struct net_device *dev)
  1481. {
  1482. struct sky2_port *sky2 = netdev_priv(dev);
  1483. struct sky2_hw *hw = sky2->hw;
  1484. u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
  1485. u16 idx;
  1486. if (status & Y2_IS_ERROR)
  1487. sky2_err_intr(hw, status);
  1488. if (status & Y2_IS_IRQ_PHY1)
  1489. sky2_phy_intr(hw, 0);
  1490. if (status & Y2_IS_IRQ_PHY2)
  1491. sky2_phy_intr(hw, 1);
  1492. while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
  1493. sky2_status_intr(hw, idx);
  1494. }
  1495. /* Bug/Errata workaround?
  1496. * Need to kick the TX irq moderation timer.
  1497. */
  1498. if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) {
  1499. sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
  1500. sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
  1501. }
  1502. sky2_read32(hw, B0_Y2_SP_LISR);
  1503. }
  1504. /* Chip internal frequency for clock calculations */
  1505. static u32 sky2_mhz(const struct sky2_hw *hw)
  1506. {
  1507. switch (hw->chip_id) {
  1508. case CHIP_ID_YUKON_EC:
  1509. case CHIP_ID_YUKON_EC_U:
  1510. case CHIP_ID_YUKON_EX:
  1511. case CHIP_ID_YUKON_SUPR:
  1512. case CHIP_ID_YUKON_UL_2:
  1513. return 125;
  1514. case CHIP_ID_YUKON_FE:
  1515. return 100;
  1516. case CHIP_ID_YUKON_FE_P:
  1517. return 50;
  1518. case CHIP_ID_YUKON_XL:
  1519. return 156;
  1520. default:
  1521. DBG(PFX "unknown chip ID!\n");
  1522. return 100; /* bogus */
  1523. }
  1524. }
  1525. static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
  1526. {
  1527. return sky2_mhz(hw) * us;
  1528. }
  1529. static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
  1530. {
  1531. return clk / sky2_mhz(hw);
  1532. }
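/* Worked example: on a 125 MHz part such as Yukon-EC, sky2_us2clk(hw, 1000)
 * yields 125000 clock ticks; sky2_reset() below programs this value into
 * STAT_TX_TIMER_INI. */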
  1533. static int sky2_init(struct sky2_hw *hw)
  1534. {
  1535. u8 t8;
  1536. /* Enable all clocks and check for bad PCI access */
  1537. sky2_pci_write32(hw, PCI_DEV_REG3, 0);
  1538. sky2_write8(hw, B0_CTST, CS_RST_CLR);
  1539. hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
  1540. hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
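/* The chip revision is kept in the upper nibble of B2_MAC_CFG. */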
  1541. switch(hw->chip_id) {
  1542. case CHIP_ID_YUKON_XL:
  1543. hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
  1544. break;
  1545. case CHIP_ID_YUKON_EC_U:
  1546. hw->flags = SKY2_HW_GIGABIT
  1547. | SKY2_HW_NEWER_PHY
  1548. | SKY2_HW_ADV_POWER_CTL;
  1549. break;
  1550. case CHIP_ID_YUKON_EX:
  1551. hw->flags = SKY2_HW_GIGABIT
  1552. | SKY2_HW_NEWER_PHY
  1553. | SKY2_HW_NEW_LE
  1554. | SKY2_HW_ADV_POWER_CTL;
  1555. break;
  1556. case CHIP_ID_YUKON_EC:
  1557. /* This rev is really old, and requires untested workarounds */
  1558. if (hw->chip_rev == CHIP_REV_YU_EC_A1) {
  1559. DBG(PFX "unsupported revision Yukon-EC rev A1\n");
  1560. return -EOPNOTSUPP;
  1561. }
  1562. hw->flags = SKY2_HW_GIGABIT;
  1563. break;
  1564. case CHIP_ID_YUKON_FE:
  1565. break;
  1566. case CHIP_ID_YUKON_FE_P:
  1567. hw->flags = SKY2_HW_NEWER_PHY
  1568. | SKY2_HW_NEW_LE
  1569. | SKY2_HW_AUTO_TX_SUM
  1570. | SKY2_HW_ADV_POWER_CTL;
  1571. break;
  1572. case CHIP_ID_YUKON_SUPR:
  1573. hw->flags = SKY2_HW_GIGABIT
  1574. | SKY2_HW_NEWER_PHY
  1575. | SKY2_HW_NEW_LE
  1576. | SKY2_HW_AUTO_TX_SUM
  1577. | SKY2_HW_ADV_POWER_CTL;
  1578. break;
  1579. case CHIP_ID_YUKON_UL_2:
  1580. hw->flags = SKY2_HW_GIGABIT
  1581. | SKY2_HW_ADV_POWER_CTL;
  1582. break;
  1583. default:
  1584. DBG(PFX "unsupported chip type 0x%x\n", hw->chip_id);
  1585. return -EOPNOTSUPP;
  1586. }
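/* PMD types 'L', 'S' and 'P' are treated as fibre media; anything else
 * is assumed to be copper. */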
  1587. hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
  1588. if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
  1589. hw->flags |= SKY2_HW_FIBRE_PHY;
  1590. hw->ports = 1;
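/* A second port is reported only if the dual-MAC bits are set and the
 * second link is not flagged as inactive in the clock gating register. */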
  1591. t8 = sky2_read8(hw, B2_Y2_HW_RES);
  1592. if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
  1593. if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
  1594. ++hw->ports;
  1595. }
  1596. return 0;
  1597. }
  1598. static void sky2_reset(struct sky2_hw *hw)
  1599. {
  1600. u16 status;
  1601. int i, cap;
  1602. u32 hwe_mask = Y2_HWE_ALL_MASK;
  1603. /* disable ASF */
  1604. if (hw->chip_id == CHIP_ID_YUKON_EX) {
  1605. status = sky2_read16(hw, HCU_CCSR);
  1606. status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
  1607. HCU_CCSR_UC_STATE_MSK);
  1608. sky2_write16(hw, HCU_CCSR, status);
  1609. } else
  1610. sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
  1611. sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
  1612. /* do a SW reset */
  1613. sky2_write8(hw, B0_CTST, CS_RST_SET);
  1614. sky2_write8(hw, B0_CTST, CS_RST_CLR);
  1615. /* allow writes to PCI config */
  1616. sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
  1617. /* clear PCI errors, if any */
  1618. status = sky2_pci_read16(hw, PCI_STATUS);
  1619. status |= PCI_STATUS_ERROR_BITS;
  1620. sky2_pci_write16(hw, PCI_STATUS, status);
  1621. sky2_write8(hw, B0_CTST, CS_MRST_CLR);
  1622. cap = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP);
  1623. if (cap) {
  1624. sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
  1625. 0xfffffffful);
1626. /* If an error bit is stuck on, ignore it */
  1627. if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP)
  1628. DBG(PFX "ignoring stuck error report bit\n");
  1629. else
  1630. hwe_mask |= Y2_IS_PCI_EXP;
  1631. }
  1632. sky2_power_on(hw);
  1633. sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
  1634. for (i = 0; i < hw->ports; i++) {
  1635. sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
  1636. sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
  1637. if (hw->chip_id == CHIP_ID_YUKON_EX ||
  1638. hw->chip_id == CHIP_ID_YUKON_SUPR)
  1639. sky2_write16(hw, SK_REG(i, GMAC_CTRL),
  1640. GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
  1641. | GMC_BYP_RETR_ON);
  1642. }
  1643. /* Clear I2C IRQ noise */
  1644. sky2_write32(hw, B2_I2C_IRQ, 1);
  1645. /* turn off hardware timer (unused) */
  1646. sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
  1647. sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
  1648. sky2_write8(hw, B0_Y2LED, LED_STAT_ON);
  1649. /* Turn off descriptor polling */
  1650. sky2_write32(hw, B28_DPT_CTRL, DPT_STOP);
  1651. /* Turn off receive timestamp */
  1652. sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
  1653. sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
  1654. /* enable the Tx Arbiters */
  1655. for (i = 0; i < hw->ports; i++)
  1656. sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
  1657. /* Initialize ram interface */
  1658. for (i = 0; i < hw->ports; i++) {
  1659. sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
  1660. sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
  1661. sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
  1662. sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
  1663. sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
  1664. sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
  1665. sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
  1666. sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
  1667. sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
  1668. sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
  1669. sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
  1670. sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
  1671. sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
  1672. }
  1673. sky2_write32(hw, B0_HWE_IMSK, hwe_mask);
  1674. for (i = 0; i < hw->ports; i++)
  1675. sky2_gmac_reset(hw, i);
  1676. memset(hw->st_le, 0, STATUS_LE_BYTES);
  1677. hw->st_idx = 0;
  1678. sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
  1679. sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);
  1680. sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
  1681. sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
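/* The status list base address is programmed as a 64-bit value split
 * across the low and high 32-bit list address registers. */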
  1682. /* Set the list last index */
  1683. sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
  1684. sky2_write16(hw, STAT_TX_IDX_TH, 10);
  1685. sky2_write8(hw, STAT_FIFO_WM, 16);
  1686. /* set Status-FIFO ISR watermark */
  1687. if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
  1688. sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
  1689. else
  1690. sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
  1691. sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
  1692. sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
  1693. sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
  1694. /* enable status unit */
  1695. sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
  1696. sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
  1697. sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
  1698. sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
  1699. }
  1700. static u32 sky2_supported_modes(const struct sky2_hw *hw)
  1701. {
  1702. if (sky2_is_copper(hw)) {
  1703. u32 modes = SUPPORTED_10baseT_Half
  1704. | SUPPORTED_10baseT_Full
  1705. | SUPPORTED_100baseT_Half
  1706. | SUPPORTED_100baseT_Full
  1707. | SUPPORTED_Autoneg | SUPPORTED_TP;
  1708. if (hw->flags & SKY2_HW_GIGABIT)
  1709. modes |= SUPPORTED_1000baseT_Half
  1710. | SUPPORTED_1000baseT_Full;
  1711. return modes;
  1712. } else
  1713. return SUPPORTED_1000baseT_Half
  1714. | SUPPORTED_1000baseT_Full
  1715. | SUPPORTED_Autoneg
  1716. | SUPPORTED_FIBRE;
  1717. }
  1718. static void sky2_set_multicast(struct net_device *dev)
  1719. {
  1720. struct sky2_port *sky2 = netdev_priv(dev);
  1721. struct sky2_hw *hw = sky2->hw;
  1722. unsigned port = sky2->port;
  1723. u16 reg;
  1724. u8 filter[8];
  1725. int rx_pause;
  1726. rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH);
  1727. reg = gma_read16(hw, port, GM_RX_CTRL);
  1728. reg |= GM_RXCR_UCF_ENA;
  1729. memset(filter, 0xff, sizeof(filter));
  1730. gma_write16(hw, port, GM_MC_ADDR_H1,
  1731. (u16) filter[0] | ((u16) filter[1] << 8));
  1732. gma_write16(hw, port, GM_MC_ADDR_H2,
  1733. (u16) filter[2] | ((u16) filter[3] << 8));
  1734. gma_write16(hw, port, GM_MC_ADDR_H3,
  1735. (u16) filter[4] | ((u16) filter[5] << 8));
  1736. gma_write16(hw, port, GM_MC_ADDR_H4,
  1737. (u16) filter[6] | ((u16) filter[7] << 8));
  1738. gma_write16(hw, port, GM_RX_CTRL, reg);
  1739. }
  1740. /* Initialize network device */
  1741. static struct net_device *sky2_init_netdev(struct sky2_hw *hw,
  1742. unsigned port)
  1743. {
  1744. struct sky2_port *sky2;
  1745. struct net_device *dev = alloc_etherdev(sizeof(*sky2));
  1746. if (!dev) {
  1747. DBG(PFX "etherdev alloc failed\n");
  1748. return NULL;
  1749. }
  1750. dev->dev = &hw->pdev->dev;
  1751. sky2 = netdev_priv(dev);
  1752. sky2->netdev = dev;
  1753. sky2->hw = hw;
  1754. /* Auto speed and flow control */
  1755. sky2->autoneg = AUTONEG_ENABLE;
  1756. sky2->flow_mode = FC_BOTH;
  1757. sky2->duplex = -1;
  1758. sky2->speed = -1;
  1759. sky2->advertising = sky2_supported_modes(hw);
  1760. hw->dev[port] = dev;
  1761. sky2->port = port;
1762. /* read the MAC address */
  1763. memcpy(dev->hw_addr, (void *)(hw->regs + B2_MAC_1 + port * 8), ETH_ALEN);
  1764. return dev;
  1765. }
  1766. static void sky2_show_addr(struct net_device *dev)
  1767. {
  1768. DBG2(PFX "%s: addr %s\n", dev->name, netdev_addr(dev));
  1769. }
  1770. #if DBGLVL_MAX
1771. /* This driver supports Yukon-2 chipsets only */
  1772. static const char *sky2_name(u8 chipid, char *buf, int sz)
  1773. {
  1774. const char *name[] = {
  1775. "XL", /* 0xb3 */
  1776. "EC Ultra", /* 0xb4 */
  1777. "Extreme", /* 0xb5 */
  1778. "EC", /* 0xb6 */
  1779. "FE", /* 0xb7 */
  1780. "FE+", /* 0xb8 */
  1781. "Supreme", /* 0xb9 */
  1782. "UL 2", /* 0xba */
  1783. };
  1784. if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_UL_2)
  1785. strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
  1786. else
  1787. snprintf(buf, sz, "(chip %#x)", chipid);
  1788. return buf;
  1789. }
  1790. #endif
  1791. static void sky2_net_irq(struct net_device *dev, int enable)
  1792. {
  1793. struct sky2_port *sky2 = netdev_priv(dev);
  1794. struct sky2_hw *hw = sky2->hw;
  1795. u32 imask = sky2_read32(hw, B0_IMSK);
  1796. if (enable)
  1797. imask |= portirq_msk[sky2->port];
  1798. else
  1799. imask &= ~portirq_msk[sky2->port];
  1800. sky2_write32(hw, B0_IMSK, imask);
  1801. }
  1802. static struct net_device_operations sky2_operations = {
  1803. .open = sky2_up,
  1804. .close = sky2_down,
  1805. .transmit = sky2_xmit_frame,
  1806. .poll = sky2_poll,
  1807. .irq = sky2_net_irq
  1808. };
  1809. static int sky2_probe(struct pci_device *pdev,
  1810. const struct pci_device_id *ent __unused)
  1811. {
  1812. struct net_device *dev;
  1813. struct sky2_hw *hw;
  1814. int err;
  1815. char buf1[16] __unused; /* only for debugging */
  1816. adjust_pci_device(pdev);
  1817. err = -ENOMEM;
  1818. hw = zalloc(sizeof(*hw));
  1819. if (!hw) {
  1820. DBG(PFX "cannot allocate hardware struct\n");
  1821. goto err_out;
  1822. }
  1823. hw->pdev = pdev;
  1824. hw->regs = (unsigned long)ioremap(pci_bar_start(pdev, PCI_BASE_ADDRESS_0), 0x4000);
  1825. if (!hw->regs) {
  1826. DBG(PFX "cannot map device registers\n");
  1827. goto err_out_free_hw;
  1828. }
  1829. /* ring for status responses */
  1830. hw->st_le = malloc_dma(STATUS_LE_BYTES, STATUS_RING_ALIGN);
  1831. if (!hw->st_le)
  1832. goto err_out_iounmap;
  1833. hw->st_dma = virt_to_bus(hw->st_le);
  1834. memset(hw->st_le, 0, STATUS_LE_BYTES);
  1835. err = sky2_init(hw);
  1836. if (err)
  1837. goto err_out_iounmap;
  1838. #if DBGLVL_MAX
  1839. DBG2(PFX "Yukon-2 %s chip revision %d\n",
  1840. sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
  1841. #endif
  1842. sky2_reset(hw);
  1843. dev = sky2_init_netdev(hw, 0);
  1844. if (!dev) {
  1845. err = -ENOMEM;
  1846. goto err_out_free_pci;
  1847. }
  1848. netdev_init(dev, &sky2_operations);
  1849. err = register_netdev(dev);
  1850. if (err) {
  1851. DBG(PFX "cannot register net device\n");
  1852. goto err_out_free_netdev;
  1853. }
  1854. sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
  1855. sky2_show_addr(dev);
  1856. if (hw->ports > 1) {
  1857. struct net_device *dev1;
  1858. dev1 = sky2_init_netdev(hw, 1);
  1859. if (!dev1)
  1860. DBG(PFX "allocation for second device failed\n");
  1861. else if ((err = register_netdev(dev1))) {
  1862. DBG(PFX "register of second port failed (%d)\n", err);
  1863. hw->dev[1] = NULL;
  1864. netdev_nullify(dev1);
  1865. netdev_put(dev1);
  1866. } else
  1867. sky2_show_addr(dev1);
  1868. }
  1869. pci_set_drvdata(pdev, dev);
  1870. return 0;
  1871. err_out_free_netdev:
  1872. netdev_nullify(dev);
  1873. netdev_put(dev);
  1874. err_out_free_pci:
  1875. sky2_write8(hw, B0_CTST, CS_RST_SET);
  1876. free_dma(hw->st_le, STATUS_LE_BYTES);
  1877. err_out_iounmap:
  1878. iounmap((void *)hw->regs);
  1879. err_out_free_hw:
  1880. free(hw);
  1881. err_out:
  1882. pci_set_drvdata(pdev, NULL);
  1883. return err;
  1884. }
  1885. static void sky2_remove(struct pci_device *pdev)
  1886. {
  1887. struct sky2_hw *hw = pci_get_drvdata(pdev);
  1888. int i;
  1889. if (!hw)
  1890. return;
  1891. for (i = hw->ports-1; i >= 0; --i)
  1892. unregister_netdev(hw->dev[i]);
  1893. sky2_write32(hw, B0_IMSK, 0);
  1894. sky2_power_aux(hw);
  1895. sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
  1896. sky2_write8(hw, B0_CTST, CS_RST_SET);
  1897. sky2_read8(hw, B0_CTST);
  1898. free_dma(hw->st_le, STATUS_LE_BYTES);
  1899. for (i = hw->ports-1; i >= 0; --i) {
  1900. netdev_nullify(hw->dev[i]);
  1901. netdev_put(hw->dev[i]);
  1902. }
  1903. iounmap((void *)hw->regs);
  1904. free(hw);
  1905. pci_set_drvdata(pdev, NULL);
  1906. }
  1907. struct pci_driver sky2_driver __pci_driver = {
  1908. .ids = sky2_id_table,
  1909. .id_count = (sizeof (sky2_id_table) / sizeof (sky2_id_table[0])),
  1910. .probe = sky2_probe,
  1911. .remove = sky2_remove
  1912. };