You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

tg3_hw.c 76KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661
  1. /*
  2. * tg3.c: Broadcom Tigon3 ethernet driver.
  3. *
  4. * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  5. * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
  6. * Copyright (C) 2004 Sun Microsystems Inc.
  7. * Copyright (C) 2005-2011 Broadcom Corporation.
  8. *
  9. * Firmware is:
  10. * Derived from proprietary unpublished source code,
  11. * Copyright (C) 2000-2003 Broadcom Corporation.
  12. *
  13. * Permission is hereby granted for the distribution of this firmware
  14. * data in hexadecimal or equivalent format, provided this copyright
  15. * notice is accompanying it.
  16. */
  17. FILE_LICENCE ( GPL2_ONLY );
  18. #include <mii.h>
  19. #include <stdio.h>
  20. #include <errno.h>
  21. #include <unistd.h>
  22. #include <byteswap.h>
  23. #include <ipxe/pci.h>
  24. #include <ipxe/iobuf.h>
  25. #include <ipxe/timer.h>
  26. #include <ipxe/malloc.h>
  27. #include <ipxe/if_ether.h>
  28. #include <ipxe/ethernet.h>
  29. #include <ipxe/netdevice.h>
  30. #include "tg3.h"
  31. #define RESET_KIND_SHUTDOWN 0
  32. #define RESET_KIND_INIT 1
  33. #define RESET_KIND_SUSPEND 2
  34. #define TG3_DEF_MAC_MODE 0
  35. void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
  36. { DBGP("%s\n", __func__);
  37. pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
  38. pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
  39. }
  40. u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
  41. { DBGP("%s\n", __func__);
  42. u32 val;
  43. pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
  44. pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
  45. return val;
  46. }
  47. static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
  48. { DBGP("%s\n", __func__);
  49. return readl(tp->regs + off + GRCMBOX_BASE);
  50. }
  51. static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
  52. { DBGP("%s\n", __func__);
  53. writel(val, tp->regs + off + GRCMBOX_BASE);
  54. }
  55. void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
  56. { DBGP("%s\n", __func__);
  57. if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
  58. pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
  59. TG3_64BIT_REG_LOW, val);
  60. return;
  61. }
  62. if (off == TG3_RX_STD_PROD_IDX_REG) {
  63. pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
  64. TG3_64BIT_REG_LOW, val);
  65. return;
  66. }
  67. pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
  68. pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
  69. /* In indirect mode when disabling interrupts, we also need
  70. * to clear the interrupt bit in the GRC local ctrl register.
  71. */
  72. if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
  73. (val == 0x1)) {
  74. pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
  75. tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
  76. }
  77. }
  78. u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
  79. { DBGP("%s\n", __func__);
  80. u32 val;
  81. pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
  82. pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
  83. return val;
  84. }
  85. /* usec_wait specifies the wait time in usec when writing to certain registers
  86. * where it is unsafe to read back the register without some delay.
  87. * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
  88. * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
  89. */
  90. void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
  91. { DBGP("%s\n", __func__);
  92. tw32(off, val);
  93. if (usec_wait)
  94. udelay(usec_wait);
  95. tr32(off);
  96. /* Wait again after the read for the posted method to guarantee that
  97. * the wait time is met.
  98. */
  99. if (usec_wait)
  100. udelay(usec_wait);
  101. }
  102. /* stolen from legacy etherboot tg3 driver */
  103. void tg3_set_power_state_0(struct tg3 *tp)
  104. { DBGP("%s\n", __func__);
  105. uint16_t power_control;
  106. int pm = tp->pm_cap;
  107. /* Make sure register accesses (indirect or otherwise)
  108. * will function correctly.
  109. */
  110. pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
  111. pci_read_config_word(tp->pdev, pm + PCI_PM_CTRL, &power_control);
  112. power_control |= PCI_PM_CTRL_PME_STATUS;
  113. power_control &= ~(PCI_PM_CTRL_STATE_MASK);
  114. power_control |= 0;
  115. pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
  116. tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
  117. return;
  118. }
  119. void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
  120. { DBGP("%s\n", __func__);
  121. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
  122. (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
  123. *val = 0;
  124. return;
  125. }
  126. pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
  127. pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
  128. /* Always leave this as zero. */
  129. pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
  130. }
/* Subsystem vendor ID of Arima-built boards needing the EEPROM
 * write-protect quirk below. */
#define PCI_VENDOR_ID_ARIMA 0x161f

/* Parse the hardware configuration that bootcode left in NIC SRAM
 * (PHY id, LED mode, ASF/APE enables, RGMII options) and translate it
 * into tp->phy_id, tp->led_ctrl, tp->phy_flags and driver flags.
 * Defaults are applied first so they hold even when the SRAM signature
 * is absent.  Must run before power-state handling, since it decides
 * the IS_NIC / EEPROM_WRITE_PROT flags used there.
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	mdelay(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed. Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	/* Defaults, used when no valid SRAM signature is found. */
	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		/* CFG_2 is only read on chip/bootcode combinations where
		 * it is known to exist (not 5700/5701/5703, sane version).
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			/* Repack the SRAM PHY-id field layout into the
			 * driver's internal phy_id format. */
			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		/* LED mode: 5750+ chips carry it in CFG_2, older chips in
		 * the base nic_cfg word. */
		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			/* 5750 A0/A1 must not drive the PHY LED bits. */
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		/* Dell 5700/5701 boards use PHY_2 LED mode regardless. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Certain Arima boards set the WP bit but must
			 * not be treated as write-protected. */
			if ((tp->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->subsystem_device == 0x205a ||
			     tp->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		/* APE is only honoured when ASF is also enabled. */
		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, ENABLE_ASF))
			tg3_flag_set(tp, ENABLE_APE);

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			/* NOTE(review): cfg3 is read but never used —
			 * looks like dead code left from a fuller driver
			 * that acts on CFG_3 bits; confirm before removing.
			 */
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
}
/* Reprogram TG3PCI_CLOCK_CTRL, preserving only the CLKRUN bits and the
 * low 5 bits, and cache the result in tp->pci_clock_ctrl.  The write
 * sequence (including the two-step ALTCLK dance for 44MHz-core chips)
 * mirrors the vendor driver — do not reorder the tw32_wait_f() calls.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	/* Chips with a CPMU, and the 5780 class, are skipped entirely. */
	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	/* Keep only the CLKRUN control bits and the low 5 bits. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		/* 5705+: re-assert the 625MHz core clock bit if it was set. */
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Older chips with the 44MHz core bit set: step through
		 * ALTCLK (first with, then without, the 44MHZ_CORE bit),
		 * waiting 40us after each write.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
  313. int tg3_get_invariants(struct tg3 *tp)
  314. { DBGP("%s\n", __func__);
  315. u32 misc_ctrl_reg;
  316. u32 pci_state_reg, grc_misc_cfg;
  317. u32 val;
  318. u16 pci_cmd;
  319. int err;
  320. /* Force memory write invalidate off. If we leave it on,
  321. * then on 5700_BX chips we have to enable a workaround.
  322. * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
  323. * to match the cacheline size. The Broadcom driver have this
  324. * workaround but turns MWI off all the times so never uses
  325. * it. This seems to suggest that the workaround is insufficient.
  326. */
  327. pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
  328. pci_cmd &= ~PCI_COMMAND_INVALIDATE;
  329. pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
  330. /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
  331. * has the register indirect write enable bit set before
  332. * we try to access any of the MMIO registers. It is also
  333. * critical that the PCI-X hw workaround situation is decided
  334. * before that as well.
  335. */
  336. pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
  337. &misc_ctrl_reg);
  338. tp->pci_chip_rev_id = (misc_ctrl_reg >>
  339. MISC_HOST_CTRL_CHIPREV_SHIFT);
  340. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
  341. u32 prod_id_asic_rev;
  342. if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
  343. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
  344. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
  345. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
  346. pci_read_config_dword(tp->pdev,
  347. TG3PCI_GEN2_PRODID_ASICREV,
  348. &prod_id_asic_rev);
  349. else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
  350. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
  351. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
  352. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
  353. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
  354. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
  355. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
  356. pci_read_config_dword(tp->pdev,
  357. TG3PCI_GEN15_PRODID_ASICREV,
  358. &prod_id_asic_rev);
  359. else
  360. pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
  361. &prod_id_asic_rev);
  362. tp->pci_chip_rev_id = prod_id_asic_rev;
  363. }
  364. /* Wrong chip ID in 5752 A0. This code can be removed later
  365. * as A0 is not in production.
  366. */
  367. if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
  368. tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
  369. /* Initialize misc host control in PCI block. */
  370. tp->misc_host_ctrl |= (misc_ctrl_reg &
  371. MISC_HOST_CTRL_CHIPREV);
  372. pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
  373. tp->misc_host_ctrl);
  374. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
  375. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
  376. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
  377. tg3_flag_set(tp, 5717_PLUS);
  378. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
  379. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766 ||
  380. tg3_flag(tp, 5717_PLUS))
  381. tg3_flag_set(tp, 57765_PLUS);
  382. /* Intentionally exclude ASIC_REV_5906 */
  383. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
  384. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
  385. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
  386. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
  387. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
  388. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
  389. tg3_flag(tp, 57765_PLUS))
  390. tg3_flag_set(tp, 5755_PLUS);
  391. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
  392. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
  393. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
  394. tg3_flag(tp, 5755_PLUS) ||
  395. tg3_flag(tp, 5780_CLASS))
  396. tg3_flag_set(tp, 5750_PLUS);
  397. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
  398. tg3_flag(tp, 5750_PLUS))
  399. tg3_flag_set(tp, 5705_PLUS);
  400. if (tg3_flag(tp, 5717_PLUS))
  401. tg3_flag_set(tp, LRG_PROD_RING_CAP);
  402. pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
  403. &pci_state_reg);
  404. tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
  405. if (tp->pcie_cap != 0) {
  406. u16 lnkctl;
  407. tg3_flag_set(tp, PCI_EXPRESS);
  408. pci_read_config_word(tp->pdev,
  409. tp->pcie_cap + PCI_EXP_LNKCTL,
  410. &lnkctl);
  411. if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
  412. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
  413. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
  414. tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
  415. tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
  416. tg3_flag_set(tp, CLKREQ_BUG);
  417. } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
  418. tg3_flag_set(tp, L1PLLPD_EN);
  419. }
  420. } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
  421. tg3_flag_set(tp, PCI_EXPRESS);
  422. } else if (!tg3_flag(tp, 5705_PLUS) ||
  423. tg3_flag(tp, 5780_CLASS)) {
  424. tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
  425. if (!tp->pcix_cap) {
  426. DBGC(&tp->pdev->dev,
  427. "Cannot find PCI-X capability, aborting\n");
  428. return -EIO;
  429. }
  430. if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
  431. tg3_flag_set(tp, PCIX_MODE);
  432. }
  433. /* If we have an AMD 762 or VIA K8T800 chipset, write
  434. * reordering to the mailbox registers done by the host
  435. * controller can cause major troubles. We read back from
  436. * every mailbox register write to force the writes to be
  437. * posted to the chip in order.
  438. */
  439. pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
  440. &tp->pci_cacheline_sz);
  441. pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
  442. &tp->pci_lat_timer);
  443. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
  444. tp->pci_lat_timer < 64) {
  445. tp->pci_lat_timer = 64;
  446. pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
  447. tp->pci_lat_timer);
  448. }
  449. if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
  450. /* 5700 BX chips need to have their TX producer index
  451. * mailboxes written twice to workaround a bug.
  452. */
  453. tg3_flag_set(tp, TXD_MBOX_HWBUG);
  454. /* If we are in PCI-X mode, enable register write workaround.
  455. *
  456. * The workaround is to use indirect register accesses
  457. * for all chip writes not to mailbox registers.
  458. */
  459. if (tg3_flag(tp, PCIX_MODE)) {
  460. u32 pm_reg;
  461. tg3_flag_set(tp, PCIX_TARGET_HWBUG);
  462. /* The chip can have it's power management PCI config
  463. * space registers clobbered due to this bug.
  464. * So explicitly force the chip into D0 here.
  465. */
  466. pci_read_config_dword(tp->pdev,
  467. tp->pm_cap + PCI_PM_CTRL,
  468. &pm_reg);
  469. pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
  470. pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
  471. pci_write_config_dword(tp->pdev,
  472. tp->pm_cap + PCI_PM_CTRL,
  473. pm_reg);
  474. /* Also, force SERR#/PERR# in PCI command. */
  475. pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
  476. pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
  477. pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
  478. }
  479. }
  480. if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
  481. tg3_flag_set(tp, PCI_HIGH_SPEED);
  482. if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
  483. tg3_flag_set(tp, PCI_32BIT);
  484. /* Chip-specific fixup from Broadcom driver */
  485. if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
  486. (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
  487. pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
  488. pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
  489. }
  490. tp->write32_mbox = tg3_write_indirect_reg32;
  491. tp->write32_rx_mbox = tg3_write_indirect_mbox;
  492. tp->write32_tx_mbox = tg3_write_indirect_mbox;
  493. tp->read32_mbox = tg3_read_indirect_mbox;
  494. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  495. tp->read32_mbox = tg3_read32_mbox_5906;
  496. tp->write32_mbox = tg3_write32_mbox_5906;
  497. tp->write32_tx_mbox = tg3_write32_mbox_5906;
  498. tp->write32_rx_mbox = tg3_write32_mbox_5906;
  499. }
  500. /* Get eeprom hw config before calling tg3_set_power_state().
  501. * In particular, the TG3_FLAG_IS_NIC flag must be
  502. * determined before calling tg3_set_power_state() so that
  503. * we know whether or not to switch out of Vaux power.
  504. * When the flag is set, it means that GPIO1 is used for eeprom
  505. * write protect and also implies that it is a LOM where GPIOs
  506. * are not used to switch power.
  507. */
  508. tg3_get_eeprom_hw_cfg(tp);
  509. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
  510. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
  511. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
  512. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
  513. tg3_flag(tp, 57765_PLUS))
  514. tg3_flag_set(tp, CPMU_PRESENT);
  515. /* Set up tp->grc_local_ctrl before calling tg3_power_up().
  516. * GPIO1 driven high will bring 5700's external PHY out of reset.
  517. * It is also used as eeprom write protect on LOMs.
  518. */
  519. tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
  520. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
  521. tg3_flag(tp, EEPROM_WRITE_PROT))
  522. tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
  523. GRC_LCLCTRL_GPIO_OUTPUT1);
  524. /* Unused GPIO3 must be driven as output on 5752 because there
  525. * are no pull-up resistors on unused GPIO pins.
  526. */
  527. else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
  528. tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
  529. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
  530. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
  531. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
  532. tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
  533. if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
  534. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
  535. /* Turn off the debug UART. */
  536. tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
  537. if (tg3_flag(tp, IS_NIC))
  538. /* Keep VMain power. */
  539. tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
  540. GRC_LCLCTRL_GPIO_OUTPUT0;
  541. }
  542. /* Force the chip into D0. */
  543. tg3_set_power_state_0(tp);
  544. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
  545. tp->phy_flags |= TG3_PHYFLG_IS_FET;
  546. /* A few boards don't want Ethernet@WireSpeed phy feature */
  547. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
  548. (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
  549. (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
  550. (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
  551. (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
  552. (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
  553. tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
  554. if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
  555. GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
  556. tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
  557. if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
  558. tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
  559. if (tg3_flag(tp, 5705_PLUS) &&
  560. !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
  561. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
  562. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
  563. !tg3_flag(tp, 57765_PLUS)) {
  564. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
  565. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
  566. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
  567. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
  568. if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
  569. tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
  570. tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
  571. if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
  572. tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
  573. } else
  574. tp->phy_flags |= TG3_PHYFLG_BER_BUG;
  575. }
  576. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
  577. GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
  578. tp->phy_otp = tg3_read_otp_phycfg(tp);
  579. if (tp->phy_otp == 0)
  580. tp->phy_otp = TG3_OTP_DEFAULT;
  581. }
  582. if (tg3_flag(tp, CPMU_PRESENT))
  583. tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
  584. else
  585. tp->mi_mode = MAC_MI_MODE_BASE;
  586. tp->coalesce_mode = 0;
  587. if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
  588. GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
  589. tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
  590. /* Set these bits to enable statistics workaround. */
  591. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
  592. tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
  593. tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
  594. tp->coalesce_mode |= HOSTCC_MODE_ATTN;
  595. tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
  596. }
  597. tg3_mdio_init(tp);
  598. /* Initialize data/descriptor byte/word swapping. */
  599. val = tr32(GRC_MODE);
  600. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
  601. val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
  602. GRC_MODE_WORD_SWAP_B2HRX_DATA |
  603. GRC_MODE_B2HRX_ENABLE |
  604. GRC_MODE_HTX2B_ENABLE |
  605. GRC_MODE_HOST_STACKUP);
  606. else
  607. val &= GRC_MODE_HOST_STACKUP;
  608. tw32(GRC_MODE, val | tp->grc_mode);
  609. tg3_switch_clocks(tp);
  610. /* Clear this out for sanity. */
  611. tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
  612. pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
  613. &pci_state_reg);
  614. if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
  615. !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
  616. u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
  617. if (chiprevid == CHIPREV_ID_5701_A0 ||
  618. chiprevid == CHIPREV_ID_5701_B0 ||
  619. chiprevid == CHIPREV_ID_5701_B2 ||
  620. chiprevid == CHIPREV_ID_5701_B5) {
  621. void *sram_base;
  622. /* Write some dummy words into the SRAM status block
  623. * area, see if it reads back correctly. If the return
  624. * value is bad, force enable the PCIX workaround.
  625. */
  626. sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
  627. writel(0x00000000, sram_base);
  628. writel(0x00000000, sram_base + 4);
  629. writel(0xffffffff, sram_base + 4);
  630. if (readl(sram_base) != 0x00000000)
  631. tg3_flag_set(tp, PCIX_TARGET_HWBUG);
  632. }
  633. }
  634. udelay(50);
  635. /* FIXME: do we need nvram access? */
  636. /// tg3_nvram_init(tp);
  637. grc_misc_cfg = tr32(GRC_MISC_CFG);
  638. grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
  639. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
  640. (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
  641. grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
  642. tg3_flag_set(tp, IS_5788);
  643. if (!tg3_flag(tp, IS_5788) &&
  644. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
  645. tg3_flag_set(tp, TAGGED_STATUS);
  646. if (tg3_flag(tp, TAGGED_STATUS)) {
  647. tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
  648. HOSTCC_MODE_CLRTICK_TXBD);
  649. tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
  650. pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
  651. tp->misc_host_ctrl);
  652. }
  653. /* Preserve the APE MAC_MODE bits */
  654. if (tg3_flag(tp, ENABLE_APE))
  655. tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
  656. else
  657. tp->mac_mode = TG3_DEF_MAC_MODE;
  658. /* these are limited to 10/100 only */
  659. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
  660. (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
  661. (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
  662. tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
  663. (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
  664. tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
  665. tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
  666. (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
  667. (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
  668. tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
  669. tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
  670. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
  671. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
  672. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
  673. (tp->phy_flags & TG3_PHYFLG_IS_FET))
  674. tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
  675. err = tg3_phy_probe(tp);
  676. if (err) {
  677. DBGC(&tp->pdev->dev, "phy probe failed, err: %s\n", strerror(err));
  678. /* ... but do not return immediately ... */
  679. }
  680. if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
  681. tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
  682. } else {
  683. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
  684. tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
  685. else
  686. tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
  687. }
  688. /* For all SERDES we poll the MAC status register. */
  689. if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
  690. tg3_flag_set(tp, POLL_SERDES);
  691. else
  692. tg3_flag_clear(tp, POLL_SERDES);
  693. /* Increment the rx prod index on the rx std ring by at most
  694. * 8 for these chips to workaround hw errata.
  695. */
  696. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
  697. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
  698. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
  699. tp->rx_std_max_post = 8;
  700. return err;
  701. }
  702. void tg3_init_bufmgr_config(struct tg3 *tp)
  703. { DBGP("%s\n", __func__);
  704. if (tg3_flag(tp, 57765_PLUS)) {
  705. tp->bufmgr_config.mbuf_read_dma_low_water =
  706. DEFAULT_MB_RDMA_LOW_WATER_5705;
  707. tp->bufmgr_config.mbuf_mac_rx_low_water =
  708. DEFAULT_MB_MACRX_LOW_WATER_57765;
  709. tp->bufmgr_config.mbuf_high_water =
  710. DEFAULT_MB_HIGH_WATER_57765;
  711. tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
  712. DEFAULT_MB_RDMA_LOW_WATER_5705;
  713. tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
  714. DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
  715. tp->bufmgr_config.mbuf_high_water_jumbo =
  716. DEFAULT_MB_HIGH_WATER_JUMBO_57765;
  717. } else if (tg3_flag(tp, 5705_PLUS)) {
  718. tp->bufmgr_config.mbuf_read_dma_low_water =
  719. DEFAULT_MB_RDMA_LOW_WATER_5705;
  720. tp->bufmgr_config.mbuf_mac_rx_low_water =
  721. DEFAULT_MB_MACRX_LOW_WATER_5705;
  722. tp->bufmgr_config.mbuf_high_water =
  723. DEFAULT_MB_HIGH_WATER_5705;
  724. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  725. tp->bufmgr_config.mbuf_mac_rx_low_water =
  726. DEFAULT_MB_MACRX_LOW_WATER_5906;
  727. tp->bufmgr_config.mbuf_high_water =
  728. DEFAULT_MB_HIGH_WATER_5906;
  729. }
  730. tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
  731. DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
  732. tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
  733. DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
  734. tp->bufmgr_config.mbuf_high_water_jumbo =
  735. DEFAULT_MB_HIGH_WATER_JUMBO_5780;
  736. } else {
  737. tp->bufmgr_config.mbuf_read_dma_low_water =
  738. DEFAULT_MB_RDMA_LOW_WATER;
  739. tp->bufmgr_config.mbuf_mac_rx_low_water =
  740. DEFAULT_MB_MACRX_LOW_WATER;
  741. tp->bufmgr_config.mbuf_high_water =
  742. DEFAULT_MB_HIGH_WATER;
  743. tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
  744. DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
  745. tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
  746. DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
  747. tp->bufmgr_config.mbuf_high_water_jumbo =
  748. DEFAULT_MB_HIGH_WATER_JUMBO;
  749. }
  750. tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
  751. tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
  752. }
  753. #define TG3_FW_EVENT_TIMEOUT_USEC 2500
  754. void tg3_wait_for_event_ack(struct tg3 *tp)
  755. { DBGP("%s\n", __func__);
  756. int i;
  757. for (i = 0; i < TG3_FW_EVENT_TIMEOUT_USEC / 10; i++) {
  758. if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
  759. break;
  760. udelay(10);
  761. }
  762. }
  763. void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
  764. { DBGP("%s\n", __func__);
  765. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
  766. (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
  767. return;
  768. pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
  769. pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
  770. /* Always leave this as zero. */
  771. pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
  772. }
/* Pause the on-chip management firmware before a reset.
 *
 * Only acts when ASF firmware is enabled and no APE is present (the
 * APE manages the firmware itself).  Sends FWCMD_NICDRV_PAUSE_FW via
 * the shared-memory command mailbox, bracketed by waits for the RX
 * CPU to acknowledge the previous and the new event.
 */
static void tg3_stop_fw(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* Post the driver's magic signature in the firmware mailbox so the
 * bootcode can tell that a driver-initiated reset is in progress.
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
}
/* Mask chip interrupts: set the PCI interrupt mask bit in misc host
 * control, then write 1 to the interrupt mailbox (a non-zero low bit
 * holds interrupts off).
 */
void tg3_disable_ints(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(tp->int_mbox, 0x00000001);
}
/* Unmask chip interrupts and poke the chip so any already-pending
 * status update is delivered immediately.
 */
void tg3_enable_ints(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	/* Clear the PCI interrupt mask bit. */
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;

	/* Ack up to the last processed status tag in the interrupt
	 * mailbox (tag lives in the top byte).
	 */
	tw32_mailbox_f(tp->int_mbox, tp->last_tag << 24);

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);
}
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it clears. */
/*
 * @ofs:        offset of the sub-block's mode register
 * @enable_bit: the block's enable bit within that register
 *
 * Returns 0 on success (or trivially for blocks the 5705/5750 does
 * not allow to be toggled), -ENODEV if the enable bit fails to clear
 * within MAX_WAIT_CNT * 100us.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
{ DBGP("%s\n", __func__);
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	/* Clear the enable bit and flush the write. */
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	/* Poll until the hardware reports the block stopped. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT) {
		DBGC(&tp->pdev->dev,
		     "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
		     ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
/* Quiesce the chip: disable interrupts, the RX and TX MACs, and all
 * DMA/buffer-manager sub-blocks, then clear the host status block.
 *
 * Errors from the individual tg3_stop_block() calls are OR-ed into a
 * single return value; shutdown continues regardless of failures.
 * Returns 0 on success, negative on any timeout.
 */
static int tg3_abort_hw(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the receive MAC first so no new frames enter the chip. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Shut down the receive-path blocks, then the send-path blocks. */
	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);

	/* Disable the transmit DMA engine in the MAC. */
	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the transmit MAC and wait for it to quiesce. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		DBGC(&tp->pdev->dev,
		     "%s timed out, TX_MODE_ENABLE will not clear "
		     "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	/* Stop host coalescing, write DMA, and mbuf free; pulse the
	 * FTQ reset; then stop the buffer manager and memory arbiter.
	 */
	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);

	/* Wipe the host status block so stale state is not re-read. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);

	return err;
}
/* Program the station MAC address from tp->dev->ll_addr into the
 * chip's MAC address registers and the TX backoff seed.
 *
 * @skip_mac_1: when non-zero, MAC_ADDR_1 is left untouched
 *              (NOTE(review): presumably reserved for firmware use —
 *              confirm against callers).
 */
void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{ DBGP("%s\n", __func__);
	u32 addr_high, addr_low;
	int i;

	/* Split the 6-byte address into the chip's high (2 bytes) and
	 * low (4 bytes) register layout.
	 */
	addr_high = ((tp->dev->ll_addr[0] << 8) |
		     tp->dev->ll_addr[1]);
	addr_low = ((tp->dev->ll_addr[2] << 24) |
		    (tp->dev->ll_addr[3] << 16) |
		    (tp->dev->ll_addr[4] << 8) |
		    (tp->dev->ll_addr[5] << 0));

	/* Duplicate the address into all four MAC address slots. */
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	/* 5703/5704 also have 12 extended address slots. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	/* Seed the transmit backoff generator from the byte sum of the
	 * MAC address.
	 */
	addr_high = (tp->dev->ll_addr[0] +
		     tp->dev->ll_addr[1] +
		     tp->dev->ll_addr[2] +
		     tp->dev->ll_addr[3] +
		     tp->dev->ll_addr[4] +
		     tp->dev->ll_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
/* Save PCI command register before chip reset */
/* (the GRC core-clock reset can clear the memory-enable bit there;
 * tg3_restore_pci_state() writes it back afterwards)
 */
static void tg3_save_pci_state(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the PCI command word saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* Restore cacheline size and latency timer (not needed on
	 * the 5785).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}
}
/* Wait for the bootcode/firmware to finish initializing after reset.
 *
 * On the 5906 the VCPU status register is polled (up to ~20ms);
 * failure there is a hard -ENODEV.  On other chips the SRAM firmware
 * mailbox is polled for the inverted magic value; a timeout is NOT an
 * error (some boards ship without firmware) and is reported only once
 * via the NO_FWARE_REPORTED flag.
 *
 * Returns 0 on success, -ENODEV only for the 5906 VCPU timeout.
 */
static int tg3_poll_fw(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == (u32)~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware. Some Sun onboard
	 * parts are configured like that. So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		DBGC(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
/* Acquire the NVRAM software arbitration lock.
 *
 * The lock nests via tp->nvram_lock_cnt: the hardware SWARB request
 * is only issued on the first acquisition.  A no-op when the chip has
 * no NVRAM interface.
 *
 * Returns 0 on success, -ENODEV if arbitration is not granted within
 * 8000 * 20us (~160ms); the request is withdrawn on failure.
 */
static int tg3_nvram_lock(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
  1017. static void tg3_nvram_unlock(struct tg3 *tp)
  1018. { DBGP("%s\n", __func__);
  1019. if (tg3_flag(tp, NVRAM)) {
  1020. if (tp->nvram_lock_cnt > 0)
  1021. tp->nvram_lock_cnt--;
  1022. if (tp->nvram_lock_cnt == 0)
  1023. tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
  1024. }
  1025. }
/* Perform a full GRC core-clock reset of the chip and bring it back
 * to a usable state: save/restore PCI config, work around per-chip
 * reset errata, restore GRC/MAC mode, and wait for firmware.
 *
 * Returns 0 on success or the error from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	u32 val;
	int err;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

#if 0
	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things. So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;
#endif

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared. The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);

	/* Clear the status block and tags so post-reset state is clean. */
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	tp->last_irq_tag = 0;
	mb();

	/* 57780: keep the PCIe L1 PLL powered during the reset. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_DEVCTL,
				     &val16);
		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
			   PCI_EXP_DEVCTL_NOSNOOP_EN);
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVCTL,
				      val16);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	/* Re-enable the memory arbiter, preserving other mode bits on
	 * 5780-class chips.
	 */
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode for SERDES parts. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	if (tg3_flag(tp, CPMU_PRESENT)) {
		tw32(TG3_CPMU_D0_CLCK_POLICY, 0);
		val = tr32(TG3_CPMU_CLCK_ORIDE_EN);
		tw32(TG3_CPMU_CLCK_ORIDE_EN,
		     val | CPMU_CLCK_ORIDE_MAC_CLCK_ORIDE_EN);
	}

	return 0;
}
  1207. int tg3_halt(struct tg3 *tp)
  1208. { DBGP("%s\n", __func__);
  1209. int err;
  1210. tg3_stop_fw(tp);
  1211. tg3_write_sig_pre_reset(tp);
  1212. tg3_abort_hw(tp);
  1213. err = tg3_chip_reset(tp);
  1214. __tg3_set_mac_addr(tp, 0);
  1215. if (err)
  1216. return err;
  1217. return 0;
  1218. }
/* Read one 32-bit word from the legacy EEPROM via the GRC EEPROM
 * controller (used when the chip exposes no NVRAM interface).
 *
 * @offset: byte offset; must be 32-bit aligned and fit within
 *          EEPROM_ADDR_ADDR_MASK.
 * @val:    out parameter for the word read.
 *
 * Returns 0 on success, -EINVAL for a bad offset, -EBUSY if the
 * read does not complete within ~1s.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{ DBGP("%s\n", __func__);
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve non-address bits of the EEPROM address register. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll (up to 1000 * 1ms) for completion. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		mdelay(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = bswap_32(tmp);

	return 0;
}
  1251. static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
  1252. { DBGP("%s\n", __func__);
  1253. if (tg3_flag(tp, NVRAM) &&
  1254. tg3_flag(tp, NVRAM_BUFFERED) &&
  1255. tg3_flag(tp, FLASH) &&
  1256. !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
  1257. (tp->nvram_jedecnum == JEDEC_ATMEL))
  1258. addr = ((addr / tp->nvram_pagesize) <<
  1259. ATMEL_AT45DB0X1B_PAGE_POS) +
  1260. (addr % tp->nvram_pagesize);
  1261. return addr;
  1262. }
  1263. static void tg3_enable_nvram_access(struct tg3 *tp)
  1264. { DBGP("%s\n", __func__);
  1265. if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
  1266. u32 nvaccess = tr32(NVRAM_ACCESS);
  1267. tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
  1268. }
  1269. }
  1270. static void tg3_disable_nvram_access(struct tg3 *tp)
  1271. { DBGP("%s\n", __func__);
  1272. if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
  1273. u32 nvaccess = tr32(NVRAM_ACCESS);
  1274. tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
  1275. }
  1276. }
#define NVRAM_CMD_TIMEOUT 10000

/* Issue an NVRAM command and poll for completion.
 *
 * Returns 0 when NVRAM_CMD_DONE is seen, -EBUSY after
 * NVRAM_CMD_TIMEOUT * 10us without completion.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{ DBGP("%s\n", __func__);
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			/* Extra settle time after the done bit appears. */
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
/* Read one 32-bit word at @offset.  Falls back to the legacy EEPROM
 * path when no NVRAM interface is present; otherwise translates the
 * offset, takes the SWARB lock, executes a read command, and returns
 * the word from NVRAM_RDDATA.
 *
 * Returns 0 on success, -EINVAL for an out-of-range offset, or the
 * error from locking / command execution.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{ DBGP("%s\n", __func__);
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
  1320. /* Ensures NVRAM data is in bytestream format. */
  1321. static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, u32 *val)
  1322. { DBGP("%s\n", __func__);
  1323. u32 v = 0;
  1324. int res = tg3_nvram_read(tp, offset, &v);
  1325. if (!res)
  1326. *val = cpu_to_be32(v);
  1327. return res;
  1328. }
/* Discover the device's MAC address and store it in dev->hw_addr.
 *
 * Tries, in order: the SRAM MAC address mailbox (written by bootcode,
 * tagged with 'HK' == 0x484b in the top half of the high word), NVRAM
 * at a chip-dependent offset, and finally the live MAC address
 * registers.
 *
 * Returns 0 on success, -EINVAL if no valid address was found.
 */
int tg3_get_device_address(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

	/* Select the NVRAM offset of the MAC address per chip/port. */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (PCI_FUNC(tp->pdev->busdevfn) & 1)
			mac_offset = 0xcc;
		if (PCI_FUNC(tp->pdev->busdevfn) > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->hw_addr[0] = (hi >>  8) & 0xff;
		dev->hw_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->hw_addr[2] = (lo >> 24) & 0xff;
		dev->hw_addr[3] = (lo >> 16) & 0xff;
		dev->hw_addr[4] = (lo >>  8) & 0xff;
		dev->hw_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->hw_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* NVRAM words are bytestream (big-endian); the
			 * address occupies the last 2 bytes of 'hi' and
			 * all 4 bytes of 'lo'.
			 */
			memcpy(&dev->hw_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->hw_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->hw_addr[5] = lo & 0xff;
			dev->hw_addr[4] = (lo >> 8) & 0xff;
			dev->hw_addr[3] = (lo >> 16) & 0xff;
			dev->hw_addr[2] = (lo >> 24) & 0xff;
			dev->hw_addr[1] = hi & 0xff;
			dev->hw_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->hw_addr[0])) {
		return -EINVAL;
	}

	return 0;
}
  1388. static void __tg3_set_rx_mode(struct net_device *dev)
  1389. { DBGP("%s\n", __func__);
  1390. struct tg3 *tp = netdev_priv(dev);
  1391. u32 rx_mode;
  1392. rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
  1393. RX_MODE_KEEP_VLAN_TAG);
  1394. rx_mode |= RX_MODE_KEEP_VLAN_TAG;
  1395. /* Accept all multicast. */
  1396. tw32(MAC_HASH_REG_0, 0xffffffff);
  1397. tw32(MAC_HASH_REG_1, 0xffffffff);
  1398. tw32(MAC_HASH_REG_2, 0xffffffff);
  1399. tw32(MAC_HASH_REG_3, 0xffffffff);
  1400. if (rx_mode != tp->rx_mode) {
  1401. tp->rx_mode = rx_mode;
  1402. tw32_f(MAC_RX_MODE, rx_mode);
  1403. udelay(10);
  1404. }
  1405. }
  1406. static void __tg3_set_coalesce(struct tg3 *tp)
  1407. { DBGP("%s\n", __func__);
  1408. tw32(HOSTCC_RXCOL_TICKS, 0);
  1409. tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
  1410. tw32(HOSTCC_RXMAX_FRAMES, 1);
  1411. /* FIXME: mix between TXMAX and RXMAX taken from legacy driver */
  1412. tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
  1413. tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
  1414. tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
  1415. if (!tg3_flag(tp, 5705_PLUS)) {
  1416. u32 val = DEFAULT_STAT_COAL_TICKS;
  1417. tw32(HOSTCC_RXCOAL_TICK_INT, DEFAULT_RXCOAL_TICK_INT);
  1418. tw32(HOSTCC_TXCOAL_TICK_INT, DEFAULT_TXCOAL_TICK_INT);
  1419. if (!netdev_link_ok(tp->dev))
  1420. val = 0;
  1421. tw32(HOSTCC_STAT_COAL_TICKS, val);
  1422. }
  1423. }
  1424. static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
  1425. dma_addr_t mapping, u32 maxlen_flags,
  1426. u32 nic_addr)
  1427. { DBGP("%s\n", __func__);
  1428. tg3_write_mem(tp,
  1429. (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
  1430. ((u64) mapping >> 32));
  1431. tg3_write_mem(tp,
  1432. (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
  1433. ((u64) mapping & 0xffffffff));
  1434. tg3_write_mem(tp,
  1435. (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
  1436. maxlen_flags);
  1437. if (!tg3_flag(tp, 5705_PLUS))
  1438. tg3_write_mem(tp,
  1439. (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
  1440. nic_addr);
  1441. }
  1442. static void tg3_rings_reset(struct tg3 *tp)
  1443. { DBGP("%s\n", __func__);
  1444. int i;
  1445. u32 txrcb, rxrcb, limit;
  1446. /* Disable all transmit rings but the first. */
  1447. if (!tg3_flag(tp, 5705_PLUS))
  1448. limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
  1449. else if (tg3_flag(tp, 5717_PLUS))
  1450. limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
  1451. else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
  1452. limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
  1453. else
  1454. limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
  1455. for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
  1456. txrcb < limit; txrcb += TG3_BDINFO_SIZE)
  1457. tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
  1458. BDINFO_FLAGS_DISABLED);
  1459. /* Disable all receive return rings but the first. */
  1460. if (tg3_flag(tp, 5717_PLUS))
  1461. limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
  1462. else if (!tg3_flag(tp, 5705_PLUS))
  1463. limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
  1464. else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
  1465. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
  1466. limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
  1467. else
  1468. limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
  1469. for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
  1470. rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
  1471. tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
  1472. BDINFO_FLAGS_DISABLED);
  1473. /* Disable interrupts */
  1474. tw32_mailbox_f(tp->int_mbox, 1);
  1475. tp->tx_prod = 0;
  1476. tp->tx_cons = 0;
  1477. tw32_mailbox(tp->prodmbox, 0);
  1478. tw32_rx_mbox(tp->consmbox, 0);
  1479. /* Make sure the NIC-based send BD rings are disabled. */
  1480. if (!tg3_flag(tp, 5705_PLUS)) {
  1481. u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
  1482. for (i = 0; i < 16; i++)
  1483. tw32_tx_mbox(mbox + i * 8, 0);
  1484. }
  1485. txrcb = NIC_SRAM_SEND_RCB;
  1486. rxrcb = NIC_SRAM_RCV_RET_RCB;
  1487. /* Clear status block in ram. */
  1488. memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
  1489. /* Set status block DMA address */
  1490. tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
  1491. ((u64) tp->status_mapping >> 32));
  1492. tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
  1493. ((u64) tp->status_mapping & 0xffffffff));
  1494. if (tp->tx_ring) {
  1495. tg3_set_bdinfo(tp, txrcb, tp->tx_desc_mapping,
  1496. (TG3_TX_RING_SIZE <<
  1497. BDINFO_FLAGS_MAXLEN_SHIFT),
  1498. NIC_SRAM_TX_BUFFER_DESC);
  1499. txrcb += TG3_BDINFO_SIZE;
  1500. }
  1501. /* FIXME: will TG3_RX_RET_MAX_SIZE_5705 work on all cards? */
  1502. if (tp->rx_rcb) {
  1503. tg3_set_bdinfo(tp, rxrcb, tp->rx_rcb_mapping,
  1504. TG3_RX_RET_MAX_SIZE_5705 <<
  1505. BDINFO_FLAGS_MAXLEN_SHIFT, 0);
  1506. rxrcb += TG3_BDINFO_SIZE;
  1507. }
  1508. }
  1509. static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
  1510. { DBGP("%s\n", __func__);
  1511. u32 val, bdcache_maxcnt;
  1512. if (!tg3_flag(tp, 5750_PLUS) ||
  1513. tg3_flag(tp, 5780_CLASS) ||
  1514. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
  1515. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
  1516. bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
  1517. else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
  1518. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
  1519. bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
  1520. else
  1521. bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
  1522. /* NOTE: legacy driver uses RX_PENDING / 8, we only use 4 descriptors
  1523. * for now, use / 4 so the result is > 0
  1524. */
  1525. val = TG3_DEF_RX_RING_PENDING / 4;
  1526. tw32(RCVBDI_STD_THRESH, val);
  1527. if (tg3_flag(tp, 57765_PLUS))
  1528. tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
  1529. }
  1530. static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
  1531. { DBGP("%s\n", __func__);
  1532. u32 val, rdmac_mode;
  1533. int i, err, limit;
  1534. struct tg3_rx_prodring_set *tpr = &tp->prodring;
  1535. tg3_stop_fw(tp);
  1536. tg3_write_sig_pre_reset(tp);
  1537. if (tg3_flag(tp, INIT_COMPLETE))
  1538. tg3_abort_hw(tp);
  1539. if (reset_phy)
  1540. tg3_phy_reset(tp);
  1541. err = tg3_chip_reset(tp);
  1542. if (err)
  1543. return err;
  1544. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
  1545. val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
  1546. val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
  1547. PCIE_PWR_MGMT_L1_THRESH_4MS;
  1548. tw32(PCIE_PWR_MGMT_THRESH, val);
  1549. val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
  1550. tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
  1551. tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
  1552. val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
  1553. tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
  1554. }
  1555. if (tg3_flag(tp, L1PLLPD_EN)) {
  1556. u32 grc_mode = tr32(GRC_MODE);
  1557. /* Access the lower 1K of PL PCIE block registers. */
  1558. val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
  1559. tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
  1560. val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
  1561. tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
  1562. val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
  1563. tw32(GRC_MODE, grc_mode);
  1564. }
  1565. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
  1566. if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
  1567. u32 grc_mode = tr32(GRC_MODE);
  1568. /* Access the lower 1K of PL PCIE block registers. */
  1569. val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
  1570. tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
  1571. val = tr32(TG3_PCIE_TLDLPL_PORT +
  1572. TG3_PCIE_PL_LO_PHYCTL5);
  1573. tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
  1574. val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
  1575. tw32(GRC_MODE, grc_mode);
  1576. }
  1577. if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
  1578. u32 grc_mode = tr32(GRC_MODE);
  1579. /* Access the lower 1K of DL PCIE block registers. */
  1580. val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
  1581. tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
  1582. val = tr32(TG3_PCIE_TLDLPL_PORT +
  1583. TG3_PCIE_DL_LO_FTSMAX);
  1584. val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
  1585. tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
  1586. val | TG3_PCIE_DL_LO_FTSMAX_VAL);
  1587. tw32(GRC_MODE, grc_mode);
  1588. }
  1589. val = tr32(TG3_CPMU_LSPD_10MB_CLK);
  1590. val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
  1591. val |= CPMU_LSPD_10MB_MACCLK_6_25;
  1592. tw32(TG3_CPMU_LSPD_10MB_CLK, val);
  1593. }
  1594. /* This works around an issue with Athlon chipsets on
  1595. * B3 tigon3 silicon. This bit has no effect on any
  1596. * other revision. But do not set this on PCI Express
  1597. * chips and don't even touch the clocks if the CPMU is present.
  1598. */
  1599. if (!tg3_flag(tp, CPMU_PRESENT)) {
  1600. if (!tg3_flag(tp, PCI_EXPRESS))
  1601. tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
  1602. tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
  1603. }
  1604. if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
  1605. tg3_flag(tp, PCIX_MODE)) {
  1606. val = tr32(TG3PCI_PCISTATE);
  1607. val |= PCISTATE_RETRY_SAME_DMA;
  1608. tw32(TG3PCI_PCISTATE, val);
  1609. }
  1610. if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
  1611. /* Enable some hw fixes. */
  1612. val = tr32(TG3PCI_MSI_DATA);
  1613. val |= (1 << 26) | (1 << 28) | (1 << 29);
  1614. tw32(TG3PCI_MSI_DATA, val);
  1615. }
  1616. /* Descriptor ring init may make accesses to the
  1617. * NIC SRAM area to setup the TX descriptors, so we
  1618. * can only do this after the hardware has been
  1619. * successfully reset.
  1620. */
  1621. err = tg3_init_rings(tp);
  1622. if (err)
  1623. return err;
  1624. if (tg3_flag(tp, 57765_PLUS)) {
  1625. val = tr32(TG3PCI_DMA_RW_CTRL) &
  1626. ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
  1627. if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
  1628. val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
  1629. if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
  1630. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
  1631. val |= DMA_RWCTRL_TAGGED_STAT_WA;
  1632. tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
  1633. } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
  1634. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
  1635. /* This value is determined during the probe time DMA
  1636. * engine test, tg3_test_dma.
  1637. */
  1638. tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
  1639. }
  1640. tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
  1641. GRC_MODE_4X_NIC_SEND_RINGS |
  1642. GRC_MODE_NO_TX_PHDR_CSUM |
  1643. GRC_MODE_NO_RX_PHDR_CSUM);
  1644. tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
  1645. tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
  1646. /* Pseudo-header checksum is done by hardware logic and not
  1647. * the offload processers, so make the chip do the pseudo-
  1648. * header checksums on receive. For transmit it is more
  1649. * convenient to do the pseudo-header checksum in software
  1650. * as Linux does that on transmit for us in all cases.
  1651. */
  1652. tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
  1653. tw32(GRC_MODE,
  1654. tp->grc_mode |
  1655. (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
  1656. /* Setup the timer prescalar register. Clock is always 66Mhz. */
  1657. val = tr32(GRC_MISC_CFG);
  1658. val &= ~0xff;
  1659. val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
  1660. tw32(GRC_MISC_CFG, val);
  1661. /* Initialize MBUF/DESC pool. */
  1662. if (tg3_flag(tp, 5750_PLUS)) {
  1663. /* Do nothing. */
  1664. } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
  1665. tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
  1666. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
  1667. tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
  1668. else
  1669. tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
  1670. tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
  1671. tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
  1672. }
  1673. tw32(BUFMGR_MB_RDMA_LOW_WATER,
  1674. tp->bufmgr_config.mbuf_read_dma_low_water);
  1675. tw32(BUFMGR_MB_MACRX_LOW_WATER,
  1676. tp->bufmgr_config.mbuf_mac_rx_low_water);
  1677. tw32(BUFMGR_MB_HIGH_WATER,
  1678. tp->bufmgr_config.mbuf_high_water);
  1679. tw32(BUFMGR_DMA_LOW_WATER,
  1680. tp->bufmgr_config.dma_low_water);
  1681. tw32(BUFMGR_DMA_HIGH_WATER,
  1682. tp->bufmgr_config.dma_high_water);
  1683. val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
  1684. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
  1685. val |= BUFMGR_MODE_NO_TX_UNDERRUN;
  1686. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
  1687. tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
  1688. tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
  1689. val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
  1690. tw32(BUFMGR_MODE, val);
  1691. for (i = 0; i < 2000; i++) {
  1692. if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
  1693. break;
  1694. udelay(10);
  1695. }
  1696. if (i >= 2000) {
  1697. DBGC(tp->dev, "%s cannot enable BUFMGR\n", __func__);
  1698. return -ENODEV;
  1699. }
  1700. if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
  1701. tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
  1702. tg3_setup_rxbd_thresholds(tp);
  1703. /* Initialize TG3_BDINFO's at:
  1704. * RCVDBDI_STD_BD: standard eth size rx ring
  1705. * RCVDBDI_JUMBO_BD: jumbo frame rx ring
  1706. * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
  1707. *
  1708. * like so:
  1709. * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
  1710. * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
  1711. * ring attribute flags
  1712. * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
  1713. *
  1714. * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
  1715. * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
  1716. *
  1717. * The size of each ring is fixed in the firmware, but the location is
  1718. * configurable.
  1719. */
  1720. tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
  1721. ((u64) tpr->rx_std_mapping >> 32));
  1722. tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
  1723. ((u64) tpr->rx_std_mapping & 0xffffffff));
  1724. if (!tg3_flag(tp, 5717_PLUS))
  1725. tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
  1726. NIC_SRAM_RX_BUFFER_DESC);
  1727. /* Disable the mini ring */
  1728. if (!tg3_flag(tp, 5705_PLUS))
  1729. tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
  1730. BDINFO_FLAGS_DISABLED);
  1731. val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
  1732. if (tg3_flag(tp, 57765_PLUS))
  1733. val |= (RX_STD_MAX_SIZE << 2);
  1734. tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
  1735. tpr->rx_std_prod_idx = 0;
  1736. /* std prod index is updated by tg3_refill_prod_ring() */
  1737. tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 0);
  1738. tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 0);
  1739. tg3_rings_reset(tp);
  1740. __tg3_set_mac_addr(tp,0);
  1741. #define TG3_MAX_MTU 1522
  1742. /* MTU + ethernet header + FCS + optional VLAN tag */
  1743. tw32(MAC_RX_MTU_SIZE, TG3_MAX_MTU);
  1744. /* The slot time is changed by tg3_setup_phy if we
  1745. * run at gigabit with half duplex.
  1746. */
  1747. val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
  1748. (6 << TX_LENGTHS_IPG_SHIFT) |
  1749. (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
  1750. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
  1751. val |= tr32(MAC_TX_LENGTHS) &
  1752. (TX_LENGTHS_JMB_FRM_LEN_MSK |
  1753. TX_LENGTHS_CNT_DWN_VAL_MSK);
  1754. tw32(MAC_TX_LENGTHS, val);
  1755. /* Receive rules. */
  1756. tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
  1757. tw32(RCVLPC_CONFIG, 0x0181);
  1758. /* Calculate RDMAC_MODE setting early, we need it to determine
  1759. * the RCVLPC_STATE_ENABLE mask.
  1760. */
  1761. rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
  1762. RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
  1763. RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
  1764. RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
  1765. RDMAC_MODE_LNGREAD_ENAB);
  1766. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
  1767. rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
  1768. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
  1769. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
  1770. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
  1771. rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
  1772. RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
  1773. RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
  1774. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
  1775. tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
  1776. if (tg3_flag(tp, TSO_CAPABLE) &&
  1777. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
  1778. rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
  1779. } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
  1780. !tg3_flag(tp, IS_5788)) {
  1781. rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
  1782. }
  1783. }
  1784. if (tg3_flag(tp, PCI_EXPRESS))
  1785. rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
  1786. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
  1787. rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
  1788. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
  1789. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
  1790. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
  1791. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
  1792. tg3_flag(tp, 57765_PLUS)) {
  1793. val = tr32(TG3_RDMA_RSRVCTRL_REG);
  1794. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
  1795. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
  1796. val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
  1797. TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
  1798. TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
  1799. val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
  1800. TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
  1801. TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
  1802. }
  1803. tw32(TG3_RDMA_RSRVCTRL_REG,
  1804. val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
  1805. }
  1806. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
  1807. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
  1808. val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
  1809. tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
  1810. TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
  1811. TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
  1812. }
  1813. /* Receive/send statistics. */
  1814. if (tg3_flag(tp, 5750_PLUS)) {
  1815. val = tr32(RCVLPC_STATS_ENABLE);
  1816. val &= ~RCVLPC_STATSENAB_DACK_FIX;
  1817. tw32(RCVLPC_STATS_ENABLE, val);
  1818. } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
  1819. tg3_flag(tp, TSO_CAPABLE)) {
  1820. val = tr32(RCVLPC_STATS_ENABLE);
  1821. val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
  1822. tw32(RCVLPC_STATS_ENABLE, val);
  1823. } else {
  1824. tw32(RCVLPC_STATS_ENABLE, 0xffffff);
  1825. }
  1826. tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
  1827. tw32(SNDDATAI_STATSENAB, 0xffffff);
  1828. tw32(SNDDATAI_STATSCTRL,
  1829. (SNDDATAI_SCTRL_ENABLE |
  1830. SNDDATAI_SCTRL_FASTUPD));
  1831. /* Setup host coalescing engine. */
  1832. tw32(HOSTCC_MODE, 0);
  1833. for (i = 0; i < 2000; i++) {
  1834. if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
  1835. break;
  1836. udelay(10);
  1837. }
  1838. __tg3_set_coalesce(tp);
  1839. if (!tg3_flag(tp, 5705_PLUS)) {
  1840. /* Status/statistics block address. See tg3_timer,
  1841. * the tg3_periodic_fetch_stats call there, and
  1842. * tg3_get_stats to see how this works for 5705/5750 chips.
  1843. * NOTE: stats block removed for iPXE
  1844. */
  1845. tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
  1846. /* Clear statistics and status block memory areas */
  1847. for (i = NIC_SRAM_STATS_BLK;
  1848. i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
  1849. i += sizeof(u32)) {
  1850. tg3_write_mem(tp, i, 0);
  1851. udelay(40);
  1852. }
  1853. }
  1854. tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
  1855. tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
  1856. tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
  1857. if (!tg3_flag(tp, 5705_PLUS))
  1858. tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
  1859. if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
  1860. tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
  1861. /* reset to prevent losing 1st rx packet intermittently */
  1862. tw32_f(MAC_RX_MODE, RX_MODE_RESET);
  1863. udelay(10);
  1864. }
  1865. if (tg3_flag(tp, ENABLE_APE))
  1866. tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
  1867. else
  1868. tp->mac_mode = 0;
  1869. tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
  1870. MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
  1871. if (!tg3_flag(tp, 5705_PLUS) &&
  1872. !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
  1873. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
  1874. tp->mac_mode |= MAC_MODE_LINK_POLARITY;
  1875. tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
  1876. udelay(40);
  1877. /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
  1878. * If TG3_FLAG_IS_NIC is zero, we should read the
  1879. * register to preserve the GPIO settings for LOMs. The GPIOs,
  1880. * whether used as inputs or outputs, are set by boot code after
  1881. * reset.
  1882. */
  1883. if (!tg3_flag(tp, IS_NIC)) {
  1884. u32 gpio_mask;
  1885. gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
  1886. GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
  1887. GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
  1888. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
  1889. gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
  1890. GRC_LCLCTRL_GPIO_OUTPUT3;
  1891. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
  1892. gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
  1893. tp->grc_local_ctrl &= ~gpio_mask;
  1894. tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
  1895. /* GPIO1 must be driven high for eeprom write protect */
  1896. if (tg3_flag(tp, EEPROM_WRITE_PROT))
  1897. tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
  1898. GRC_LCLCTRL_GPIO_OUTPUT1);
  1899. }
  1900. tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
  1901. udelay(100);
  1902. if (!tg3_flag(tp, 5705_PLUS)) {
  1903. tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
  1904. udelay(40);
  1905. }
  1906. val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
  1907. WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
  1908. WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
  1909. WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
  1910. WDMAC_MODE_LNGREAD_ENAB);
  1911. /* Enable host coalescing bug fix */
  1912. if (tg3_flag(tp, 5755_PLUS))
  1913. val |= WDMAC_MODE_STATUS_TAG_FIX;
  1914. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
  1915. val |= WDMAC_MODE_BURST_ALL_DATA;
  1916. tw32_f(WDMAC_MODE, val);
  1917. udelay(40);
  1918. if (tg3_flag(tp, PCIX_MODE)) {
  1919. u16 pcix_cmd;
  1920. pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
  1921. &pcix_cmd);
  1922. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
  1923. pcix_cmd &= ~PCI_X_CMD_MAX_READ;
  1924. pcix_cmd |= PCI_X_CMD_READ_2K;
  1925. } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
  1926. pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
  1927. pcix_cmd |= PCI_X_CMD_READ_2K;
  1928. }
  1929. pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
  1930. pcix_cmd);
  1931. }
  1932. tw32_f(RDMAC_MODE, rdmac_mode);
  1933. udelay(40);
  1934. tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
  1935. if (!tg3_flag(tp, 5705_PLUS))
  1936. tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
  1937. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
  1938. tw32(SNDDATAC_MODE,
  1939. SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
  1940. else
  1941. tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
  1942. tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
  1943. tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
  1944. val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
  1945. if (tg3_flag(tp, LRG_PROD_RING_CAP))
  1946. val |= RCVDBDI_MODE_LRG_RING_SZ;
  1947. tw32(RCVDBDI_MODE, val);
  1948. tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
  1949. val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
  1950. if (tg3_flag(tp, ENABLE_TSS))
  1951. val |= SNDBDI_MODE_MULTI_TXQ_EN;
  1952. tw32(SNDBDI_MODE, val);
  1953. tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
  1954. /* FIXME: 5701 firmware fix? */
  1955. #if 0
  1956. if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
  1957. err = tg3_load_5701_a0_firmware_fix(tp);
  1958. if (err)
  1959. return err;
  1960. }
  1961. #endif
  1962. tp->tx_mode = TX_MODE_ENABLE;
  1963. if (tg3_flag(tp, 5755_PLUS) ||
  1964. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
  1965. tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
  1966. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
  1967. val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
  1968. tp->tx_mode &= ~val;
  1969. tp->tx_mode |= tr32(MAC_TX_MODE) & val;
  1970. }
  1971. tw32_f(MAC_TX_MODE, tp->tx_mode);
  1972. udelay(100);
  1973. tp->rx_mode = RX_MODE_ENABLE;
  1974. tw32_f(MAC_RX_MODE, tp->rx_mode);
  1975. udelay(10);
  1976. tw32(MAC_LED_CTRL, tp->led_ctrl);
  1977. tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
  1978. if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
  1979. tw32_f(MAC_RX_MODE, RX_MODE_RESET);
  1980. udelay(10);
  1981. }
  1982. tw32_f(MAC_RX_MODE, tp->rx_mode);
  1983. udelay(10);
  1984. if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
  1985. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
  1986. !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
  1987. /* Set drive transmission level to 1.2V */
  1988. /* only if the signal pre-emphasis bit is not set */
  1989. val = tr32(MAC_SERDES_CFG);
  1990. val &= 0xfffff000;
  1991. val |= 0x880;
  1992. tw32(MAC_SERDES_CFG, val);
  1993. }
  1994. if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
  1995. tw32(MAC_SERDES_CFG, 0x616000);
  1996. }
  1997. /* Prevent chip from dropping frames when flow control
  1998. * is enabled.
  1999. */
  2000. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
  2001. val = 1;
  2002. else
  2003. val = 2;
  2004. tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
  2005. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
  2006. (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
  2007. /* Use hardware link auto-negotiation */
  2008. tg3_flag_set(tp, HW_AUTONEG);
  2009. }
  2010. if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
  2011. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
  2012. u32 tmp;
  2013. tmp = tr32(SERDES_RX_CTRL);
  2014. tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
  2015. tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
  2016. tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
  2017. tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
  2018. }
  2019. err = tg3_setup_phy(tp, 0);
  2020. if (err)
  2021. return err;
  2022. if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
  2023. !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
  2024. u32 tmp;
  2025. /* Clear CRC stats. */
  2026. if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
  2027. tg3_writephy(tp, MII_TG3_TEST1,
  2028. tmp | MII_TG3_TEST1_CRC_EN);
  2029. tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
  2030. }
  2031. }
  2032. __tg3_set_rx_mode(tp->dev);
  2033. /* Initialize receive rules. */
  2034. tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
  2035. tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
  2036. tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
  2037. tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
  2038. if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
  2039. limit = 8;
  2040. else
  2041. limit = 16;
  2042. if (tg3_flag(tp, ENABLE_ASF))
  2043. limit -= 4;
  2044. switch (limit) {
  2045. case 16:
  2046. tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
  2047. case 15:
  2048. tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
  2049. case 14:
  2050. tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
  2051. case 13:
  2052. tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
  2053. case 12:
  2054. tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
  2055. case 11:
  2056. tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
  2057. case 10:
  2058. tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
  2059. case 9:
  2060. tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
  2061. case 8:
  2062. tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
  2063. case 7:
  2064. tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
  2065. case 6:
  2066. tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
  2067. case 5:
  2068. tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
  2069. case 4:
  2070. /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
  2071. case 3:
  2072. /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
  2073. case 2:
  2074. case 1:
  2075. default:
  2076. break;
  2077. }
  2078. return 0;
  2079. }
  2080. /* Called at device open time to get the chip ready for
  2081. * packet processing. Invoked with tp->lock held.
  2082. */
  2083. int tg3_init_hw(struct tg3 *tp, int reset_phy)
  2084. { DBGP("%s\n", __func__);
  2085. tg3_switch_clocks(tp);
  2086. tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
  2087. return tg3_reset_hw(tp, reset_phy);
  2088. }
  2089. void tg3_set_txd(struct tg3 *tp, int entry,
  2090. dma_addr_t mapping, int len, u32 flags)
  2091. { DBGP("%s\n", __func__);
  2092. struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
  2093. txd->addr_hi = ((u64) mapping >> 32);
  2094. txd->addr_lo = ((u64) mapping & 0xffffffff);
  2095. txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
  2096. txd->vlan_tag = 0;
  2097. }
/* Run a loopback DMA test: upload an internal buffer descriptor into NIC
 * SRAM, kick the read (to_device) or write (!to_device) DMA engine at the
 * buffer at @buf_dma/@size, and poll for completion.
 *
 * @buf is unused here (the engine works on @buf_dma directly); the caller
 * presumably inspects the buffer contents afterwards — TODO confirm.
 *
 * Returns 0 if the completion FIFO reports our descriptor within the poll
 * window, -ENODEV on timeout (~4 ms: 40 polls x 100 us).
 */
int tg3_do_test_dma(struct tg3 *tp, u32 __unused *buf, dma_addr_t buf_dma, int size, int to_device)
{ DBGP("%s\n", __func__);
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int ret;
	unsigned int i;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce completion FIFOs and both DMA engines before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the internal descriptor pointing at the host test buffer.
	 * nic_mbuf 0x00002100 and the cqid_sqid values below are magic
	 * queue/mbuf assignments for the test — taken as-is from the
	 * vendor driver.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		/* Enable the read-DMA engine (host -> device). */
		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		/* Enable the write-DMA engine (device -> host). */
		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the PCI
	 * config-space memory window (base+data register pair).
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	/* Restore the memory window base to 0 when done. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Enqueue the SRAM descriptor address to start the transfer. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the matching completion FIFO for our descriptor address
	 * (low 16 bits), up to 40 x 100 us.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}