/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

FILE_LICENCE ( GPL2_ONLY );

#include <mii.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <byteswap.h>
#include <ipxe/pci.h>
#include <ipxe/iobuf.h>
#include <ipxe/timer.h>
#include <ipxe/malloc.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include <ipxe/netdevice.h>

#include "tg3.h"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_MAC_MODE	0
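
/* Register accessor helpers.
 *
 * The indirect variants below tunnel register accesses through PCI
 * configuration space: the target offset is written to the
 * TG3PCI_REG_BASE_ADDR window and the data moves through
 * TG3PCI_REG_DATA. This path stays usable when direct MMIO is not
 * trustworthy, e.g. around chip reset or with the PCI-X target
 * workaround in effect.
 */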
void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{ DBGP("%s\n", __func__);

	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
}

u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{ DBGP("%s\n", __func__);

	u32 val;

	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);

	return val;
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{ DBGP("%s\n", __func__);

	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{ DBGP("%s\n", __func__);

	writel(val, tp->regs + off + GRCMBOX_BASE);
}

void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{ DBGP("%s\n", __func__);

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{ DBGP("%s\n", __func__);

	u32 val;

	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);

	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{ DBGP("%s\n", __func__);

	tw32(off, val);
	if (usec_wait)
		udelay(usec_wait);
	tr32(off);

	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
/* stolen from legacy etherboot tg3 driver */
void tg3_set_power_state_0(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	uint16_t power_control;
	int pm = tp->pm_cap;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev, pm + PCI_PM_CTRL, &power_control);

	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	power_control |= 0;
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);

	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

	return;
}

void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{ DBGP("%s\n", __func__);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}
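
/* Read the hardware configuration that the bootcode left in NIC SRAM
 * (signature, PHY id, LED mode, ASF/APE enables, RGMII settings) and
 * translate it into tp->phy_flags, tp->led_ctrl and the corresponding
 * feature flags.
 */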
#define PCI_VENDOR_ID_ARIMA 0x161f

static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so we need to make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	mdelay(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed. Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
					  SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->subsystem_device == 0x205a ||
			     tp->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, ENABLE_ASF))
			tg3_flag_set(tp, ENABLE_APE);

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* SERDES signal pre-emphasis in register 0x590 is set by
		 * the bootcode if bit 18 is set.
		 */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
}
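
/* Sequence TG3PCI_CLOCK_CTRL to move the chip off the ALTCLK source
 * and onto its core clock. Chips with a CPMU, and the 5780 class,
 * manage their clocking themselves, so this is a no-op for them.
 */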
static void tg3_switch_clocks(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
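
/* Identify the chip revision and bus type (PCI / PCI-X / PCIe), apply
 * errata workarounds, and set the feature flags and mailbox accessor
 * methods that the rest of the driver relies on. This runs before the
 * normal MMIO paths can be trusted, hence the heavy use of PCI
 * configuration space accesses.
 */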
int tg3_get_invariants(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver has this
	 * workaround but turns MWI off all the time so it never uses
	 * it. This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
	 * has the register indirect write enable bit set before
	 * we try to access any of the MMIO registers. It is also
	 * critical that the PCI-X hw workaround situation is decided
	 * before that as well.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 prod_id_asic_rev;

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN2_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN15_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else
			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
					      &prod_id_asic_rev);

		tp->pci_chip_rev_id = prod_id_asic_rev;
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* Initialize misc host control in PCI block. */
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766 ||
	    tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);

	if (tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
	if (tp->pcie_cap != 0) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			DBGC(&tp->pdev->dev,
			     "Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}
	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to work around a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	tp->write32_mbox = tg3_write_indirect_reg32;
	tp->write32_rx_mbox = tg3_write_indirect_mbox;
	tp->write32_tx_mbox = tg3_write_indirect_mbox;
	tp->read32_mbox = tg3_read_indirect_mbox;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}
	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, CPMU_PRESENT);

	/* Set up tp->grc_local_ctrl before calling tg3_power_up().
	 * GPIO1 driven high will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	/* Force the chip into D0. */
	tg3_set_power_state_0(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want the Ethernet@WireSpeed phy feature */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	tg3_mdio_init(tp);

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly. If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);

	/* FIXME: do we need nvram access? */
///	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = TG3_DEF_MAC_MODE;

	/* These devices are limited to 10/100 operation only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		DBGC(&tp->pdev->dev, "phy probe failed, err: %s\n", strerror(err));
		/* ... but do not return immediately ... */
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to work around hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	return err;
}
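
/* Pick buffer manager mbuf/DMA watermark defaults by chip generation:
 * 57765-and-later, 5705-and-later, or the original 570x values.
 */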
void tg3_init_bufmgr_config(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;
		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
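
/* Driver <-> firmware event handshake: the driver raises
 * GRC_RX_CPU_DRIVER_EVENT and the RX CPU firmware clears it to ACK.
 * The wait below is bounded by TG3_FW_EVENT_TIMEOUT_USEC.
 */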
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

void tg3_wait_for_event_ack(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	int i;

	for (i = 0; i < TG3_FW_EVENT_TIMEOUT_USEC / 10; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;

		udelay(10);
	}
}

void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{ DBGP("%s\n", __func__);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}

static void tg3_stop_fw(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

static void tg3_write_sig_pre_reset(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
}

void tg3_disable_ints(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(tp->int_mbox, 0x00000001);
}

void tg3_enable_ints(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;

	tw32_mailbox_f(tp->int_mbox, tp->last_tag << 24);

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);
}
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it clears. */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
{ DBGP("%s\n", __func__);

	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT) {
		DBGC(&tp->pdev->dev,
		     "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
		     ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
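
/* Quiesce the NIC: disable interrupts, then stop the receive, send
 * and DMA blocks in dependency order, and finally clear the host
 * status block. Errors from individual blocks are accumulated in err
 * rather than aborting the shutdown early.
 */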
static int tg3_abort_hw(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		DBGC(&tp->pdev->dev,
		     "%s timed out, TX_MODE_ENABLE will not clear "
		     "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);

	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);

	return err;
}
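
/* Program the station address into all four MAC_ADDR slots (slot 1
 * optionally skipped) and, on 5703/5704, into the twelve extended
 * address slots, then seed the transmit backoff generator from the
 * address bytes.
 */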
void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{ DBGP("%s\n", __func__);

	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->ll_addr[0] << 8) |
		     tp->dev->ll_addr[1]);
	addr_low = ((tp->dev->ll_addr[2] << 24) |
		    (tp->dev->ll_addr[3] << 16) |
		    (tp->dev->ll_addr[4] <<  8) |
		    (tp->dev->ll_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->ll_addr[0] +
		     tp->dev->ll_addr[1] +
		     tp->dev->ll_addr[2] +
		     tp->dev->ll_addr[3] +
		     tp->dev->ll_addr[4] +
		     tp->dev->ll_addr[5]) &
		    TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;

	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == (u32)~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware. Some Sun onboard
	 * parts are configured like that. So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		DBGC(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
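
/* NVRAM software arbitration: request SWARB_REQ_SET1 and poll for
 * SWARB_GNT1 before touching the flash interface; the unlock drops
 * the request again. nvram_lock_cnt makes the lock recursive.
 */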
static int tg3_nvram_lock(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

static void tg3_nvram_unlock(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
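
/* Core-clock reset via GRC_MISC_CFG. The reset clears the PCI memory
 * enable bit (and on some chips the MSI enable), so PCI state is
 * saved beforehand and restored afterwards, and the nvram lock count
 * is dropped because the reset releases the arbitration grant.
 */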
  1027. static int tg3_chip_reset(struct tg3 *tp)
  1028. { DBGP("%s\n", __func__);
  1029. u32 val;
  1030. int err;
  1031. tg3_nvram_lock(tp);
  1032. /* No matching tg3_nvram_unlock() after this because
  1033. * chip reset below will undo the nvram lock.
  1034. */
  1035. tp->nvram_lock_cnt = 0;
  1036. /* GRC_MISC_CFG core clock reset will clear the memory
  1037. * enable bit in PCI register 4 and the MSI enable bit
  1038. * on some chips, so we save relevant registers here.
  1039. */
  1040. tg3_save_pci_state(tp);
  1041. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
  1042. tg3_flag(tp, 5755_PLUS))
  1043. tw32(GRC_FASTBOOT_PC, 0);
  1044. #if 0
  1045. /*
  1046. * We must avoid the readl() that normally takes place.
  1047. * It locks machines, causes machine checks, and other
  1048. * fun things. So, temporarily disable the 5701
  1049. * hardware workaround, while we do the reset.
  1050. */
  1051. write_op = tp->write32;
  1052. if (write_op == tg3_write_flush_reg32)
  1053. tp->write32 = tg3_write32;
  1054. #endif
  1055. /* Prevent the irq handler from reading or writing PCI registers
  1056. * during chip reset when the memory enable bit in the PCI command
  1057. * register may be cleared. The chip does not generate interrupt
  1058. * at this time, but the irq handler may still be called due to irq
  1059. * sharing or irqpoll.
  1060. */
  1061. tg3_flag_set(tp, CHIP_RESETTING);
  1062. if (tp->hw_status) {
  1063. tp->hw_status->status = 0;
  1064. tp->hw_status->status_tag = 0;
  1065. }
  1066. tp->last_tag = 0;
  1067. tp->last_irq_tag = 0;
  1068. mb();
  1069. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
  1070. val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
  1071. tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
  1072. }
  1073. /* do the reset */
  1074. val = GRC_MISC_CFG_CORECLK_RESET;
  1075. if (tg3_flag(tp, PCI_EXPRESS)) {
  1076. /* Force PCIe 1.0a mode */
  1077. if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
  1078. !tg3_flag(tp, 57765_PLUS) &&
  1079. tr32(TG3_PCIE_PHY_TSTCTL) ==
  1080. (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
  1081. tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
  1082. if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
  1083. tw32(GRC_MISC_CFG, (1 << 29));
  1084. val |= (1 << 29);
  1085. }
  1086. }
  1087. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  1088. tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
  1089. tw32(GRC_VCPU_EXT_CTRL,
  1090. tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
  1091. }
  1092. /* Manage gphy power for all CPMU absent PCIe devices. */
  1093. if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
  1094. val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
  1095. tw32(GRC_MISC_CFG, val);
  1096. /* Unfortunately, we have to delay before the PCI read back.
  1097. * Some 575X chips even will not respond to a PCI cfg access
  1098. * when the reset command is given to the chip.
  1099. *
  1100. * How do these hardware designers expect things to work
  1101. * properly if the PCI write is posted for a long period
  1102. * of time? It is always necessary to have some method by
  1103. * which a register read back can occur to push the write
  1104. * out which does the reset.
  1105. *
  1106. * For most tg3 variants the trick below was working.
  1107. * Ho hum...
  1108. */
  1109. udelay(120);
  1110. /* Flush PCI posted writes. The normal MMIO registers
  1111. * are inaccessible at this time so this is the only
  1112. * way to make this reliably (actually, this is no longer
  1113. * the case, see above). I tried to use indirect
  1114. * register read/write but this upset some 5701 variants.
  1115. */
  1116. pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
  1117. udelay(120);
  1118. if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
  1119. u16 val16;
  1120. if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
  1121. int i;
  1122. u32 cfg_val;
  1123. /* Wait for link training to complete. */
  1124. for (i = 0; i < 5000; i++)
  1125. udelay(100);
  1126. pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
  1127. pci_write_config_dword(tp->pdev, 0xc4,
  1128. cfg_val | (1 << 15));
  1129. }
		/* Clear the "no snoop" and "relaxed ordering" bits. */
		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_DEVCTL,
				     &val16);
		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
			   PCI_EXP_DEVCTL_NOSNOOP_EN);

		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting. Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVCTL,
				      val16);

		/* Clear error status */
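		/* (The DEVSTA error bits are write-one-to-clear, so
		 * writing them back acknowledges any errors latched
		 * during the reset.)
		 */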
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}
	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);
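	/* 5780-class devices appear to share the memory arbiter with
	 * the other port, so preserve whatever mode bits are already
	 * set rather than starting from zero.
	 */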
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);
		tw32(0xc4, val | (1 << 15));
	}
	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	err = tg3_poll_fw(tp);
	if (err)
		return err;
	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);
		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	if (tg3_flag(tp, CPMU_PRESENT)) {
		tw32(TG3_CPMU_D0_CLCK_POLICY, 0);

		val = tr32(TG3_CPMU_CLCK_ORIDE_EN);
		tw32(TG3_CPMU_CLCK_ORIDE_EN,
		     val | CPMU_CLCK_ORIDE_MAC_CLCK_ORIDE_EN);
	}

	return 0;
}
int tg3_halt(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	int err;

	tg3_stop_fw(tp);
	tg3_write_sig_pre_reset(tp);

	tg3_abort_hw(tp);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	if (err)
		return err;

	return 0;
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{ DBGP("%s\n", __func__);

	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
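	/* Poll for completion: 1000 iterations of mdelay(1) allows
	 * roughly one second for the EEPROM read to finish.
	 */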
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);
		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		mdelay(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format. Perform a blind byteswap to compensate.
	 */
	*val = bswap_32(tmp);

	return 0;
}
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{ DBGP("%s\n", __func__);
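	/* Atmel AT45DB0x1B flashes place the page index at a fixed
	 * power-of-two bit position even though the page size itself
	 * (typically 264 bytes) is not a power of two, so a linear
	 * NVRAM offset must be split into a page index plus an
	 * in-page offset.
	 */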
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))
		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}
static void tg3_enable_nvram_access(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

static void tg3_disable_nvram_access(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
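/* Each polling iteration in tg3_nvram_exec_cmd() waits 10us, so this
 * bounds a single NVRAM command at roughly 100ms.
 */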
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{ DBGP("%s\n", __func__);

	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM. On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{ DBGP("%s\n", __func__);

	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
				 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, u32 *val)
{ DBGP("%s\n", __func__);

	u32 v = 0;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);

	return res;
}
int tg3_get_device_address(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (PCI_FUNC(tp->pdev->busdevfn) & 1)
			mac_offset = 0xcc;
		if (PCI_FUNC(tp->pdev->busdevfn) > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
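	/* 0x484b is the ASCII signature "HK" that bootcode places in
	 * the upper half of the mailbox to mark a valid MAC address.
	 */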
	if ((hi >> 16) == 0x484b) {
		dev->hw_addr[0] = (hi >> 8) & 0xff;
		dev->hw_addr[1] = (hi >> 0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->hw_addr[2] = (lo >> 24) & 0xff;
		dev->hw_addr[3] = (lo >> 16) & 0xff;
		dev->hw_addr[4] = (lo >> 8) & 0xff;
		dev->hw_addr[5] = (lo >> 0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->hw_addr[0]);
	}

	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->hw_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->hw_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->hw_addr[5] = lo & 0xff;
			dev->hw_addr[4] = (lo >> 8) & 0xff;
			dev->hw_addr[3] = (lo >> 16) & 0xff;
			dev->hw_addr[2] = (lo >> 24) & 0xff;
			dev->hw_addr[1] = hi & 0xff;
			dev->hw_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->hw_addr[0])) {
		return -EINVAL;
	}

	return 0;
}
static void __tg3_set_rx_mode(struct net_device *dev)
{ DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	rx_mode |= RX_MODE_KEEP_VLAN_TAG;

	/* Accept all multicast. */
	tw32(MAC_HASH_REG_0, 0xffffffff);
	tw32(MAC_HASH_REG_1, 0xffffffff);
	tw32(MAC_HASH_REG_2, 0xffffffff);
	tw32(MAC_HASH_REG_3, 0xffffffff);

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
static void __tg3_set_coalesce(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	tw32(HOSTCC_RXCOL_TICKS, 0);
	tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
	tw32(HOSTCC_RXMAX_FRAMES, 1);
	/* FIXME: mix between TXMAX and RXMAX taken from legacy driver */
	tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
	tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
	tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = DEFAULT_STAT_COAL_TICKS;

		tw32(HOSTCC_RXCOAL_TICK_INT, DEFAULT_RXCOAL_TICK_INT);
		tw32(HOSTCC_TXCOAL_TICK_INT, DEFAULT_TXCOAL_TICK_INT);

		if (!netdev_link_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{ DBGP("%s\n", __func__);

	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
static void tg3_rings_reset(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	int i;
	u32 txrcb, rxrcb, limit;

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
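	/* Writing a nonzero value to the interrupt mailbox masks the
	 * device interrupt.
	 */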
	tw32_mailbox_f(tp->int_mbox, 1);

	tp->tx_prod = 0;
	tp->tx_cons = 0;
	tw32_mailbox(tp->prodmbox, 0);
	tw32_rx_mbox(tp->consmbox, 0);

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tp->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tp->status_mapping & 0xffffffff));

	if (tp->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tp->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	/* FIXME: will TG3_RX_RET_MAX_SIZE_5705 work on all cards? */
	if (tp->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tp->rx_rcb_mapping,
			       TG3_RX_RET_MAX_SIZE_5705 <<
			       BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	u32 val, bdcache_maxcnt;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	/* NOTE: the legacy driver uses RX_PENDING / 8, but we only use
	 * 4 descriptors for now; divide by 4 so the result stays > 0.
	 */
	val = TG3_DEF_RX_RING_PENDING / 4;
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
}
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{ DBGP("%s\n", __func__);

	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->prodring;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp);

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}
	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}
	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon. This bit has no effect on any
	 * other revision. But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}
	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}
	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive. For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	tw32(GRC_MODE,
	     tp->grc_mode |
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
	/* Setup the timer prescaler register. Clock is always 66 MHz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);
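	/* Assuming the usual divide-by-(N+1) prescaler, 66 MHz / 66
	 * yields a 1 MHz (1us) timer tick.
	 */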
	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	}

	tw32(BUFMGR_MB_RDMA_LOW_WATER,
	     tp->bufmgr_config.mbuf_read_dma_low_water);
	tw32(BUFMGR_MB_MACRX_LOW_WATER,
	     tp->bufmgr_config.mbuf_mac_rx_low_water);
	tw32(BUFMGR_MB_HIGH_WATER,
	     tp->bufmgr_config.mbuf_high_water);

	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
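	/* Allow up to 2000 * 10us = 20ms for the buffer manager to
	 * report itself enabled.
	 */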
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		DBGC(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
	if (tg3_flag(tp, 57765_PLUS))
		val |= (RX_STD_MAX_SIZE << 2);

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = 0;

	/* std prod index is updated by tg3_refill_prod_ring() */
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 0);
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 0);

	tg3_rings_reset(tp);

	__tg3_set_mac_addr(tp, 0);
#define TG3_MAX_MTU 1522
	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE, TG3_MAX_MTU);
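	/* 1500 (MTU) + 14 (Ethernet header) + 4 (FCS) + 4 (VLAN) = 1522 */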
	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3_RDMA_RSRVCTRL_REG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(TG3_RDMA_RSRVCTRL_REG,
		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}
	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));
	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
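	/* Allow up to 2000 * 10us = 20ms for the engine to stop before
	 * reprogramming it.
	 */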
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address. See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 * NOTE: stats block removed for iPXE
		 */
		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}
	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);

	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;
	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;

	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;

	tw32_f(MAC_MODE,
	       tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);
	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);
	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}
	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);

	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	/* FIXME: 5701 firmware fix? */
#if 0
	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}
#endif
	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	tp->rx_mode = RX_MODE_ENABLE;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V, but only
			 * if the signal pre-emphasis bit is not set.
			 */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}
	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}
	err = tg3_setup_phy(tp, 0);
	if (err)
		return err;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
		u32 tmp;

		/* Clear CRC stats. */
		if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     tmp | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
		}
	}

	__tg3_set_rx_mode(tp->dev);
	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
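	/* Every case below falls through, so entering the switch at
	 * 'limit' disables each rule from limit - 1 down; rules 0 and
	 * 1 were programmed above.
	 */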
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
		/* Fall through */
	case 15:
		tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
		/* Fall through */
	case 14:
		tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
		/* Fall through */
	case 13:
		tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
		/* Fall through */
	case 12:
		tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
		/* Fall through */
	case 11:
		tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
		/* Fall through */
	case 10:
		tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
		/* Fall through */
	case 9:
		tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
		/* Fall through */
	case 8:
		tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
		/* Fall through */
	case 7:
		tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
		/* Fall through */
	case 6:
		tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
		/* Fall through */
	case 5:
		tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
		/* Fall through */
	case 4:
		/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
	case 2:
	case 1:
	default:
		break;
	}

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing. Invoked with tp->lock held.
 */
int tg3_init_hw(struct tg3 *tp, int reset_phy)
{ DBGP("%s\n", __func__);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
void tg3_set_txd(struct tg3 *tp, int entry,
		 dma_addr_t mapping, int len, u32 flags)
{ DBGP("%s\n", __func__);

	struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];

	txd->addr_hi = ((u64) mapping >> 32);
	txd->addr_lo = ((u64) mapping & 0xffffffff);
	txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
	txd->vlan_tag = 0;
}
int tg3_do_test_dma(struct tg3 *tp, u32 __unused *buf, dma_addr_t buf_dma,
		    int size, int to_device)
{ DBGP("%s\n", __func__);

	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int ret;
	unsigned int i;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33 MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;
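	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI config-space memory window.
	 */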
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
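	/* Poll the completion FIFO, allowing up to 40 * 100us = 4ms
	 * for the test descriptor to complete.
	 */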
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}
		udelay(100);
	}

	return ret;
}