You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

tg3_hw.c 76KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653
  1. /*
  2. * tg3.c: Broadcom Tigon3 ethernet driver.
  3. *
  4. * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  5. * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
  6. * Copyright (C) 2004 Sun Microsystems Inc.
  7. * Copyright (C) 2005-2011 Broadcom Corporation.
  8. *
  9. * Firmware is:
  10. * Derived from proprietary unpublished source code,
  11. * Copyright (C) 2000-2003 Broadcom Corporation.
  12. *
  13. * Permission is hereby granted for the distribution of this firmware
  14. * data in hexadecimal or equivalent format, provided this copyright
  15. * notice is accompanying it.
  16. */
  17. FILE_LICENCE ( GPL2_ONLY );
  18. #include <mii.h>
  19. #include <stdio.h>
  20. #include <errno.h>
  21. #include <unistd.h>
  22. #include <byteswap.h>
  23. #include <ipxe/pci.h>
  24. #include <ipxe/iobuf.h>
  25. #include <ipxe/timer.h>
  26. #include <ipxe/malloc.h>
  27. #include <ipxe/if_ether.h>
  28. #include <ipxe/ethernet.h>
  29. #include <ipxe/netdevice.h>
  30. #include "tg3.h"
  31. #define RESET_KIND_SHUTDOWN 0
  32. #define RESET_KIND_INIT 1
  33. #define RESET_KIND_SUSPEND 2
  34. #define TG3_DEF_MAC_MODE 0
  35. void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
  36. { DBGP("%s\n", __func__);
  37. pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
  38. pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
  39. }
  40. u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
  41. { DBGP("%s\n", __func__);
  42. u32 val;
  43. pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
  44. pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
  45. return val;
  46. }
  47. static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
  48. { DBGP("%s\n", __func__);
  49. return readl(tp->regs + off + GRCMBOX_BASE);
  50. }
  51. static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
  52. { DBGP("%s\n", __func__);
  53. writel(val, tp->regs + off + GRCMBOX_BASE);
  54. }
  55. void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
  56. { DBGP("%s\n", __func__);
  57. if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
  58. pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
  59. TG3_64BIT_REG_LOW, val);
  60. return;
  61. }
  62. if (off == TG3_RX_STD_PROD_IDX_REG) {
  63. pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
  64. TG3_64BIT_REG_LOW, val);
  65. return;
  66. }
  67. pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
  68. pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
  69. /* In indirect mode when disabling interrupts, we also need
  70. * to clear the interrupt bit in the GRC local ctrl register.
  71. */
  72. if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
  73. (val == 0x1)) {
  74. pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
  75. tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
  76. }
  77. }
  78. u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
  79. { DBGP("%s\n", __func__);
  80. u32 val;
  81. pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
  82. pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
  83. return val;
  84. }
  85. /* usec_wait specifies the wait time in usec when writing to certain registers
  86. * where it is unsafe to read back the register without some delay.
  87. * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
  88. * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
  89. */
  90. void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
  91. { DBGP("%s\n", __func__);
  92. tw32(off, val);
  93. if (usec_wait)
  94. udelay(usec_wait);
  95. tr32(off);
  96. /* Wait again after the read for the posted method to guarantee that
  97. * the wait time is met.
  98. */
  99. if (usec_wait)
  100. udelay(usec_wait);
  101. }
  102. /* stolen from legacy etherboot tg3 driver */
  103. void tg3_set_power_state_0(struct tg3 *tp)
  104. { DBGP("%s\n", __func__);
  105. uint16_t power_control;
  106. int pm = tp->pm_cap;
  107. /* Make sure register accesses (indirect or otherwise)
  108. * will function correctly.
  109. */
  110. pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
  111. pci_read_config_word(tp->pdev, pm + PCI_PM_CTRL, &power_control);
  112. power_control |= PCI_PM_CTRL_PME_STATUS;
  113. power_control &= ~(PCI_PM_CTRL_STATE_MASK);
  114. power_control |= 0;
  115. pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
  116. tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
  117. return;
  118. }
  119. void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
  120. { DBGP("%s\n", __func__);
  121. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
  122. (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
  123. *val = 0;
  124. return;
  125. }
  126. pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
  127. pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
  128. /* Always leave this as zero. */
  129. pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
  130. }
  131. #define PCI_VENDOR_ID_ARIMA 0x161f
  132. static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
  133. { DBGP("%s\n", __func__);
  134. u32 val;
  135. u16 pmcsr;
  136. /* On some early chips the SRAM cannot be accessed in D3hot state,
  137. * so need make sure we're in D0.
  138. */
  139. pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
  140. pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
  141. pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
  142. mdelay(1);
  143. /* Make sure register accesses (indirect or otherwise)
  144. * will function correctly.
  145. */
  146. pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
  147. tp->misc_host_ctrl);
  148. /* The memory arbiter has to be enabled in order for SRAM accesses
  149. * to succeed. Normally on powerup the tg3 chip firmware will make
  150. * sure it is enabled, but other entities such as system netboot
  151. * code might disable it.
  152. */
  153. val = tr32(MEMARB_MODE);
  154. tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
  155. tp->phy_id = TG3_PHY_ID_INVALID;
  156. tp->led_ctrl = LED_CTRL_MODE_PHY_1;
  157. /* Assume an onboard device by default. */
  158. tg3_flag_set(tp, EEPROM_WRITE_PROT);
  159. tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
  160. if (val == NIC_SRAM_DATA_SIG_MAGIC) {
  161. u32 nic_cfg, led_cfg;
  162. u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
  163. int eeprom_phy_serdes = 0;
  164. tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
  165. tp->nic_sram_data_cfg = nic_cfg;
  166. tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
  167. ver >>= NIC_SRAM_DATA_VER_SHIFT;
  168. if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
  169. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
  170. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
  171. (ver > 0) && (ver < 0x100))
  172. tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
  173. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
  174. tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
  175. if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
  176. NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
  177. eeprom_phy_serdes = 1;
  178. tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
  179. if (nic_phy_id != 0) {
  180. u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
  181. u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
  182. eeprom_phy_id = (id1 >> 16) << 10;
  183. eeprom_phy_id |= (id2 & 0xfc00) << 16;
  184. eeprom_phy_id |= (id2 & 0x03ff) << 0;
  185. } else
  186. eeprom_phy_id = 0;
  187. tp->phy_id = eeprom_phy_id;
  188. if (eeprom_phy_serdes) {
  189. if (!tg3_flag(tp, 5705_PLUS))
  190. tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
  191. else
  192. tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
  193. }
  194. if (tg3_flag(tp, 5750_PLUS))
  195. led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
  196. SHASTA_EXT_LED_MODE_MASK);
  197. else
  198. led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
  199. switch (led_cfg) {
  200. default:
  201. case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
  202. tp->led_ctrl = LED_CTRL_MODE_PHY_1;
  203. break;
  204. case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
  205. tp->led_ctrl = LED_CTRL_MODE_PHY_2;
  206. break;
  207. case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
  208. tp->led_ctrl = LED_CTRL_MODE_MAC;
  209. /* Default to PHY_1_MODE if 0 (MAC_MODE) is
  210. * read on some older 5700/5701 bootcode.
  211. */
  212. if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
  213. ASIC_REV_5700 ||
  214. GET_ASIC_REV(tp->pci_chip_rev_id) ==
  215. ASIC_REV_5701)
  216. tp->led_ctrl = LED_CTRL_MODE_PHY_1;
  217. break;
  218. case SHASTA_EXT_LED_SHARED:
  219. tp->led_ctrl = LED_CTRL_MODE_SHARED;
  220. if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
  221. tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
  222. tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
  223. LED_CTRL_MODE_PHY_2);
  224. break;
  225. case SHASTA_EXT_LED_MAC:
  226. tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
  227. break;
  228. case SHASTA_EXT_LED_COMBO:
  229. tp->led_ctrl = LED_CTRL_MODE_COMBO;
  230. if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
  231. tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
  232. LED_CTRL_MODE_PHY_2);
  233. break;
  234. }
  235. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
  236. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
  237. tp->subsystem_vendor == PCI_VENDOR_ID_DELL)
  238. tp->led_ctrl = LED_CTRL_MODE_PHY_2;
  239. if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
  240. tp->led_ctrl = LED_CTRL_MODE_PHY_1;
  241. if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
  242. tg3_flag_set(tp, EEPROM_WRITE_PROT);
  243. if ((tp->subsystem_vendor ==
  244. PCI_VENDOR_ID_ARIMA) &&
  245. (tp->subsystem_device == 0x205a ||
  246. tp->subsystem_device == 0x2063))
  247. tg3_flag_clear(tp, EEPROM_WRITE_PROT);
  248. } else {
  249. tg3_flag_clear(tp, EEPROM_WRITE_PROT);
  250. tg3_flag_set(tp, IS_NIC);
  251. }
  252. if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
  253. tg3_flag_set(tp, ENABLE_ASF);
  254. if (tg3_flag(tp, 5750_PLUS))
  255. tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
  256. }
  257. if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
  258. tg3_flag(tp, 5750_PLUS))
  259. tg3_flag_set(tp, ENABLE_APE);
  260. if (cfg2 & (1 << 17))
  261. tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
  262. /* serdes signal pre-emphasis in register 0x590 set by */
  263. /* bootcode if bit 18 is set */
  264. if (cfg2 & (1 << 18))
  265. tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
  266. if ((tg3_flag(tp, 57765_PLUS) ||
  267. (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
  268. GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
  269. (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
  270. tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
  271. if (tg3_flag(tp, PCI_EXPRESS) &&
  272. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
  273. !tg3_flag(tp, 57765_PLUS)) {
  274. u32 cfg3;
  275. tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
  276. }
  277. if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
  278. tg3_flag_set(tp, RGMII_INBAND_DISABLE);
  279. if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
  280. tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
  281. if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
  282. tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
  283. }
  284. }
  285. static void tg3_switch_clocks(struct tg3 *tp)
  286. { DBGP("%s\n", __func__);
  287. u32 clock_ctrl;
  288. u32 orig_clock_ctrl;
  289. if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
  290. return;
  291. clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
  292. orig_clock_ctrl = clock_ctrl;
  293. clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
  294. CLOCK_CTRL_CLKRUN_OENABLE |
  295. 0x1f);
  296. tp->pci_clock_ctrl = clock_ctrl;
  297. if (tg3_flag(tp, 5705_PLUS)) {
  298. if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
  299. tw32_wait_f(TG3PCI_CLOCK_CTRL,
  300. clock_ctrl | CLOCK_CTRL_625_CORE, 40);
  301. }
  302. } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
  303. tw32_wait_f(TG3PCI_CLOCK_CTRL,
  304. clock_ctrl |
  305. (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
  306. 40);
  307. tw32_wait_f(TG3PCI_CLOCK_CTRL,
  308. clock_ctrl | (CLOCK_CTRL_ALTCLK),
  309. 40);
  310. }
  311. tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
  312. }
  313. int tg3_get_invariants(struct tg3 *tp)
  314. { DBGP("%s\n", __func__);
  315. u32 misc_ctrl_reg;
  316. u32 pci_state_reg, grc_misc_cfg;
  317. u32 val;
  318. u16 pci_cmd;
  319. int err;
  320. /* Force memory write invalidate off. If we leave it on,
  321. * then on 5700_BX chips we have to enable a workaround.
  322. * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
  323. * to match the cacheline size. The Broadcom driver have this
  324. * workaround but turns MWI off all the times so never uses
  325. * it. This seems to suggest that the workaround is insufficient.
  326. */
  327. pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
  328. pci_cmd &= ~PCI_COMMAND_INVALIDATE;
  329. pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
  330. /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
  331. * has the register indirect write enable bit set before
  332. * we try to access any of the MMIO registers. It is also
  333. * critical that the PCI-X hw workaround situation is decided
  334. * before that as well.
  335. */
  336. pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
  337. &misc_ctrl_reg);
  338. tp->pci_chip_rev_id = (misc_ctrl_reg >>
  339. MISC_HOST_CTRL_CHIPREV_SHIFT);
  340. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
  341. u32 prod_id_asic_rev;
  342. if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
  343. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
  344. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
  345. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
  346. pci_read_config_dword(tp->pdev,
  347. TG3PCI_GEN2_PRODID_ASICREV,
  348. &prod_id_asic_rev);
  349. else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
  350. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
  351. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
  352. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
  353. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
  354. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
  355. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
  356. pci_read_config_dword(tp->pdev,
  357. TG3PCI_GEN15_PRODID_ASICREV,
  358. &prod_id_asic_rev);
  359. else
  360. pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
  361. &prod_id_asic_rev);
  362. tp->pci_chip_rev_id = prod_id_asic_rev;
  363. }
  364. /* Wrong chip ID in 5752 A0. This code can be removed later
  365. * as A0 is not in production.
  366. */
  367. if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
  368. tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
  369. /* Initialize misc host control in PCI block. */
  370. tp->misc_host_ctrl |= (misc_ctrl_reg &
  371. MISC_HOST_CTRL_CHIPREV);
  372. pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
  373. tp->misc_host_ctrl);
  374. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
  375. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
  376. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
  377. tg3_flag_set(tp, 5717_PLUS);
  378. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
  379. tg3_flag(tp, 5717_PLUS))
  380. tg3_flag_set(tp, 57765_PLUS);
  381. /* Intentionally exclude ASIC_REV_5906 */
  382. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
  383. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
  384. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
  385. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
  386. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
  387. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
  388. tg3_flag(tp, 57765_PLUS))
  389. tg3_flag_set(tp, 5755_PLUS);
  390. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
  391. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
  392. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
  393. tg3_flag(tp, 5755_PLUS) ||
  394. tg3_flag(tp, 5780_CLASS))
  395. tg3_flag_set(tp, 5750_PLUS);
  396. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
  397. tg3_flag(tp, 5750_PLUS))
  398. tg3_flag_set(tp, 5705_PLUS);
  399. if (tg3_flag(tp, 5717_PLUS))
  400. tg3_flag_set(tp, LRG_PROD_RING_CAP);
  401. pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
  402. &pci_state_reg);
  403. tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
  404. if (tp->pcie_cap != 0) {
  405. u16 lnkctl;
  406. tg3_flag_set(tp, PCI_EXPRESS);
  407. pci_read_config_word(tp->pdev,
  408. tp->pcie_cap + PCI_EXP_LNKCTL,
  409. &lnkctl);
  410. if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
  411. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
  412. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
  413. tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
  414. tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
  415. tg3_flag_set(tp, CLKREQ_BUG);
  416. } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
  417. tg3_flag_set(tp, L1PLLPD_EN);
  418. }
  419. } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
  420. tg3_flag_set(tp, PCI_EXPRESS);
  421. } else if (!tg3_flag(tp, 5705_PLUS) ||
  422. tg3_flag(tp, 5780_CLASS)) {
  423. tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
  424. if (!tp->pcix_cap) {
  425. DBGC(&tp->pdev->dev,
  426. "Cannot find PCI-X capability, aborting\n");
  427. return -EIO;
  428. }
  429. if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
  430. tg3_flag_set(tp, PCIX_MODE);
  431. }
  432. /* If we have an AMD 762 or VIA K8T800 chipset, write
  433. * reordering to the mailbox registers done by the host
  434. * controller can cause major troubles. We read back from
  435. * every mailbox register write to force the writes to be
  436. * posted to the chip in order.
  437. */
  438. pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
  439. &tp->pci_cacheline_sz);
  440. pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
  441. &tp->pci_lat_timer);
  442. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
  443. tp->pci_lat_timer < 64) {
  444. tp->pci_lat_timer = 64;
  445. pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
  446. tp->pci_lat_timer);
  447. }
  448. if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
  449. /* 5700 BX chips need to have their TX producer index
  450. * mailboxes written twice to workaround a bug.
  451. */
  452. tg3_flag_set(tp, TXD_MBOX_HWBUG);
  453. /* If we are in PCI-X mode, enable register write workaround.
  454. *
  455. * The workaround is to use indirect register accesses
  456. * for all chip writes not to mailbox registers.
  457. */
  458. if (tg3_flag(tp, PCIX_MODE)) {
  459. u32 pm_reg;
  460. tg3_flag_set(tp, PCIX_TARGET_HWBUG);
  461. /* The chip can have it's power management PCI config
  462. * space registers clobbered due to this bug.
  463. * So explicitly force the chip into D0 here.
  464. */
  465. pci_read_config_dword(tp->pdev,
  466. tp->pm_cap + PCI_PM_CTRL,
  467. &pm_reg);
  468. pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
  469. pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
  470. pci_write_config_dword(tp->pdev,
  471. tp->pm_cap + PCI_PM_CTRL,
  472. pm_reg);
  473. /* Also, force SERR#/PERR# in PCI command. */
  474. pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
  475. pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
  476. pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
  477. }
  478. }
  479. if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
  480. tg3_flag_set(tp, PCI_HIGH_SPEED);
  481. if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
  482. tg3_flag_set(tp, PCI_32BIT);
  483. /* Chip-specific fixup from Broadcom driver */
  484. if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
  485. (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
  486. pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
  487. pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
  488. }
  489. tp->write32_mbox = tg3_write_indirect_reg32;
  490. tp->write32_rx_mbox = tg3_write_indirect_mbox;
  491. tp->write32_tx_mbox = tg3_write_indirect_mbox;
  492. tp->read32_mbox = tg3_read_indirect_mbox;
  493. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  494. tp->read32_mbox = tg3_read32_mbox_5906;
  495. tp->write32_mbox = tg3_write32_mbox_5906;
  496. tp->write32_tx_mbox = tg3_write32_mbox_5906;
  497. tp->write32_rx_mbox = tg3_write32_mbox_5906;
  498. }
  499. /* Get eeprom hw config before calling tg3_set_power_state().
  500. * In particular, the TG3_FLAG_IS_NIC flag must be
  501. * determined before calling tg3_set_power_state() so that
  502. * we know whether or not to switch out of Vaux power.
  503. * When the flag is set, it means that GPIO1 is used for eeprom
  504. * write protect and also implies that it is a LOM where GPIOs
  505. * are not used to switch power.
  506. */
  507. tg3_get_eeprom_hw_cfg(tp);
  508. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
  509. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
  510. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
  511. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
  512. tg3_flag(tp, 57765_PLUS))
  513. tg3_flag_set(tp, CPMU_PRESENT);
  514. /* Set up tp->grc_local_ctrl before calling tg3_power_up().
  515. * GPIO1 driven high will bring 5700's external PHY out of reset.
  516. * It is also used as eeprom write protect on LOMs.
  517. */
  518. tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
  519. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
  520. tg3_flag(tp, EEPROM_WRITE_PROT))
  521. tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
  522. GRC_LCLCTRL_GPIO_OUTPUT1);
  523. /* Unused GPIO3 must be driven as output on 5752 because there
  524. * are no pull-up resistors on unused GPIO pins.
  525. */
  526. else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
  527. tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
  528. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
  529. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
  530. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
  531. tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
  532. if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
  533. tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
  534. /* Turn off the debug UART. */
  535. tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
  536. if (tg3_flag(tp, IS_NIC))
  537. /* Keep VMain power. */
  538. tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
  539. GRC_LCLCTRL_GPIO_OUTPUT0;
  540. }
  541. /* Force the chip into D0. */
  542. tg3_set_power_state_0(tp);
  543. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
  544. tp->phy_flags |= TG3_PHYFLG_IS_FET;
  545. /* A few boards don't want Ethernet@WireSpeed phy feature */
  546. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
  547. (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
  548. (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
  549. (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
  550. (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
  551. (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
  552. tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
  553. if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
  554. GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
  555. tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
  556. if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
  557. tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
  558. if (tg3_flag(tp, 5705_PLUS) &&
  559. !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
  560. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
  561. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
  562. !tg3_flag(tp, 57765_PLUS)) {
  563. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
  564. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
  565. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
  566. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
  567. if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
  568. tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
  569. tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
  570. if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
  571. tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
  572. } else
  573. tp->phy_flags |= TG3_PHYFLG_BER_BUG;
  574. }
  575. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
  576. GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
  577. tp->phy_otp = tg3_read_otp_phycfg(tp);
  578. if (tp->phy_otp == 0)
  579. tp->phy_otp = TG3_OTP_DEFAULT;
  580. }
  581. if (tg3_flag(tp, CPMU_PRESENT))
  582. tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
  583. else
  584. tp->mi_mode = MAC_MI_MODE_BASE;
  585. tp->coalesce_mode = 0;
  586. if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
  587. GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
  588. tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
  589. /* Set these bits to enable statistics workaround. */
  590. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
  591. tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
  592. tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
  593. tp->coalesce_mode |= HOSTCC_MODE_ATTN;
  594. tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
  595. }
  596. tg3_mdio_init(tp);
  597. /* Initialize data/descriptor byte/word swapping. */
  598. val = tr32(GRC_MODE);
  599. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
  600. val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
  601. GRC_MODE_WORD_SWAP_B2HRX_DATA |
  602. GRC_MODE_B2HRX_ENABLE |
  603. GRC_MODE_HTX2B_ENABLE |
  604. GRC_MODE_HOST_STACKUP);
  605. else
  606. val &= GRC_MODE_HOST_STACKUP;
  607. tw32(GRC_MODE, val | tp->grc_mode);
  608. tg3_switch_clocks(tp);
  609. /* Clear this out for sanity. */
  610. tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
  611. pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
  612. &pci_state_reg);
  613. if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
  614. !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
  615. u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
  616. if (chiprevid == CHIPREV_ID_5701_A0 ||
  617. chiprevid == CHIPREV_ID_5701_B0 ||
  618. chiprevid == CHIPREV_ID_5701_B2 ||
  619. chiprevid == CHIPREV_ID_5701_B5) {
  620. void *sram_base;
  621. /* Write some dummy words into the SRAM status block
  622. * area, see if it reads back correctly. If the return
  623. * value is bad, force enable the PCIX workaround.
  624. */
  625. sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
  626. writel(0x00000000, sram_base);
  627. writel(0x00000000, sram_base + 4);
  628. writel(0xffffffff, sram_base + 4);
  629. if (readl(sram_base) != 0x00000000)
  630. tg3_flag_set(tp, PCIX_TARGET_HWBUG);
  631. }
  632. }
  633. udelay(50);
  634. /* FIXME: do we need nvram access? */
  635. /// tg3_nvram_init(tp);
  636. grc_misc_cfg = tr32(GRC_MISC_CFG);
  637. grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
  638. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
  639. (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
  640. grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
  641. tg3_flag_set(tp, IS_5788);
  642. if (!tg3_flag(tp, IS_5788) &&
  643. GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
  644. tg3_flag_set(tp, TAGGED_STATUS);
  645. if (tg3_flag(tp, TAGGED_STATUS)) {
  646. tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
  647. HOSTCC_MODE_CLRTICK_TXBD);
  648. tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
  649. pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
  650. tp->misc_host_ctrl);
  651. }
  652. /* Preserve the APE MAC_MODE bits */
  653. if (tg3_flag(tp, ENABLE_APE))
  654. tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
  655. else
  656. tp->mac_mode = TG3_DEF_MAC_MODE;
  657. /* these are limited to 10/100 only */
  658. if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
  659. (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
  660. (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
  661. tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
  662. (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
  663. tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
  664. tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
  665. (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
  666. (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
  667. tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
  668. tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
  669. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
  670. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
  671. tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
  672. (tp->phy_flags & TG3_PHYFLG_IS_FET))
  673. tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
  674. err = tg3_phy_probe(tp);
  675. if (err) {
  676. DBGC(&tp->pdev->dev, "phy probe failed, err: %s\n", strerror(err));
  677. /* ... but do not return immediately ... */
  678. }
  679. if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
  680. tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
  681. } else {
  682. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
  683. tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
  684. else
  685. tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
  686. }
  687. /* For all SERDES we poll the MAC status register. */
  688. if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
  689. tg3_flag_set(tp, POLL_SERDES);
  690. else
  691. tg3_flag_clear(tp, POLL_SERDES);
  692. /* Increment the rx prod index on the rx std ring by at most
  693. * 8 for these chips to workaround hw errata.
  694. */
  695. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
  696. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
  697. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
  698. tp->rx_std_max_post = 8;
  699. return err;
  700. }
  701. void tg3_init_bufmgr_config(struct tg3 *tp)
  702. { DBGP("%s\n", __func__);
  703. if (tg3_flag(tp, 57765_PLUS)) {
  704. tp->bufmgr_config.mbuf_read_dma_low_water =
  705. DEFAULT_MB_RDMA_LOW_WATER_5705;
  706. tp->bufmgr_config.mbuf_mac_rx_low_water =
  707. DEFAULT_MB_MACRX_LOW_WATER_57765;
  708. tp->bufmgr_config.mbuf_high_water =
  709. DEFAULT_MB_HIGH_WATER_57765;
  710. tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
  711. DEFAULT_MB_RDMA_LOW_WATER_5705;
  712. tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
  713. DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
  714. tp->bufmgr_config.mbuf_high_water_jumbo =
  715. DEFAULT_MB_HIGH_WATER_JUMBO_57765;
  716. } else if (tg3_flag(tp, 5705_PLUS)) {
  717. tp->bufmgr_config.mbuf_read_dma_low_water =
  718. DEFAULT_MB_RDMA_LOW_WATER_5705;
  719. tp->bufmgr_config.mbuf_mac_rx_low_water =
  720. DEFAULT_MB_MACRX_LOW_WATER_5705;
  721. tp->bufmgr_config.mbuf_high_water =
  722. DEFAULT_MB_HIGH_WATER_5705;
  723. if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
  724. tp->bufmgr_config.mbuf_mac_rx_low_water =
  725. DEFAULT_MB_MACRX_LOW_WATER_5906;
  726. tp->bufmgr_config.mbuf_high_water =
  727. DEFAULT_MB_HIGH_WATER_5906;
  728. }
  729. tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
  730. DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
  731. tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
  732. DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
  733. tp->bufmgr_config.mbuf_high_water_jumbo =
  734. DEFAULT_MB_HIGH_WATER_JUMBO_5780;
  735. } else {
  736. tp->bufmgr_config.mbuf_read_dma_low_water =
  737. DEFAULT_MB_RDMA_LOW_WATER;
  738. tp->bufmgr_config.mbuf_mac_rx_low_water =
  739. DEFAULT_MB_MACRX_LOW_WATER;
  740. tp->bufmgr_config.mbuf_high_water =
  741. DEFAULT_MB_HIGH_WATER;
  742. tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
  743. DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
  744. tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
  745. DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
  746. tp->bufmgr_config.mbuf_high_water_jumbo =
  747. DEFAULT_MB_HIGH_WATER_JUMBO;
  748. }
  749. tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
  750. tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
  751. }
  752. #define TG3_FW_EVENT_TIMEOUT_USEC 2500
  753. void tg3_wait_for_event_ack(struct tg3 *tp)
  754. { DBGP("%s\n", __func__);
  755. int i;
  756. for (i = 0; i < TG3_FW_EVENT_TIMEOUT_USEC / 10; i++) {
  757. if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
  758. break;
  759. udelay(10);
  760. }
  761. }
/* Write a 32-bit word into NIC SRAM at @off via the PCI memory window. */
void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{ DBGP("%s\n", __func__);

	/* The 5906 does not accept writes to the statistics block region
	 * of SRAM; silently skip those offsets.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	/* Point the memory window at the target offset, then push the
	 * data through the window data register.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}
/* Ask the ASF management firmware to pause before a chip reset.
 * Skipped when the APE handles management traffic, or when ASF is
 * not enabled at all.
 */
static void tg3_stop_fw(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		/* Post the PAUSE command in the firmware command mailbox
		 * and ring the firmware event doorbell.
		 */
		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* Leave a magic signature in the firmware mailbox so the bootcode can
 * recognise that a driver-initiated reset is in progress.
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
}
/* Disable chip interrupts: mask the PCI interrupt line in
 * MISC_HOST_CTRL, then write a non-zero value to the interrupt
 * mailbox (non-zero in this mailbox blocks further interrupt
 * generation on tg3 hardware).
 */
void tg3_disable_ints(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));

	tw32_mailbox_f(tp->int_mbox, 0x00000001);
}
/* Enable chip interrupts: unmask the PCI interrupt line and re-arm the
 * interrupt mailbox with the last processed status tag.
 */
void tg3_enable_ints(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	/* Cache the coalescing mode with interrupts enabled for later
	 * HOSTCC_MODE writes.
	 */
	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;

	tw32_mailbox_f(tp->int_mbox, tp->last_tag << 24);

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);
}
  807. #define MAX_WAIT_CNT 1000
  808. /* To stop a block, clear the enable bit and poll till it clears. */
  809. static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
  810. { DBGP("%s\n", __func__);
  811. unsigned int i;
  812. u32 val;
  813. if (tg3_flag(tp, 5705_PLUS)) {
  814. switch (ofs) {
  815. case RCVLSC_MODE:
  816. case DMAC_MODE:
  817. case MBFREE_MODE:
  818. case BUFMGR_MODE:
  819. case MEMARB_MODE:
  820. /* We can't enable/disable these bits of the
  821. * 5705/5750, just say success.
  822. */
  823. return 0;
  824. default:
  825. break;
  826. }
  827. }
  828. val = tr32(ofs);
  829. val &= ~enable_bit;
  830. tw32_f(ofs, val);
  831. for (i = 0; i < MAX_WAIT_CNT; i++) {
  832. udelay(100);
  833. val = tr32(ofs);
  834. if ((val & enable_bit) == 0)
  835. break;
  836. }
  837. if (i == MAX_WAIT_CNT) {
  838. DBGC(&tp->pdev->dev,
  839. "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
  840. ofs, enable_bit);
  841. return -ENODEV;
  842. }
  843. return 0;
  844. }
/* Quiesce the chip: mask interrupts, stop the receive MAC, then shut
 * down each DMA/descriptor engine.  Errors from individual blocks are
 * OR-ed together, so a non-zero result only means "something failed to
 * stop", not a specific errno.  The stop order follows the hardware
 * data path (receive side, send side, then host coalescing and memory
 * arbiter last).
 */
static int tg3_abort_hw(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the receive MAC first so no new frames enter the chip. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);

	/* Disable the transmit DMA engine in the MAC... */
	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* ...then the transmit MAC itself, polling for it to stop. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		DBGC(&tp->pdev->dev,
		     "%s timed out, TX_MODE_ENABLE will not clear "
		     "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);

	/* Pulse the flow-through queue reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);

	/* Wipe the status block so stale events are not replayed. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);

	return err;
}
/* Program the station MAC address from tp->dev->ll_addr into the
 * hardware.  The address is written into all four MAC address slots
 * (slot 1 is left untouched when @skip_mac_1 is set), into the twelve
 * extended-address slots on 5703/5704, and its byte sum seeds the
 * transmit backoff generator.
 */
void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{ DBGP("%s\n", __func__);
	u32 addr_high, addr_low;
	int i;

	/* High register holds bytes 0-1, low register bytes 2-5. */
	addr_high = ((tp->dev->ll_addr[0] << 8) |
		     tp->dev->ll_addr[1]);
	addr_low = ((tp->dev->ll_addr[2] << 24) |
		    (tp->dev->ll_addr[3] << 16) |
		    (tp->dev->ll_addr[4] << 8) |
		    (tp->dev->ll_addr[5] << 0));

	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	/* Seed the TX backoff generator with the byte sum of the MAC
	 * address (masked to the seed field width).
	 */
	addr_high = (tp->dev->ll_addr[0] +
		     tp->dev->ll_addr[1] +
		     tp->dev->ll_addr[2] +
		     tp->dev->ll_addr[3] +
		     tp->dev->ll_addr[4] +
		     tp->dev->ll_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	/* The core-clock reset can clear the memory-enable bit in the
	 * PCI command register, so cache it for later restoration.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		/* 5704 A0 PCI-X erratum workaround. */
		val |= PCISTATE_RETRY_SAME_DMA;

	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the command register saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}
}
/* Wait for on-chip firmware to signal that initialization is complete
 * after a reset.  Returns 0 on success (or when no firmware is fitted),
 * -ENODEV only on the 5906 VCPU timeout path.
 */
static int tg3_poll_fw(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete.  The bootcode
	 * writes ~MAGIC1 into the mailbox when it is done.
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == (u32)~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware. Some Sun onboard
	 * parts are configured like that. So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		DBGC(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
  996. static int tg3_nvram_lock(struct tg3 *tp)
  997. { DBGP("%s\n", __func__);
  998. if (tg3_flag(tp, NVRAM)) {
  999. int i;
  1000. if (tp->nvram_lock_cnt == 0) {
  1001. tw32(NVRAM_SWARB, SWARB_REQ_SET1);
  1002. for (i = 0; i < 8000; i++) {
  1003. if (tr32(NVRAM_SWARB) & SWARB_GNT1)
  1004. break;
  1005. udelay(20);
  1006. }
  1007. if (i == 8000) {
  1008. tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
  1009. return -ENODEV;
  1010. }
  1011. }
  1012. tp->nvram_lock_cnt++;
  1013. }
  1014. return 0;
  1015. }
  1016. static void tg3_nvram_unlock(struct tg3 *tp)
  1017. { DBGP("%s\n", __func__);
  1018. if (tg3_flag(tp, NVRAM)) {
  1019. if (tp->nvram_lock_cnt > 0)
  1020. tp->nvram_lock_cnt--;
  1021. if (tp->nvram_lock_cnt == 0)
  1022. tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
  1023. }
  1024. }
/* Perform a full GRC core-clock reset of the chip and bring it back to
 * a usable state: save/restore PCI config, issue the reset, re-enable
 * the memory arbiter, restore GRC/MAC mode, and wait for bootcode
 * firmware to finish.  The exact ordering of register accesses below
 * follows hardware errata and must not be rearranged.
 */
static int tg3_chip_reset(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	u32 val;
	int err;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

#if 0
	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things. So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;
#endif

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared. The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);

	/* Clear the cached status block and tags. */
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}

	tp->last_tag = 0;
	tp->last_irq_tag = 0;
	mb();

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		/* Disable PCIe L1 PLL power-down during the reset. */
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			/* Bit 29: presumably a PCIe-specific reset
			 * qualifier — set both before and during reset.
			 */
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Tell the 5906 VCPU a driver reset is happening and
		 * make sure its CPU is not halted.
		 */
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time? It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes. The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above). I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (i = 0; i < 5000; i++)
				udelay(100);

			/* 5750 A0 erratum: set bit 15 of config reg 0xc4. */
			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_DEVCTL,
				     &val16);
		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
			   PCI_EXP_DEVCTL_NOSNOOP_EN);
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting. Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVCTL,
				      val16);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	/* Re-enable the memory arbiter, preserving other mode bits on
	 * 5780-class chips.
	 */
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		/* 5705 A0 erratum: set bit 15 of register 0xc4. */
		val = tr32(0xc4);
		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode for SERDES PHYs; otherwise leave
	 * MAC_MODE cleared.
	 */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		/* PCIe erratum: set bit 25 of register 0x7c00. */
		val = tr32(0x7c00);
		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		/* Drop the CPMU MAC clock override on 5720. */
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	return 0;
}
  1200. int tg3_halt(struct tg3 *tp)
  1201. { DBGP("%s\n", __func__);
  1202. int err;
  1203. tg3_stop_fw(tp);
  1204. tg3_write_sig_pre_reset(tp);
  1205. tg3_abort_hw(tp);
  1206. err = tg3_chip_reset(tp);
  1207. __tg3_set_mac_addr(tp, 0);
  1208. if (err)
  1209. return err;
  1210. return 0;
  1211. }
/* Read one 32-bit word from a legacy SEEPROM part (no NVRAM interface)
 * via the GRC EEPROM address/data registers.
 *
 * @offset must be 32-bit aligned and within the EEPROM address range.
 * Returns 0 on success, -EINVAL on a bad offset, -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{ DBGP("%s\n", __func__);
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits, then program the address and kick
	 * off a read transaction.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll (up to 1000 * 1ms) for completion. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		mdelay(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format. Perform a blind byteswap to compensate.
	 */
	*val = bswap_32(tmp);

	return 0;
}
  1244. static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
  1245. { DBGP("%s\n", __func__);
  1246. if (tg3_flag(tp, NVRAM) &&
  1247. tg3_flag(tp, NVRAM_BUFFERED) &&
  1248. tg3_flag(tp, FLASH) &&
  1249. !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
  1250. (tp->nvram_jedecnum == JEDEC_ATMEL))
  1251. addr = ((addr / tp->nvram_pagesize) <<
  1252. ATMEL_AT45DB0X1B_PAGE_POS) +
  1253. (addr % tp->nvram_pagesize);
  1254. return addr;
  1255. }
/* Enable host access to NVRAM on 5750+ chips (unless the NVRAM is
 * protected); no-op on older parts.
 */
static void tg3_enable_nvram_access(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}
/* Disable host access to NVRAM on 5750+ chips (unless the NVRAM is
 * protected); no-op on older parts.
 */
static void tg3_disable_nvram_access(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
#define NVRAM_CMD_TIMEOUT 10000

/* Issue a command to the NVRAM controller and poll (up to
 * NVRAM_CMD_TIMEOUT * 10us) for the DONE bit.
 * Returns 0 on completion, -EBUSY on timeout.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{ DBGP("%s\n", __func__);
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			/* Brief settle delay after completion. */
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM. On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{ DBGP("%s\n", __func__);
	int ret;

	/* Fall back to the legacy SEEPROM path when there is no NVRAM
	 * interface.
	 */
	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	/* Program the address and issue a single-word read. */
	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
  1313. /* Ensures NVRAM data is in bytestream format. */
  1314. static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, u32 *val)
  1315. { DBGP("%s\n", __func__);
  1316. u32 v = 0;
  1317. int res = tg3_nvram_read(tp, offset, &v);
  1318. if (!res)
  1319. *val = cpu_to_be32(v);
  1320. return res;
  1321. }
/* Determine the device MAC address, trying in order: the bootcode's
 * SRAM mailbox, NVRAM, and finally the live MAC address registers.
 * Fills dev->hw_addr; returns 0 on success, -EINVAL if no valid
 * address could be found.
 */
int tg3_get_device_address(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

	/* Default NVRAM location of the MAC address; adjusted below for
	 * dual-MAC and multi-function parts.
	 */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* If the lock cannot be acquired, reset the NVRAM
		 * state machine instead of unlocking.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		/* Per-PCI-function MAC address slots. */
		if (PCI_FUNC(tp->pdev->busdevfn) & 1)
			mac_offset = 0xcc;
		if (PCI_FUNC(tp->pdev->busdevfn) > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is ASCII "HK" — presumably a bootcode signature that
	 * the mailbox holds a valid address.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->hw_addr[0] = (hi >>  8) & 0xff;
		dev->hw_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->hw_addr[2] = (lo >> 24) & 0xff;
		dev->hw_addr[3] = (lo >> 16) & 0xff;
		dev->hw_addr[4] = (lo >>  8) & 0xff;
		dev->hw_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->hw_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* Big-endian words: bytes 0-1 are the low half
			 * of hi, bytes 2-5 are all of lo.
			 */
			memcpy(&dev->hw_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->hw_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->hw_addr[5] = lo & 0xff;
			dev->hw_addr[4] = (lo >> 8) & 0xff;
			dev->hw_addr[3] = (lo >> 16) & 0xff;
			dev->hw_addr[2] = (lo >> 24) & 0xff;
			dev->hw_addr[1] = hi & 0xff;
			dev->hw_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->hw_addr[0])) {
		return -EINVAL;
	}

	return 0;
}
  1381. static void __tg3_set_rx_mode(struct net_device *dev)
  1382. { DBGP("%s\n", __func__);
  1383. struct tg3 *tp = netdev_priv(dev);
  1384. u32 rx_mode;
  1385. rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
  1386. RX_MODE_KEEP_VLAN_TAG);
  1387. rx_mode |= RX_MODE_KEEP_VLAN_TAG;
  1388. /* Accept all multicast. */
  1389. tw32(MAC_HASH_REG_0, 0xffffffff);
  1390. tw32(MAC_HASH_REG_1, 0xffffffff);
  1391. tw32(MAC_HASH_REG_2, 0xffffffff);
  1392. tw32(MAC_HASH_REG_3, 0xffffffff);
  1393. if (rx_mode != tp->rx_mode) {
  1394. tp->rx_mode = rx_mode;
  1395. tw32_f(MAC_RX_MODE, rx_mode);
  1396. udelay(10);
  1397. }
  1398. }
/* Program the host coalescing engine with this driver's fixed,
 * latency-oriented settings: interrupt on every received frame
 * (RXMAX_FRAMES / RXCOAL_MAXF_INT = 1), no RX tick coalescing.
 * Pre-5705 chips additionally get tick-interrupt defaults and a
 * statistics coalescing interval.
 */
static void __tg3_set_coalesce(struct tg3 *tp)
{ DBGP("%s\n", __func__);
        tw32(HOSTCC_RXCOL_TICKS, 0);
        tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
        tw32(HOSTCC_RXMAX_FRAMES, 1);
        /* FIXME: mix between TXMAX and RXMAX taken from legacy driver */
        tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
        tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
        tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

        if (!tg3_flag(tp, 5705_PLUS)) {
                u32 val = DEFAULT_STAT_COAL_TICKS;
                tw32(HOSTCC_RXCOAL_TICK_INT, DEFAULT_RXCOAL_TICK_INT);
                tw32(HOSTCC_TXCOAL_TICK_INT, DEFAULT_TXCOAL_TICK_INT);
                /* No point collecting statistics while the link is down. */
                if (!netdev_link_ok(tp->dev))
                        val = 0;
                tw32(HOSTCC_STAT_COAL_TICKS, val);
        }
}
  1417. static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
  1418. dma_addr_t mapping, u32 maxlen_flags,
  1419. u32 nic_addr)
  1420. { DBGP("%s\n", __func__);
  1421. tg3_write_mem(tp,
  1422. (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
  1423. ((u64) mapping >> 32));
  1424. tg3_write_mem(tp,
  1425. (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
  1426. ((u64) mapping & 0xffffffff));
  1427. tg3_write_mem(tp,
  1428. (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
  1429. maxlen_flags);
  1430. if (!tg3_flag(tp, 5705_PLUS))
  1431. tg3_write_mem(tp,
  1432. (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
  1433. nic_addr);
  1434. }
/* Reset all descriptor-ring state: disable every TX and RX-return
 * ring control block except the first, mask interrupts, zero the
 * producer/consumer mailboxes and the host status block, and then
 * re-program the first TX and RX-return rings from tp's ring
 * buffers.  The per-chip ring counts determine how many RCBs must
 * be walked and disabled.
 */
static void tg3_rings_reset(struct tg3 *tp)
{ DBGP("%s\n", __func__);
        int i;
        u32 txrcb, rxrcb, limit;

        /* Disable all transmit rings but the first. */
        if (!tg3_flag(tp, 5705_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
        else if (tg3_flag(tp, 5717_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
        else
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

        for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
             txrcb < limit; txrcb += TG3_BDINFO_SIZE)
                tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
                              BDINFO_FLAGS_DISABLED);

        /* Disable all receive return rings but the first. */
        if (tg3_flag(tp, 5717_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
        else if (!tg3_flag(tp, 5705_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
        else
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

        for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
             rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
                tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
                              BDINFO_FLAGS_DISABLED);

        /* Disable interrupts */
        tw32_mailbox_f(tp->int_mbox, 1);

        /* Zero the cached and hardware producer/consumer indices. */
        tp->tx_prod = 0;
        tp->tx_cons = 0;
        tw32_mailbox(tp->prodmbox, 0);
        tw32_rx_mbox(tp->consmbox, 0);

        /* Make sure the NIC-based send BD rings are disabled. */
        if (!tg3_flag(tp, 5705_PLUS)) {
                u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
                for (i = 0; i < 16; i++)
                        tw32_tx_mbox(mbox + i * 8, 0);
        }

        txrcb = NIC_SRAM_SEND_RCB;
        rxrcb = NIC_SRAM_RCV_RET_RCB;

        /* Clear status block in ram. */
        memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);

        /* Set status block DMA address */
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
             ((u64) tp->status_mapping >> 32));
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
             ((u64) tp->status_mapping & 0xffffffff));

        /* Re-enable the first TX ring, pointing at our host ring. */
        if (tp->tx_ring) {
                tg3_set_bdinfo(tp, txrcb, tp->tx_desc_mapping,
                               (TG3_TX_RING_SIZE <<
                                BDINFO_FLAGS_MAXLEN_SHIFT),
                               NIC_SRAM_TX_BUFFER_DESC);
                txrcb += TG3_BDINFO_SIZE;
        }

        /* FIXME: will TG3_RX_RET_MAX_SIZE_5705 work on all cards? */
        if (tp->rx_rcb) {
                tg3_set_bdinfo(tp, rxrcb, tp->rx_rcb_mapping,
                               TG3_RX_RET_MAX_SIZE_5705 <<
                               BDINFO_FLAGS_MAXLEN_SHIFT, 0);
                rxrcb += TG3_BDINFO_SIZE;
        }
}
  1502. static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
  1503. { DBGP("%s\n", __func__);
  1504. u32 val, bdcache_maxcnt;
  1505. if (!tg3_flag(tp, 5750_PLUS) ||
  1506. tg3_flag(tp, 5780_CLASS) ||
  1507. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
  1508. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
  1509. bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
  1510. else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
  1511. GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
  1512. bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
  1513. else
  1514. bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
  1515. /* NOTE: legacy driver uses RX_PENDING / 8, we only use 4 descriptors
  1516. * for now, use / 4 so the result is > 0
  1517. */
  1518. val = TG3_DEF_RX_RING_PENDING / 4;
  1519. tw32(RCVBDI_STD_THRESH, val);
  1520. if (tg3_flag(tp, 57765_PLUS))
  1521. tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
  1522. }
/* Bring the chip from reset to a fully operational state.
 *
 * Stops firmware, optionally resets the PHY, performs a full chip
 * reset, then walks the hardware bring-up sequence: PCIe/clock
 * workarounds, ring initialization, GRC mode, buffer manager,
 * receive BD rings, MAC address/MTU/TX lengths, DMA engines, MAC
 * modes, GPIOs, PHY setup and receive rules.  The register ordering
 * below is significant and follows the vendor bring-up sequence.
 * Returns 0 on success or a negative error code.
 */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{ DBGP("%s\n", __func__);
        u32 val, rdmac_mode;
        int i, err, limit;
        struct tg3_rx_prodring_set *tpr = &tp->prodring;

        tg3_stop_fw(tp);

        tg3_write_sig_pre_reset(tp);

        if (tg3_flag(tp, INIT_COMPLETE))
                tg3_abort_hw(tp);

        if (reset_phy)
                tg3_phy_reset(tp);

        err = tg3_chip_reset(tp);
        if (err)
                return err;

        /* 57780: PCIe power management and electrical-idle tuning. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
                val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
                val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
                       PCIE_PWR_MGMT_L1_THRESH_4MS;
                tw32(PCIE_PWR_MGMT_THRESH, val);

                val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
                tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

                tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

                val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
                tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
        }

        if (tg3_flag(tp, L1PLLPD_EN)) {
                u32 grc_mode = tr32(GRC_MODE);

                /* Access the lower 1K of PL PCIE block registers. */
                val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
                tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

                val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
                tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
                     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

                tw32(GRC_MODE, grc_mode);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
                if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
                        u32 grc_mode = tr32(GRC_MODE);

                        /* Access the lower 1K of PL PCIE block registers. */
                        val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
                        tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

                        val = tr32(TG3_PCIE_TLDLPL_PORT +
                                   TG3_PCIE_PL_LO_PHYCTL5);
                        tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
                             val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

                        tw32(GRC_MODE, grc_mode);
                }

                if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
                        u32 grc_mode = tr32(GRC_MODE);

                        /* Access the lower 1K of DL PCIE block registers. */
                        val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
                        tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

                        val = tr32(TG3_PCIE_TLDLPL_PORT +
                                   TG3_PCIE_DL_LO_FTSMAX);
                        val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
                        tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
                             val | TG3_PCIE_DL_LO_FTSMAX_VAL);

                        tw32(GRC_MODE, grc_mode);
                }

                /* Drop the 10Mb MAC clock to 6.25MHz. */
                val = tr32(TG3_CPMU_LSPD_10MB_CLK);
                val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
                val |= CPMU_LSPD_10MB_MACCLK_6_25;
                tw32(TG3_CPMU_LSPD_10MB_CLK, val);
        }

        /* This works around an issue with Athlon chipsets on
         * B3 tigon3 silicon. This bit has no effect on any
         * other revision. But do not set this on PCI Express
         * chips and don't even touch the clocks if the CPMU is present.
         */
        if (!tg3_flag(tp, CPMU_PRESENT)) {
                if (!tg3_flag(tp, PCI_EXPRESS))
                        tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
                tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }

        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
            tg3_flag(tp, PCIX_MODE)) {
                val = tr32(TG3PCI_PCISTATE);
                val |= PCISTATE_RETRY_SAME_DMA;
                tw32(TG3PCI_PCISTATE, val);
        }

        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
                /* Enable some hw fixes. */
                val = tr32(TG3PCI_MSI_DATA);
                val |= (1 << 26) | (1 << 28) | (1 << 29);
                tw32(TG3PCI_MSI_DATA, val);
        }

        /* Descriptor ring init may make accesses to the
         * NIC SRAM area to setup the TX descriptors, so we
         * can only do this after the hardware has been
         * successfully reset.
         */
        err = tg3_init_rings(tp);
        if (err)
                return err;

        if (tg3_flag(tp, 57765_PLUS)) {
                val = tr32(TG3PCI_DMA_RW_CTRL) &
                      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
                if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
                        val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
                        val |= DMA_RWCTRL_TAGGED_STAT_WA;
                tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
                /* This value is determined during the probe time DMA
                 * engine test, tg3_test_dma.
                 */
                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
        }

        tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
                          GRC_MODE_4X_NIC_SEND_RINGS |
                          GRC_MODE_NO_TX_PHDR_CSUM |
                          GRC_MODE_NO_RX_PHDR_CSUM);
        tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

        tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;

        /* Pseudo-header checksum is done by hardware logic and not
         * the offload processers, so make the chip do the pseudo-
         * header checksums on receive. For transmit it is more
         * convenient to do the pseudo-header checksum in software
         * as Linux does that on transmit for us in all cases.
         */
        tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

        tw32(GRC_MODE,
             tp->grc_mode |
             (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));

        /* Setup the timer prescalar register. Clock is always 66Mhz. */
        val = tr32(GRC_MISC_CFG);
        val &= ~0xff;
        val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
        tw32(GRC_MISC_CFG, val);

        /* Initialize MBUF/DESC pool. */
        if (tg3_flag(tp, 5750_PLUS)) {
                /* Do nothing. */
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
                tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                        tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
                else
                        tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
                tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
                tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
        }

        /* Buffer manager watermarks, pre-computed per chip class. */
        tw32(BUFMGR_MB_RDMA_LOW_WATER,
             tp->bufmgr_config.mbuf_read_dma_low_water);
        tw32(BUFMGR_MB_MACRX_LOW_WATER,
             tp->bufmgr_config.mbuf_mac_rx_low_water);
        tw32(BUFMGR_MB_HIGH_WATER,
             tp->bufmgr_config.mbuf_high_water);

        tw32(BUFMGR_DMA_LOW_WATER,
             tp->bufmgr_config.dma_low_water);
        tw32(BUFMGR_DMA_HIGH_WATER,
             tp->bufmgr_config.dma_high_water);

        val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
                val |= BUFMGR_MODE_NO_TX_UNDERRUN;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
                val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
        tw32(BUFMGR_MODE, val);

        /* Poll up to 20ms for the buffer manager to come up. */
        for (i = 0; i < 2000; i++) {
                if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
                        break;
                udelay(10);
        }
        if (i >= 2000) {
                DBGC(tp->dev, "%s cannot enable BUFMGR\n", __func__);
                return -ENODEV;
        }

        if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
                tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

        tg3_setup_rxbd_thresholds(tp);

        /* Initialize TG3_BDINFO's at:
         *   RCVDBDI_STD_BD:        standard eth size rx ring
         *   RCVDBDI_JUMBO_BD:      jumbo frame rx ring
         *   RCVDBDI_MINI_BD:       small frame rx ring (??? does not work)
         *
         * like so:
         *   TG3_BDINFO_HOST_ADDR:  high/low parts of DMA address of ring
         *   TG3_BDINFO_MAXLEN_FLAGS:  (rx max buffer size << 16) |
         *                             ring attribute flags
         *   TG3_BDINFO_NIC_ADDR:   location of descriptors in nic SRAM
         *
         * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
         * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
         *
         * The size of each ring is fixed in the firmware, but the location is
         * configurable.
         */
        tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
             ((u64) tpr->rx_std_mapping >> 32));
        tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
             ((u64) tpr->rx_std_mapping & 0xffffffff));
        if (!tg3_flag(tp, 5717_PLUS))
                tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
                     NIC_SRAM_RX_BUFFER_DESC);

        /* Disable the mini ring */
        if (!tg3_flag(tp, 5705_PLUS))
                tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
                     BDINFO_FLAGS_DISABLED);

        val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
        if (tg3_flag(tp, 57765_PLUS))
                val |= (RX_STD_MAX_SIZE << 2);
        tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

        tpr->rx_std_prod_idx = 0;

        /* std prod index is updated by tg3_refill_prod_ring() */
        tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 0);
        tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 0);

        tg3_rings_reset(tp);

        __tg3_set_mac_addr(tp,0);

#define TG3_MAX_MTU     1522
        /* MTU + ethernet header + FCS + optional VLAN tag */
        tw32(MAC_RX_MTU_SIZE, TG3_MAX_MTU);

        /* The slot time is changed by tg3_setup_phy if we
         * run at gigabit with half duplex.
         */
        val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
              (6 << TX_LENGTHS_IPG_SHIFT) |
              (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
                val |= tr32(MAC_TX_LENGTHS) &
                       (TX_LENGTHS_JMB_FRM_LEN_MSK |
                        TX_LENGTHS_CNT_DWN_VAL_MSK);

        tw32(MAC_TX_LENGTHS, val);

        /* Receive rules. */
        tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
        tw32(RCVLPC_CONFIG, 0x0181);

        /* Calculate RDMAC_MODE setting early, we need it to determine
         * the RCVLPC_STATE_ENABLE mask.
         */
        rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
                      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
                      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
                      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
                      RDMAC_MODE_LNGREAD_ENAB);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
                rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
                rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
                              RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
                              RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
                if (tg3_flag(tp, TSO_CAPABLE) &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
                } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
                           !tg3_flag(tp, IS_5788)) {
                        rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
                }
        }

        if (tg3_flag(tp, PCI_EXPRESS))
                rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
                rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
            tg3_flag(tp, 57765_PLUS)) {
                val = tr32(TG3_RDMA_RSRVCTRL_REG);
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
                        val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
                                 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
                                 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
                        val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
                               TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
                               TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
                }
                tw32(TG3_RDMA_RSRVCTRL_REG,
                     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
                val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
                tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
                     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
                     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
        }

        /* Receive/send statistics. */
        if (tg3_flag(tp, 5750_PLUS)) {
                val = tr32(RCVLPC_STATS_ENABLE);
                val &= ~RCVLPC_STATSENAB_DACK_FIX;
                tw32(RCVLPC_STATS_ENABLE, val);
        } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
                   tg3_flag(tp, TSO_CAPABLE)) {
                val = tr32(RCVLPC_STATS_ENABLE);
                val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
                tw32(RCVLPC_STATS_ENABLE, val);
        } else {
                tw32(RCVLPC_STATS_ENABLE, 0xffffff);
        }
        tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
        tw32(SNDDATAI_STATSENAB, 0xffffff);
        tw32(SNDDATAI_STATSCTRL,
             (SNDDATAI_SCTRL_ENABLE |
              SNDDATAI_SCTRL_FASTUPD));

        /* Setup host coalescing engine. */
        tw32(HOSTCC_MODE, 0);
        for (i = 0; i < 2000; i++) {
                if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
                        break;
                udelay(10);
        }

        __tg3_set_coalesce(tp);

        if (!tg3_flag(tp, 5705_PLUS)) {
                /* Status/statistics block address.  See tg3_timer,
                 * the tg3_periodic_fetch_stats call there, and
                 * tg3_get_stats to see how this works for 5705/5750 chips.
                 * NOTE: stats block removed for iPXE
                 */
                tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

                /* Clear statistics and status block memory areas */
                for (i = NIC_SRAM_STATS_BLK;
                     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
                     i += sizeof(u32)) {
                        tg3_write_mem(tp, i, 0);
                        udelay(40);
                }
        }

        tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

        tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
        tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
        if (!tg3_flag(tp, 5705_PLUS))
                tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

        if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                /* reset to prevent losing 1st rx packet intermittently */
                tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                udelay(10);
        }

        if (tg3_flag(tp, ENABLE_APE))
                tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
        else
                tp->mac_mode = 0;
        tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
                MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;

        if (!tg3_flag(tp, 5705_PLUS) &&
            !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
                tp->mac_mode |= MAC_MODE_LINK_POLARITY;

        tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
        udelay(40);

        /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
         * If TG3_FLAG_IS_NIC is zero, we should read the
         * register to preserve the GPIO settings for LOMs. The GPIOs,
         * whether used as inputs or outputs, are set by boot code after
         * reset.
         */
        if (!tg3_flag(tp, IS_NIC)) {
                u32 gpio_mask;

                gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
                            GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
                            GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
                        gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
                                     GRC_LCLCTRL_GPIO_OUTPUT3;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
                        gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

                tp->grc_local_ctrl &= ~gpio_mask;
                tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

                /* GPIO1 must be driven high for eeprom write protect */
                if (tg3_flag(tp, EEPROM_WRITE_PROT))
                        tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
                                               GRC_LCLCTRL_GPIO_OUTPUT1);
        }
        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
        udelay(100);

        if (!tg3_flag(tp, 5705_PLUS)) {
                tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
                udelay(40);
        }

        val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
               WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
               WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
               WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
               WDMAC_MODE_LNGREAD_ENAB);

        /* Enable host coalescing bug fix */
        if (tg3_flag(tp, 5755_PLUS))
                val |= WDMAC_MODE_STATUS_TAG_FIX;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                val |= WDMAC_MODE_BURST_ALL_DATA;

        tw32_f(WDMAC_MODE, val);
        udelay(40);

        if (tg3_flag(tp, PCIX_MODE)) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
                        pcix_cmd &= ~PCI_X_CMD_MAX_READ;
                        pcix_cmd |= PCI_X_CMD_READ_2K;
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
                        pcix_cmd |= PCI_X_CMD_READ_2K;
                }
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }

        tw32_f(RDMAC_MODE, rdmac_mode);
        udelay(40);

        tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
        if (!tg3_flag(tp, 5705_PLUS))
                tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                tw32(SNDDATAC_MODE,
                     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
        else
                tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

        tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
        tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
        val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
        if (tg3_flag(tp, LRG_PROD_RING_CAP))
                val |= RCVDBDI_MODE_LRG_RING_SZ;
        tw32(RCVDBDI_MODE, val);
        tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
        val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
        if (tg3_flag(tp, ENABLE_TSS))
                val |= SNDBDI_MODE_MULTI_TXQ_EN;
        tw32(SNDBDI_MODE, val);
        tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

        /* FIXME: 5701 firmware fix? */
#if 0
        if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
                err = tg3_load_5701_a0_firmware_fix(tp);
                if (err)
                        return err;
        }
#endif

        tp->tx_mode = TX_MODE_ENABLE;

        if (tg3_flag(tp, 5755_PLUS) ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
                val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
                tp->tx_mode &= ~val;
                tp->tx_mode |= tr32(MAC_TX_MODE) & val;
        }

        tw32_f(MAC_TX_MODE, tp->tx_mode);
        udelay(100);

        tp->rx_mode = RX_MODE_ENABLE;

        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        tw32(MAC_LED_CTRL, tp->led_ctrl);

        tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                udelay(10);
        }
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
                    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
                        /* Set drive transmission level to 1.2V  */
                        /* only if the signal pre-emphasis bit is not set  */
                        val = tr32(MAC_SERDES_CFG);
                        val &= 0xfffff000;
                        val |= 0x880;
                        tw32(MAC_SERDES_CFG, val);
                }
                if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
                        tw32(MAC_SERDES_CFG, 0x616000);
        }

        /* Prevent chip from dropping frames when flow control
         * is enabled.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                val = 1;
        else
                val = 2;
        tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
            (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
                /* Use hardware link auto-negotiation */
                tg3_flag_set(tp, HW_AUTONEG);
        }

        if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                u32 tmp;

                tmp = tr32(SERDES_RX_CTRL);
                tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
                tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
                tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
        }

        err = tg3_setup_phy(tp, 0);
        if (err)
                return err;

        if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
                u32 tmp;

                /* Clear CRC stats. */
                if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     tmp | MII_TG3_TEST1_CRC_EN);
                        tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
                }
        }

        __tg3_set_rx_mode(tp->dev);

        /* Initialize receive rules. */
        tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
        tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
        tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
        tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

        /* Number of receive-rule slots available on this chip; ASF
         * firmware reserves the last four.
         */
        if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
                limit = 8;
        else
                limit = 16;
        if (tg3_flag(tp, ENABLE_ASF))
                limit -= 4;

        /* Clear all unused rule slots; each case deliberately falls
         * through to clear every slot below the limit.
         */
        switch (limit) {
        case 16:
                tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
                /* fall through */
        case 15:
                tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
                /* fall through */
        case 14:
                tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
                /* fall through */
        case 13:
                tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
                /* fall through */
        case 12:
                tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
                /* fall through */
        case 11:
                tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
                /* fall through */
        case 10:
                tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
                /* fall through */
        case 9:
                tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
                /* fall through */
        case 8:
                tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
                /* fall through */
        case 7:
                tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
                /* fall through */
        case 6:
                tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
                /* fall through */
        case 5:
                tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
                /* fall through */
        case 4:
                /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
        case 3:
                /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
        case 2:
        case 1:
        default:
                break;
        }

        return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing. Invoked with tp->lock held.
 */
int tg3_init_hw(struct tg3 *tp, int reset_phy)
{ DBGP("%s\n", __func__);
        /* Select a working core clock configuration first. */
        tg3_switch_clocks(tp);

        /* Point the indirect memory window at the start of NIC SRAM. */
        tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

        return tg3_reset_hw(tp, reset_phy);
}
  2082. void tg3_set_txd(struct tg3 *tp, int entry,
  2083. dma_addr_t mapping, int len, u32 flags)
  2084. { DBGP("%s\n", __func__);
  2085. struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
  2086. txd->addr_hi = ((u64) mapping >> 32);
  2087. txd->addr_lo = ((u64) mapping & 0xffffffff);
  2088. txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
  2089. txd->vlan_tag = 0;
  2090. }
/* Run one DMA transaction through the chip's internal DMA engine as
 * a self-test.  A single internal buffer descriptor pointing at the
 * host buffer @buf_dma/@size is written into NIC SRAM, queued on the
 * read (host->device, @to_device != 0) or write (device->host) DMA
 * FIFO, and the completion FIFO is polled for up to 4ms.  Returns 0
 * when the descriptor completes, -ENODEV on timeout.
 */
int tg3_do_test_dma(struct tg3 *tp, u32 __unused *buf, dma_addr_t buf_dma, int size, int to_device)
{ DBGP("%s\n", __func__);
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int ret;
        unsigned int i;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        /* Quiesce the DMA engines and FIFOs before the test. */
        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        /* Build the internal descriptor describing the host buffer. */
        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        /* Copy the descriptor word-by-word into NIC SRAM through the
         * indirect PCI memory window.
         */
        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Enqueue the descriptor on the appropriate DMA work FIFO. */
        if (to_device)
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        else
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

        /* Poll the completion FIFO (40 x 100us) for our descriptor. */
        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}