
vxge_config.c

  1. /*
  2. * vxge-config.c: iPXE driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
  3. * Virtualized Server Adapter.
  4. *
  5. * Copyright(c) 2002-2010 Neterion Inc.
  6. *
  7. * This software may be used and distributed according to the terms of
  8. * the GNU General Public License (GPL), incorporated herein by
  9. * reference. Drivers based on or derived from this code fall under
  10. * the GPL and must retain the authorship, copyright and license
  11. * notice.
  12. *
  13. */
  14. FILE_LICENCE(GPL2_ONLY);
  15. #include <stdlib.h>
  16. #include <stdio.h>
  17. #include <ipxe/malloc.h>
  18. #include <ipxe/pci.h>
  19. #include <ipxe/iobuf.h>
  20. #include <ipxe/ethernet.h>
  21. #include <byteswap.h>
  22. #include "vxge_traffic.h"
  23. #include "vxge_config.h"
  24. #include "vxge_main.h"
  25. void
  26. vxge_hw_vpath_set_zero_rx_frm_len(struct __vxge_hw_device *hldev)
  27. {
  28. u64 val64;
  29. struct __vxge_hw_virtualpath *vpath;
  30. struct vxge_hw_vpath_reg __iomem *vp_reg;
  31. vpath = &hldev->virtual_path;
  32. vp_reg = vpath->vp_reg;
  33. val64 = readq(&vp_reg->rxmac_vcfg0);
  34. val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
  35. writeq(val64, &vp_reg->rxmac_vcfg0);
  36. val64 = readq(&vp_reg->rxmac_vcfg0);
  37. return;
  38. }
  39. enum vxge_hw_status
  40. vxge_hw_set_fw_api(struct __vxge_hw_device *hldev,
  41. u64 vp_id,
  42. u32 action,
  43. u32 offset,
  44. u64 data0,
  45. u64 data1)
  46. {
  47. enum vxge_hw_status status = VXGE_HW_OK;
  48. u64 val64;
  49. u32 fw_memo = VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO;
  50. struct vxge_hw_vpath_reg __iomem *vp_reg;
  51. vp_reg = (struct vxge_hw_vpath_reg __iomem *)hldev->vpath_reg[vp_id];
  52. writeq(data0, &vp_reg->rts_access_steer_data0);
  53. writeq(data1, &vp_reg->rts_access_steer_data1);
  54. wmb();
  55. val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
  56. VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
  57. VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
  58. VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE;
  59. writeq(val64, &vp_reg->rts_access_steer_ctrl);
  60. wmb();
  61. status = __vxge_hw_device_register_poll(
  62. &vp_reg->rts_access_steer_ctrl,
  63. VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
  64. WAIT_FACTOR *
  65. VXGE_HW_DEF_DEVICE_POLL_MILLIS);
  66. if (status != VXGE_HW_OK)
  67. return VXGE_HW_FAIL;
  68. val64 = readq(&vp_reg->rts_access_steer_ctrl);
  69. if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
  70. status = VXGE_HW_OK;
  71. else
  72. status = VXGE_HW_FAIL;
  73. return status;
  74. }
  75. /* Get function mode */
  76. enum vxge_hw_status
  77. vxge_hw_get_func_mode(struct __vxge_hw_device *hldev, u32 *func_mode)
  78. {
  79. enum vxge_hw_status status = VXGE_HW_OK;
  80. struct vxge_hw_vpath_reg __iomem *vp_reg;
  81. u64 val64;
  82. int vp_id;
  83. /* get the first vpath number assigned to this function */
  84. vp_id = hldev->first_vp_id;
  85. vp_reg = (struct vxge_hw_vpath_reg __iomem *)hldev->vpath_reg[vp_id];
  86. status = vxge_hw_set_fw_api(hldev, vp_id,
  87. VXGE_HW_FW_API_GET_FUNC_MODE, 0, 0, 0);
  88. if (status == VXGE_HW_OK) {
  89. val64 = readq(&vp_reg->rts_access_steer_data0);
  90. *func_mode = VXGE_HW_GET_FUNC_MODE_VAL(val64);
  91. }
  92. return status;
  93. }
  94. /*
  95. * __vxge_hw_device_pci_e_init
  96. * Initialize certain PCI/PCI-X configuration registers
  97. * with recommended values.
  98. */
  99. void
  100. __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
  101. {
  102. u16 cmd = 0;
  103. struct pci_device *pdev = hldev->pdev;
  104. vxge_trace();
  105. /* Set the PErr Response bit and SERR in the PCI command register. */
  106. pci_read_config_word(pdev, PCI_COMMAND, &cmd);
  107. cmd |= 0x140;
  108. pci_write_config_word(pdev, PCI_COMMAND, cmd);
  109. return;
  110. }
  111. /*
  112. * __vxge_hw_device_register_poll
  113. * Polls the given register for up to the specified amount of time,
  114. * returning success once the masked bits read back as zero.
  115. */
  116. enum vxge_hw_status
  117. __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
  118. {
  119. u64 val64;
  120. u32 i = 0;
  121. enum vxge_hw_status ret = VXGE_HW_FAIL;
  122. udelay(10);
  123. do {
  124. val64 = readq(reg);
  125. if (!(val64 & mask))
  126. return VXGE_HW_OK;
  127. udelay(100);
  128. } while (++i <= 9);
  129. i = 0;
  130. do {
  131. val64 = readq(reg);
  132. if (!(val64 & mask))
  133. return VXGE_HW_OK;
  134. udelay(1000);
  135. } while (++i <= max_millis);
  136. return ret;
  137. }
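/*
 * Usage note (illustrative sketch, not part of the original driver): this
 * poll helper underpins the "write a strobe bit, then wait for the hardware
 * to clear it" idiom used throughout this file, for example when issuing
 * RTS access steering commands as in vxge_hw_set_fw_api() above:
 *
 *	writeq(val64, &vp_reg->rts_access_steer_ctrl);
 *	wmb();
 *	status = __vxge_hw_device_register_poll(
 *			&vp_reg->rts_access_steer_ctrl,
 *			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
 *			WAIT_FACTOR * VXGE_HW_DEF_DEVICE_POLL_MILLIS);
 */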
  138. /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
  139. * in progress
  140. * This routine checks that the vpath reset-in-progress register has returned to zero
  141. */
  142. enum vxge_hw_status
  143. __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
  144. {
  145. enum vxge_hw_status status;
  146. vxge_trace();
  147. status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
  148. VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
  149. VXGE_HW_DEF_DEVICE_POLL_MILLIS);
  150. return status;
  151. }
  152. /*
  153. * __vxge_hw_device_get_legacy_reg
  154. * This routine gets the legacy register section's memory mapped address
  155. * and sets the swapper.
  156. */
  157. static struct vxge_hw_legacy_reg __iomem *
  158. __vxge_hw_device_get_legacy_reg(struct pci_device *pdev, void __iomem *bar0)
  159. {
  160. enum vxge_hw_status status;
  161. struct vxge_hw_legacy_reg __iomem *legacy_reg;
  162. /*
  163. * If the length of Bar0 is 16MB, then assume that we are configured
  164. * in MF8P_VP2 mode and then add 8MB to the legacy_reg offsets
  165. */
  166. if (pci_bar_size(pdev, PCI_BASE_ADDRESS_0) == 0x1000000)
  167. legacy_reg = (struct vxge_hw_legacy_reg __iomem *)
  168. (bar0 + 0x800000);
  169. else
  170. legacy_reg = (struct vxge_hw_legacy_reg __iomem *)bar0;
  171. status = __vxge_hw_legacy_swapper_set(legacy_reg);
  172. if (status != VXGE_HW_OK)
  173. return NULL;
  174. return legacy_reg;
  175. }
  176. /*
  177. * __vxge_hw_device_toc_get
  178. * This routine reads the toc pointer and returns the
  179. * memory mapped address of the toc
  180. */
  181. struct vxge_hw_toc_reg __iomem *
  182. __vxge_hw_device_toc_get(void __iomem *bar0,
  183. struct vxge_hw_legacy_reg __iomem *legacy_reg)
  184. {
  185. u64 val64;
  186. struct vxge_hw_toc_reg __iomem *toc = NULL;
  187. val64 = readq(&legacy_reg->toc_first_pointer);
  188. toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
  189. return toc;
  190. }
  191. /*
  192. * __vxge_hw_device_reg_addr_get
  193. * This routine sets the swapper and reads the toc pointer and initializes the
  194. * register location pointers in the device object. It waits until the RIC has
  195. * completed initializing the registers.
  196. */
  197. enum vxge_hw_status
  198. __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
  199. {
  200. u64 val64;
  201. u32 i;
  202. enum vxge_hw_status status = VXGE_HW_OK;
  203. hldev->legacy_reg = __vxge_hw_device_get_legacy_reg(hldev->pdev,
  204. hldev->bar0);
  205. if (hldev->legacy_reg == NULL) {
  206. status = VXGE_HW_FAIL;
  207. goto exit;
  208. }
  209. hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0,
  210. hldev->legacy_reg);
  211. if (hldev->toc_reg == NULL) {
  212. status = VXGE_HW_FAIL;
  213. goto exit;
  214. }
  215. val64 = readq(&hldev->toc_reg->toc_common_pointer);
  216. hldev->common_reg =
  217. (struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);
  218. val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
  219. hldev->mrpcim_reg =
  220. (struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);
  221. for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
  222. val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
  223. hldev->srpcim_reg[i] =
  224. (struct vxge_hw_srpcim_reg __iomem *)
  225. (hldev->bar0 + val64);
  226. }
  227. for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
  228. val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
  229. hldev->vpmgmt_reg[i] =
  230. (struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
  231. }
  232. for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
  233. val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
  234. hldev->vpath_reg[i] =
  235. (struct vxge_hw_vpath_reg __iomem *)
  236. (hldev->bar0 + val64);
  237. }
  238. val64 = readq(&hldev->toc_reg->toc_kdfc);
  239. switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
  240. case 0:
  241. hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
  242. VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
  243. break;
  244. default:
  245. break;
  246. }
  247. status = __vxge_hw_device_vpath_reset_in_prog_check(
  248. (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
  249. exit:
  250. return status;
  251. }
  252. /*
  253. * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
  254. * This routine returns the Access Rights of the driver
  255. */
  256. static u32
  257. __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
  258. {
  259. u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
  260. switch (host_type) {
  261. case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
  262. if (func_id == 0) {
  263. access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
  264. VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
  265. }
  266. break;
  267. case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
  268. access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
  269. VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
  270. break;
  271. case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
  272. access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
  273. VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
  274. break;
  275. case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
  276. case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
  277. case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
  278. break;
  279. case VXGE_HW_SR_VH_FUNCTION0:
  280. case VXGE_HW_VH_NORMAL_FUNCTION:
  281. access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
  282. break;
  283. }
  284. return access_rights;
  285. }
  286. /*
  287. * __vxge_hw_device_host_info_get
  288. * This routine returns the host type assignments
  289. */
  290. void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
  291. {
  292. u64 val64;
  293. u32 i;
  294. val64 = readq(&hldev->common_reg->host_type_assignments);
  295. hldev->host_type =
  296. (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
  297. hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
  298. for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
  299. if (!(hldev->vpath_assignments & vxge_mBIT(i)))
  300. continue;
  301. hldev->func_id =
  302. __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
  303. hldev->access_rights = __vxge_hw_device_access_rights_get(
  304. hldev->host_type, hldev->func_id);
  305. hldev->first_vp_id = i;
  306. break;
  307. }
  308. return;
  309. }
  310. /**
  311. * vxge_hw_device_hw_info_get - Get the hw information
  312. * Returns the vpath mask that has the bits set for each vpath allocated
  313. * for the driver, FW version information and the first MAC address for
  314. * each vpath
  315. */
  316. enum vxge_hw_status
  317. vxge_hw_device_hw_info_get(struct pci_device *pdev, void __iomem *bar0,
  318. struct vxge_hw_device_hw_info *hw_info)
  319. {
  320. u32 i;
  321. u64 val64;
  322. struct vxge_hw_toc_reg __iomem *toc;
  323. struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
  324. struct vxge_hw_common_reg __iomem *common_reg;
  325. struct vxge_hw_vpath_reg __iomem *vpath_reg;
  326. struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
  327. struct vxge_hw_legacy_reg __iomem *legacy_reg;
  328. enum vxge_hw_status status;
  329. vxge_trace();
  330. memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
  331. legacy_reg = __vxge_hw_device_get_legacy_reg(pdev, bar0);
  332. if (legacy_reg == NULL) {
  333. status = VXGE_HW_ERR_CRITICAL;
  334. goto exit;
  335. }
  336. toc = __vxge_hw_device_toc_get(bar0, legacy_reg);
  337. if (toc == NULL) {
  338. status = VXGE_HW_ERR_CRITICAL;
  339. goto exit;
  340. }
  341. val64 = readq(&toc->toc_common_pointer);
  342. common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);
  343. status = __vxge_hw_device_vpath_reset_in_prog_check(
  344. (u64 __iomem *)&common_reg->vpath_rst_in_prog);
  345. if (status != VXGE_HW_OK)
  346. goto exit;
  347. hw_info->vpath_mask = readq(&common_reg->vpath_assignments);
  348. val64 = readq(&common_reg->host_type_assignments);
  349. hw_info->host_type =
  350. (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
  351. for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
  352. if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
  353. continue;
  354. val64 = readq(&toc->toc_vpmgmt_pointer[i]);
  355. vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
  356. (bar0 + val64);
  357. hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
  358. if (__vxge_hw_device_access_rights_get(hw_info->host_type,
  359. hw_info->func_id) &
  360. VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
  361. val64 = readq(&toc->toc_mrpcim_pointer);
  362. mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
  363. (bar0 + val64);
  364. writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
  365. wmb();
  366. }
  367. val64 = readq(&toc->toc_vpath_pointer[i]);
  368. vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
  369. status = __vxge_hw_vpath_fw_ver_get(vpath_reg, hw_info);
  370. if (status != VXGE_HW_OK)
  371. goto exit;
  372. status = __vxge_hw_vpath_card_info_get(vpath_reg, hw_info);
  373. if (status != VXGE_HW_OK)
  374. goto exit;
  375. break;
  376. }
  377. for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
  378. if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
  379. continue;
  380. val64 = readq(&toc->toc_vpath_pointer[i]);
  381. vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
  382. status = __vxge_hw_vpath_addr_get(vpath_reg,
  383. hw_info->mac_addrs[i],
  384. hw_info->mac_addr_masks[i]);
  385. if (status != VXGE_HW_OK)
  386. goto exit;
  387. }
  388. exit:
  389. return status;
  390. }
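/*
 * Usage sketch (illustrative only, not part of the original driver): a probe
 * routine would typically map BAR0 and read the adapter information before
 * creating the HW device object. The ioremap()/pci_bar_*() calls below are
 * assumptions about how the caller obtains the register window.
 *
 *	struct vxge_hw_device_hw_info hw_info;
 *	void *bar0 = ioremap(pci_bar_start(pdev, PCI_BASE_ADDRESS_0),
 *			     pci_bar_size(pdev, PCI_BASE_ADDRESS_0));
 *	if (vxge_hw_device_hw_info_get(pdev, bar0, &hw_info) != VXGE_HW_OK)
 *		goto err;
 *	(hw_info.vpath_mask, hw_info.fw_version and hw_info.mac_addrs[]
 *	 now describe the adapter.)
 */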
  391. /*
  392. * vxge_hw_device_initialize - Initialize Titan device.
  393. * Initialize Titan device. The caller passes in the BAR0 mapping, the PCI
  394. * device and the Titan revision flag; this routine allocates the
  395. * struct __vxge_hw_device, maps its register spaces and reads the host
  396. * and function assignments.
  397. * On success the new device object is returned to the caller through
  398. * @devh, ready for Titan hardware initialization.
  399. */
  400. enum vxge_hw_status
  401. vxge_hw_device_initialize(
  402. struct __vxge_hw_device **devh,
  403. void *bar0,
  404. struct pci_device *pdev,
  405. u8 titan1)
  406. {
  407. struct __vxge_hw_device *hldev = NULL;
  408. enum vxge_hw_status status = VXGE_HW_OK;
  409. vxge_trace();
  410. hldev = (struct __vxge_hw_device *)
  411. zalloc(sizeof(struct __vxge_hw_device));
  412. if (hldev == NULL) {
  413. vxge_debug(VXGE_ERR, "hldev allocation failed\n");
  414. status = VXGE_HW_ERR_OUT_OF_MEMORY;
  415. goto exit;
  416. }
  417. hldev->magic = VXGE_HW_DEVICE_MAGIC;
  418. hldev->bar0 = bar0;
  419. hldev->pdev = pdev;
  420. hldev->titan1 = titan1;
  421. __vxge_hw_device_pci_e_init(hldev);
  422. status = __vxge_hw_device_reg_addr_get(hldev);
  423. if (status != VXGE_HW_OK) {
  424. vxge_debug(VXGE_ERR, "%s:%d __vxge_hw_device_reg_addr_get "
  425. "failed\n", __func__, __LINE__);
  426. vxge_hw_device_terminate(hldev);
  427. goto exit;
  428. }
  429. __vxge_hw_device_host_info_get(hldev);
  430. *devh = hldev;
  431. exit:
  432. return status;
  433. }
  434. /*
  435. * vxge_hw_device_terminate - Terminate Titan device.
  436. * Terminate HW device.
  437. */
  438. void
  439. vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
  440. {
  441. vxge_trace();
  442. assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
  443. hldev->magic = VXGE_HW_DEVICE_DEAD;
  444. free(hldev);
  445. }
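/*
 * Usage sketch (illustrative only): vxge_hw_device_initialize() and
 * vxge_hw_device_terminate() are intended to be paired by the caller, e.g.
 * across probe and remove. Error handling is abbreviated and the titan1
 * flag is assumed to come from the PCI revision ID.
 *
 *	struct __vxge_hw_device *hldev;
 *	if (vxge_hw_device_initialize(&hldev, bar0, pdev, titan1) != VXGE_HW_OK)
 *		return -EIO;
 *	...
 *	vxge_hw_device_terminate(hldev);
 */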
  446. /*
  447. * vxge_hw_ring_replenish - Initial replenish of RxDs
  448. * This function replenishes the RxDs from the reserve array to the work array
  449. */
  450. enum vxge_hw_status
  451. vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
  452. {
  453. struct __vxge_hw_device *hldev;
  454. struct vxge_hw_ring_rxd_1 *rxd;
  455. enum vxge_hw_status status = VXGE_HW_OK;
  456. u8 offset = 0;
  457. struct __vxge_hw_ring_block *block;
  458. u8 i, iob_off;
  459. vxge_trace();
  460. hldev = ring->vpathh->hldev;
  461. /*
  462. * We allocate all the DMA buffers first and then share
  463. * these buffers among all the rx descriptors in the block.
  464. */
  465. for (i = 0; i < ARRAY_SIZE(ring->iobuf); i++) {
  466. ring->iobuf[i] = alloc_iob(VXGE_LL_MAX_FRAME_SIZE(hldev->vdev));
  467. if (!ring->iobuf[i]) {
  468. while (i) {
  469. free_iob(ring->iobuf[--i]);
  470. ring->iobuf[i] = NULL;
  471. }
  472. status = VXGE_HW_ERR_OUT_OF_MEMORY;
  473. goto iobuf_err;
  474. }
  475. }
  476. for (offset = 0; offset < VXGE_HW_MAX_RXDS_PER_BLOCK_1; offset++) {
  477. rxd = &ring->rxdl->rxd[offset];
  478. if (offset == (VXGE_HW_MAX_RXDS_PER_BLOCK_1 - 1))
  479. iob_off = VXGE_HW_RING_BUF_PER_BLOCK;
  480. else
  481. iob_off = offset % ring->buf_per_block;
  482. rxd->control_0 = rxd->control_1 = 0;
  483. vxge_hw_ring_rxd_1b_set(rxd, ring->iobuf[iob_off],
  484. VXGE_LL_MAX_FRAME_SIZE(hldev->vdev));
  485. vxge_hw_ring_rxd_post(ring, rxd);
  486. }
  487. /* linking the block to itself as we use only one rx block*/
  488. block = ring->rxdl;
  489. block->reserved_2_pNext_RxD_block = (unsigned long) block;
  490. block->pNext_RxD_Blk_physical = (u64)virt_to_bus(block);
  491. ring->rxd_offset = 0;
  492. iobuf_err:
  493. return status;
  494. }
  495. /*
  496. * __vxge_hw_ring_create - Create a Ring
  497. * This function creates Ring and initializes it.
  498. *
  499. */
  500. enum vxge_hw_status
  501. __vxge_hw_ring_create(struct __vxge_hw_virtualpath *vpath,
  502. struct __vxge_hw_ring *ring)
  503. {
  504. enum vxge_hw_status status = VXGE_HW_OK;
  505. struct __vxge_hw_device *hldev;
  506. u32 vp_id;
  507. vxge_trace();
  508. hldev = vpath->hldev;
  509. vp_id = vpath->vp_id;
  510. ring->rxdl = malloc_dma(sizeof(struct __vxge_hw_ring_block),
  511. sizeof(struct __vxge_hw_ring_block));
  512. if (!ring->rxdl) {
  513. vxge_debug(VXGE_ERR, "%s:%d malloc_dma error\n",
  514. __func__, __LINE__);
  515. status = VXGE_HW_ERR_OUT_OF_MEMORY;
  516. goto exit;
  517. }
  518. ring->rxd_offset = 0;
  519. ring->vpathh = vpath;
  520. ring->buf_per_block = VXGE_HW_RING_BUF_PER_BLOCK;
  521. ring->rx_poll_weight = VXGE_HW_RING_RX_POLL_WEIGHT;
  522. ring->vp_id = vp_id;
  523. ring->vp_reg = vpath->vp_reg;
  524. ring->common_reg = hldev->common_reg;
  525. ring->rxd_qword_limit = VXGE_HW_RING_RXD_QWORD_LIMIT;
  526. status = vxge_hw_ring_replenish(ring);
  527. if (status != VXGE_HW_OK) {
  528. __vxge_hw_ring_delete(ring);
  529. goto exit;
  530. }
  531. exit:
  532. return status;
  533. }
  534. /*
  535. * __vxge_hw_ring_delete - Removes the ring
  536. * This function frees up the memory pool and removes the ring
  537. */
  538. enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_ring *ring)
  539. {
  540. u8 i;
  541. vxge_trace();
  542. for (i = 0; (i < ARRAY_SIZE(ring->iobuf)) && ring->iobuf[i]; i++) {
  543. free_iob(ring->iobuf[i]);
  544. ring->iobuf[i] = NULL;
  545. }
  546. if (ring->rxdl) {
  547. free_dma(ring->rxdl, sizeof(struct __vxge_hw_ring_block));
  548. ring->rxdl = NULL;
  549. }
  550. ring->rxd_offset = 0;
  551. return VXGE_HW_OK;
  552. }
  553. /*
  554. * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
  555. * Set the swapper bits appropriately for the legacy section.
  556. */
  557. enum vxge_hw_status
  558. __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
  559. {
  560. u64 val64;
  561. enum vxge_hw_status status = VXGE_HW_OK;
  562. vxge_trace();
  563. val64 = readq(&legacy_reg->toc_swapper_fb);
  564. wmb();
  565. switch (val64) {
  566. case VXGE_HW_SWAPPER_INITIAL_VALUE:
  567. return status;
  568. case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
  569. writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
  570. &legacy_reg->pifm_rd_swap_en);
  571. writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
  572. &legacy_reg->pifm_rd_flip_en);
  573. writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
  574. &legacy_reg->pifm_wr_swap_en);
  575. writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
  576. &legacy_reg->pifm_wr_flip_en);
  577. break;
  578. case VXGE_HW_SWAPPER_BYTE_SWAPPED:
  579. writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
  580. &legacy_reg->pifm_rd_swap_en);
  581. writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
  582. &legacy_reg->pifm_wr_swap_en);
  583. break;
  584. case VXGE_HW_SWAPPER_BIT_FLIPPED:
  585. writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
  586. &legacy_reg->pifm_rd_flip_en);
  587. writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
  588. &legacy_reg->pifm_wr_flip_en);
  589. break;
  590. }
  591. wmb();
  592. val64 = readq(&legacy_reg->toc_swapper_fb);
  593. if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
  594. status = VXGE_HW_ERR_SWAPPER_CTRL;
  595. return status;
  596. }
  597. /*
  598. * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
  599. * Set the swapper bits appropriately for the vpath.
  600. */
  601. enum vxge_hw_status
  602. __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
  603. {
  604. vxge_trace();
  605. #if (__BYTE_ORDER != __BIG_ENDIAN)
  606. u64 val64;
  607. val64 = readq(&vpath_reg->vpath_general_cfg1);
  608. wmb();
  609. val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
  610. writeq(val64, &vpath_reg->vpath_general_cfg1);
  611. wmb();
  612. #endif
  613. return VXGE_HW_OK;
  614. }
  615. /*
  616. * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
  617. * Set the swapper bits appropriately for the KDFC FIFOs of the vpath.
  618. */
  619. enum vxge_hw_status
  620. __vxge_hw_kdfc_swapper_set(
  621. struct vxge_hw_legacy_reg __iomem *legacy_reg,
  622. struct vxge_hw_vpath_reg __iomem *vpath_reg)
  623. {
  624. u64 val64;
  625. vxge_trace();
  626. val64 = readq(&legacy_reg->pifm_wr_swap_en);
  627. if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
  628. val64 = readq(&vpath_reg->kdfcctl_cfg0);
  629. wmb();
  630. val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
  631. VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
  632. VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
  633. writeq(val64, &vpath_reg->kdfcctl_cfg0);
  634. wmb();
  635. }
  636. return VXGE_HW_OK;
  637. }
  638. /*
  639. * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
  640. */
  641. enum vxge_hw_status
  642. vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
  643. {
  644. struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
  645. enum vxge_hw_status status = VXGE_HW_OK;
  646. int i = 0, j = 0;
  647. for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
  648. if (!((vpath_mask) & vxge_mBIT(i)))
  649. continue;
  650. vpmgmt_reg = hldev->vpmgmt_reg[i];
  651. for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
  652. if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
  653. & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
  654. return VXGE_HW_FAIL;
  655. }
  656. }
  657. return status;
  658. }
  659. /*
  660. * __vxge_hw_fifo_create - Create a FIFO
  661. * This function creates FIFO and initializes it.
  662. */
  663. enum vxge_hw_status
  664. __vxge_hw_fifo_create(struct __vxge_hw_virtualpath *vpath,
  665. struct __vxge_hw_fifo *fifo)
  666. {
  667. enum vxge_hw_status status = VXGE_HW_OK;
  668. vxge_trace();
  669. fifo->vpathh = vpath;
  670. fifo->depth = VXGE_HW_FIFO_TXD_DEPTH;
  671. fifo->hw_offset = fifo->sw_offset = 0;
  672. fifo->nofl_db = vpath->nofl_db;
  673. fifo->vp_id = vpath->vp_id;
  674. fifo->vp_reg = vpath->vp_reg;
  675. fifo->tx_intr_num = (vpath->vp_id * VXGE_HW_MAX_INTR_PER_VP)
  676. + VXGE_HW_VPATH_INTR_TX;
  677. fifo->txdl = malloc_dma(sizeof(struct vxge_hw_fifo_txd)
  678. * fifo->depth, fifo->depth);
  679. if (!fifo->txdl) {
  680. vxge_debug(VXGE_ERR, "%s:%d malloc_dma error\n",
  681. __func__, __LINE__);
  682. return VXGE_HW_ERR_OUT_OF_MEMORY;
  683. }
  684. memset(fifo->txdl, 0, sizeof(struct vxge_hw_fifo_txd) * fifo->depth);
  685. return status;
  686. }
  687. /*
  688. * __vxge_hw_fifo_delete - Removes the FIFO
  689. * This function frees up the memory pool and removes the FIFO
  690. */
  691. enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_fifo *fifo)
  692. {
  693. vxge_trace();
  694. if (fifo->txdl)
  695. free_dma(fifo->txdl,
  696. sizeof(struct vxge_hw_fifo_txd) * fifo->depth);
  697. fifo->txdl = NULL;
  698. fifo->hw_offset = fifo->sw_offset = 0;
  699. return VXGE_HW_OK;
  700. }
  701. /*
  702. * __vxge_hw_vpath_pci_read - Read the content of given address
  703. * in pci config space.
  704. * Read from the vpath pci config space.
  705. */
  706. enum vxge_hw_status
  707. __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
  708. u32 phy_func_0, u32 offset, u32 *val)
  709. {
  710. u64 val64;
  711. enum vxge_hw_status status = VXGE_HW_OK;
  712. struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
  713. val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
  714. if (phy_func_0)
  715. val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
  716. writeq(val64, &vp_reg->pci_config_access_cfg1);
  717. wmb();
  718. writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
  719. &vp_reg->pci_config_access_cfg2);
  720. wmb();
  721. status = __vxge_hw_device_register_poll(
  722. &vp_reg->pci_config_access_cfg2,
  723. VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
  724. if (status != VXGE_HW_OK)
  725. goto exit;
  726. val64 = readq(&vp_reg->pci_config_access_status);
  727. if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
  728. status = VXGE_HW_FAIL;
  729. *val = 0;
  730. } else
  731. *val = (u32)vxge_bVALn(val64, 32, 32);
  732. exit:
  733. return status;
  734. }
  735. /*
  736. * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
  737. * Returns the function number of the vpath.
  738. */
  739. u32
  740. __vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
  741. {
  742. u64 val64;
  743. val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
  744. return
  745. (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
  746. }
  747. /*
  748. * __vxge_hw_read_rts_ds - Program the RTS steering criteria
  749. */
  750. static inline void
  751. __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
  752. u64 dta_struct_sel)
  753. {
  754. writeq(0, &vpath_reg->rts_access_steer_ctrl);
  755. wmb();
  756. writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
  757. writeq(0, &vpath_reg->rts_access_steer_data1);
  758. wmb();
  759. return;
  760. }
  761. /*
  762. * __vxge_hw_vpath_card_info_get - Get the serial numbers,
  763. * part number and product description.
  764. */
  765. enum vxge_hw_status
  766. __vxge_hw_vpath_card_info_get(
  767. struct vxge_hw_vpath_reg __iomem *vpath_reg,
  768. struct vxge_hw_device_hw_info *hw_info)
  769. {
  770. u32 i, j;
  771. u64 val64;
  772. u64 data1 = 0ULL;
  773. u64 data2 = 0ULL;
  774. enum vxge_hw_status status = VXGE_HW_OK;
  775. u8 *serial_number = hw_info->serial_number;
  776. u8 *part_number = hw_info->part_number;
  777. u8 *product_desc = hw_info->product_desc;
  778. __vxge_hw_read_rts_ds(vpath_reg,
  779. VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
  780. val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
  781. VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
  782. VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
  783. VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
  784. VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
  785. VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
  786. status = __vxge_hw_pio_mem_write64(val64,
  787. &vpath_reg->rts_access_steer_ctrl,
  788. VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
  789. VXGE_HW_DEF_DEVICE_POLL_MILLIS);
  790. if (status != VXGE_HW_OK)
  791. return status;
  792. val64 = readq(&vpath_reg->rts_access_steer_ctrl);
  793. if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
  794. data1 = readq(&vpath_reg->rts_access_steer_data0);
  795. ((u64 *)serial_number)[0] = be64_to_cpu(data1);
  796. data2 = readq(&vpath_reg->rts_access_steer_data1);
  797. ((u64 *)serial_number)[1] = be64_to_cpu(data2);
  798. status = VXGE_HW_OK;
  799. } else
  800. *serial_number = 0;
  801. __vxge_hw_read_rts_ds(vpath_reg,
  802. VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
  803. val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
  804. VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
  805. VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
  806. VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
  807. VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
  808. VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
  809. status = __vxge_hw_pio_mem_write64(val64,
  810. &vpath_reg->rts_access_steer_ctrl,
  811. VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
  812. VXGE_HW_DEF_DEVICE_POLL_MILLIS);
  813. if (status != VXGE_HW_OK)
  814. return status;
  815. val64 = readq(&vpath_reg->rts_access_steer_ctrl);
  816. if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
  817. data1 = readq(&vpath_reg->rts_access_steer_data0);
  818. ((u64 *)part_number)[0] = be64_to_cpu(data1);
  819. data2 = readq(&vpath_reg->rts_access_steer_data1);
  820. ((u64 *)part_number)[1] = be64_to_cpu(data2);
  821. status = VXGE_HW_OK;
  822. } else
  823. *part_number = 0;
  824. j = 0;
  825. for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
  826. i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
  827. __vxge_hw_read_rts_ds(vpath_reg, i);
  828. val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
  829. VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
  830. VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
  831. VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
  832. VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
  833. VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
  834. status = __vxge_hw_pio_mem_write64(val64,
  835. &vpath_reg->rts_access_steer_ctrl,
  836. VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
  837. VXGE_HW_DEF_DEVICE_POLL_MILLIS);
  838. if (status != VXGE_HW_OK)
  839. return status;
  840. val64 = readq(&vpath_reg->rts_access_steer_ctrl);
  841. if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
  842. data1 = readq(&vpath_reg->rts_access_steer_data0);
  843. ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
  844. data2 = readq(&vpath_reg->rts_access_steer_data1);
  845. ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
  846. status = VXGE_HW_OK;
  847. } else
  848. *product_desc = 0;
  849. }
  850. return status;
  851. }
  852. /*
  853. * __vxge_hw_vpath_fw_ver_get - Get the fw version
  854. * Returns FW Version
  855. */
  856. enum vxge_hw_status
  857. __vxge_hw_vpath_fw_ver_get(
  858. struct vxge_hw_vpath_reg __iomem *vpath_reg,
  859. struct vxge_hw_device_hw_info *hw_info)
  860. {
  861. u64 val64;
  862. u64 data1 = 0ULL;
  863. u64 data2 = 0ULL;
  864. struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
  865. struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
  866. struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
  867. struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
  868. enum vxge_hw_status status = VXGE_HW_OK;
  869. val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
  870. VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
  871. VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
  872. VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
  873. VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
  874. VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
  875. status = __vxge_hw_pio_mem_write64(val64,
  876. &vpath_reg->rts_access_steer_ctrl,
  877. VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
  878. VXGE_HW_DEF_DEVICE_POLL_MILLIS);
  879. if (status != VXGE_HW_OK)
  880. goto exit;
  881. val64 = readq(&vpath_reg->rts_access_steer_ctrl);
  882. if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
  883. data1 = readq(&vpath_reg->rts_access_steer_data0);
  884. data2 = readq(&vpath_reg->rts_access_steer_data1);
  885. fw_date->day =
  886. (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
  887. data1);
  888. fw_date->month =
  889. (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
  890. data1);
  891. fw_date->year =
  892. (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
  893. data1);
  894. snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%d/%d/%d",
  895. fw_date->month, fw_date->day, fw_date->year);
  896. fw_version->major =
  897. (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
  898. fw_version->minor =
  899. (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
  900. fw_version->build =
  901. (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
  902. snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
  903. fw_version->major, fw_version->minor, fw_version->build);
  904. flash_date->day =
  905. (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
  906. flash_date->month =
  907. (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
  908. flash_date->year =
  909. (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
  910. snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%d/%d/%d",
  911. flash_date->month, flash_date->day, flash_date->year);
  912. flash_version->major =
  913. (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
  914. flash_version->minor =
  915. (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
  916. flash_version->build =
  917. (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
  918. snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
  919. flash_version->major, flash_version->minor,
  920. flash_version->build);
  921. status = VXGE_HW_OK;
  922. } else
  923. status = VXGE_HW_FAIL;
  924. exit:
  925. return status;
  926. }
  927. /*
  928. * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
  929. * from MAC address table.
  930. */
  931. enum vxge_hw_status
  932. __vxge_hw_vpath_addr_get(
  933. struct vxge_hw_vpath_reg *vpath_reg,
  934. u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
  935. {
  936. u32 i;
  937. u64 val64;
  938. u64 data1 = 0ULL;
  939. u64 data2 = 0ULL;
  940. u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY;
  941. enum vxge_hw_status status = VXGE_HW_OK;
  942. while (1) {
  943. val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
  944. VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
  945. VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
  946. VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
  947. VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
  948. status = __vxge_hw_pio_mem_write64(val64,
  949. &vpath_reg->rts_access_steer_ctrl,
  950. VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
  951. VXGE_HW_DEF_DEVICE_POLL_MILLIS);
  952. if (status != VXGE_HW_OK)
  953. break;
  954. val64 = readq(&vpath_reg->rts_access_steer_ctrl);
  955. if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
  956. data1 = readq(&vpath_reg->rts_access_steer_data0);
  957. data2 = readq(&vpath_reg->rts_access_steer_data1);
  958. data1 =
  959. VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
  960. data2 =
  961. VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
  962. data2);
  963. for (i = ETH_ALEN; i > 0; i--) {
  964. macaddr[i-1] = (u8)(data1 & 0xFF);
  965. data1 >>= 8;
  966. macaddr_mask[i-1] = (u8)(data2 & 0xFF);
  967. data2 >>= 8;
  968. }
  969. if (is_valid_ether_addr(macaddr)) {
  970. status = VXGE_HW_OK;
  971. break;
  972. }
  973. action =
  974. VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
  975. } else
  976. status = VXGE_HW_FAIL;
  977. }
  978. return status;
  979. }
  980. /*
  981. * __vxge_hw_vpath_mgmt_read
  982. * This routine reads the vpath_mgmt registers
  983. */
  984. static enum vxge_hw_status
  985. __vxge_hw_vpath_mgmt_read(
  986. struct __vxge_hw_virtualpath *vpath)
  987. {
  988. u32 i, mtu = 0, max_pyld = 0;
  989. u64 val64;
  990. enum vxge_hw_status status = VXGE_HW_OK;
  991. for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
  992. val64 = readq(&vpath->vpmgmt_reg->
  993. rxmac_cfg0_port_vpmgmt_clone[i]);
  994. max_pyld =
  995. (u32)
  996. VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
  997. (val64);
  998. if (mtu < max_pyld)
  999. mtu = max_pyld;
  1000. }
  1001. vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
  1002. val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
  1003. if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
  1004. VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
  1005. else
  1006. VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
  1007. return status;
  1008. }
  1009. /*
  1010. * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
  1011. * This routine checks the vpath_rst_in_prog register to see if
  1012. * the adapter has completed the reset process for the vpath
  1013. */
  1014. enum vxge_hw_status
  1015. __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
  1016. {
  1017. enum vxge_hw_status status;
  1018. vxge_trace();
  1019. status = __vxge_hw_device_register_poll(
  1020. &vpath->hldev->common_reg->vpath_rst_in_prog,
  1021. VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
  1022. 1 << (16 - vpath->vp_id)),
  1023. VXGE_HW_DEF_DEVICE_POLL_MILLIS);
  1024. return status;
  1025. }
  1026. /*
  1027. * __vxge_hw_vpath_reset
  1028. * This routine resets the vpath on the device
  1029. */
  1030. enum vxge_hw_status
  1031. __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
  1032. {
  1033. u64 val64;
  1034. enum vxge_hw_status status = VXGE_HW_OK;
  1035. vxge_trace();
  1036. val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
  1037. __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
  1038. &hldev->common_reg->cmn_rsthdlr_cfg0);
  1039. return status;
  1040. }
  1041. /*
  1042. * __vxge_hw_vpath_prc_configure
  1043. * This routine configures the prc registers of virtual path using the config
  1044. * passed
  1045. */
  1046. void
  1047. __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev)
  1048. {
  1049. u64 val64;
  1050. struct __vxge_hw_virtualpath *vpath;
  1051. struct vxge_hw_vpath_reg __iomem *vp_reg;
  1052. vxge_trace();
  1053. vpath = &hldev->virtual_path;
  1054. vp_reg = vpath->vp_reg;
  1055. val64 = readq(&vp_reg->prc_cfg1);
  1056. val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
  1057. writeq(val64, &vp_reg->prc_cfg1);
  1058. val64 = readq(&vpath->vp_reg->prc_cfg6);
  1059. val64 &= ~VXGE_HW_PRC_CFG6_RXD_CRXDT(0x1ff);
  1060. val64 &= ~VXGE_HW_PRC_CFG6_RXD_SPAT(0x1ff);
  1061. val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
  1062. val64 |= VXGE_HW_PRC_CFG6_RXD_CRXDT(0x3);
  1063. val64 |= VXGE_HW_PRC_CFG6_RXD_SPAT(0xf);
  1064. writeq(val64, &vpath->vp_reg->prc_cfg6);
  1065. writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
  1066. (u64)virt_to_bus(vpath->ringh.rxdl) >> 3),
  1067. &vp_reg->prc_cfg5);
  1068. val64 = readq(&vp_reg->prc_cfg4);
  1069. val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
  1070. val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
  1071. val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
  1072. VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
  1073. val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
  1074. writeq(val64, &vp_reg->prc_cfg4);
  1075. return;
  1076. }
  1077. /*
  1078. * __vxge_hw_vpath_kdfc_configure
  1079. * This routine configures the kdfc registers of virtual path using the
  1080. * config passed
  1081. */
  1082. enum vxge_hw_status
  1083. __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
  1084. {
  1085. u64 val64;
  1086. u64 vpath_stride;
  1087. enum vxge_hw_status status = VXGE_HW_OK;
  1088. struct __vxge_hw_virtualpath *vpath;
  1089. struct vxge_hw_vpath_reg __iomem *vp_reg;
  1090. vxge_trace();
  1091. vpath = &hldev->virtual_path;
  1092. vp_reg = vpath->vp_reg;
  1093. status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
  1094. if (status != VXGE_HW_OK)
  1095. goto exit;
  1096. val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
  1097. vpath->max_kdfc_db =
  1098. (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
  1099. val64+1)/2;
  1100. vpath->max_nofl_db = vpath->max_kdfc_db;
  1101. val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
  1102. (vpath->max_nofl_db*2)-1);
  1103. writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
  1104. writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
  1105. &vp_reg->kdfc_fifo_trpl_ctrl);
  1106. val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
  1107. val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
  1108. VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
  1109. val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
  1110. VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
  1111. #if (__BYTE_ORDER != __BIG_ENDIAN)
  1112. VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
  1113. #endif
  1114. VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
  1115. writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
  1116. writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
  1117. wmb();
  1118. vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
  1119. vpath->nofl_db =
  1120. (struct __vxge_hw_non_offload_db_wrapper __iomem *)
  1121. (hldev->kdfc + (vp_id *
  1122. VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
  1123. vpath_stride)));
  1124. exit:
  1125. return status;
  1126. }
  1127. /*
  1128. * __vxge_hw_vpath_mac_configure
  1129. * This routine configures the mac of virtual path using the config passed
  1130. */
  1131. enum vxge_hw_status
  1132. __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev)
  1133. {
  1134. u64 val64;
  1135. enum vxge_hw_status status = VXGE_HW_OK;
  1136. struct __vxge_hw_virtualpath *vpath;
  1137. struct vxge_hw_vpath_reg __iomem *vp_reg;
  1138. vxge_trace();
  1139. vpath = &hldev->virtual_path;
  1140. vp_reg = vpath->vp_reg;
  1141. writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
  1142. vpath->vsport_number), &vp_reg->xmac_vsport_choice);
  1143. val64 = readq(&vp_reg->rxmac_vcfg1);
  1144. val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
  1145. VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
  1146. writeq(val64, &vp_reg->rxmac_vcfg1);
  1147. return status;
  1148. }
  1149. /*
  1150. * __vxge_hw_vpath_tim_configure
  1151. * This routine configures the tim registers of virtual path using the config
  1152. * passed
  1153. */
  1154. enum vxge_hw_status
  1155. __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
  1156. {
  1157. u64 val64;
  1158. enum vxge_hw_status status = VXGE_HW_OK;
  1159. struct __vxge_hw_virtualpath *vpath;
  1160. struct vxge_hw_vpath_reg __iomem *vp_reg;
  1161. vxge_trace();
  1162. vpath = &hldev->virtual_path;
  1163. vp_reg = vpath->vp_reg;
  1164. writeq((u64)0, &vp_reg->tim_dest_addr);
  1165. writeq((u64)0, &vp_reg->tim_vpath_map);
  1166. writeq((u64)0, &vp_reg->tim_bitmap);
  1167. writeq((u64)0, &vp_reg->tim_remap);
  1168. writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
  1169. (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
  1170. VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
  1171. val64 = readq(&vp_reg->tim_pci_cfg);
  1172. val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
  1173. writeq(val64, &vp_reg->tim_pci_cfg);
  1174. /* TX configuration */
  1175. val64 = VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
  1176. (VXGE_TTI_BTIMER_VAL * 1000) / 272);
  1177. val64 |= (VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC |
  1178. VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI |
  1179. VXGE_HW_TIM_CFG1_INT_NUM_TXFRM_CNT_EN);
  1180. val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(TTI_TX_URANGE_A) |
  1181. VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(TTI_TX_URANGE_B) |
  1182. VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(TTI_TX_URANGE_C);
  1183. writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
  1184. val64 = VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(TTI_TX_UFC_A) |
  1185. VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(TTI_TX_UFC_B) |
  1186. VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(TTI_TX_UFC_C) |
  1187. VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(TTI_TX_UFC_D);
  1188. writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
  1189. val64 = VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
  1190. VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL);
  1191. val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
  1192. (VXGE_TTI_LTIMER_VAL * 1000) / 272);
  1193. writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
  1194. /* RX configuration */
  1195. val64 = VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
  1196. (VXGE_RTI_BTIMER_VAL * 1000) / 272);
  1197. val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
  1198. val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(RTI_RX_URANGE_A) |
  1199. VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(RTI_RX_URANGE_B) |
  1200. VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(RTI_RX_URANGE_C);
  1201. writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
  1202. val64 = VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(RTI_RX_UFC_A) |
  1203. VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(RTI_RX_UFC_B) |
  1204. VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(RTI_RX_UFC_C) |
  1205. VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(RTI_RX_UFC_D);
  1206. writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
  1207. val64 = VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
  1208. VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL);
  1209. val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
  1210. (VXGE_RTI_LTIMER_VAL * 1000) / 272);
  1211. writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
  1212. val64 = 0;
  1213. writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
  1214. writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
  1215. writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
  1216. writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
  1217. writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
  1218. writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
  1219. return status;
  1220. }
  1221. /*
  1222. * __vxge_hw_vpath_initialize
  1223. * This routine is the final phase of init which initializes the
  1224. * registers of the vpath using the configuration passed.
  1225. */
  1226. enum vxge_hw_status
  1227. __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
  1228. {
  1229. u64 val64;
  1230. u32 val32;
  1231. int i;
  1232. enum vxge_hw_status status = VXGE_HW_OK;
  1233. struct __vxge_hw_virtualpath *vpath;
  1234. struct vxge_hw_vpath_reg *vp_reg;
  1235. vxge_trace();
  1236. vpath = &hldev->virtual_path;
  1237. if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
  1238. status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
  1239. goto exit;
  1240. }
  1241. vp_reg = vpath->vp_reg;
  1242. status = __vxge_hw_legacy_swapper_set(hldev->legacy_reg);
  1243. if (status != VXGE_HW_OK)
  1244. goto exit;
  1245. status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
  1246. if (status != VXGE_HW_OK)
  1247. goto exit;
  1248. val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
  1249. for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
  1250. if (val64 & vxge_mBIT(i))
  1251. vpath->vsport_number = i;
  1252. }
  1253. status = __vxge_hw_vpath_mac_configure(hldev);
  1254. if (status != VXGE_HW_OK)
  1255. goto exit;
  1256. status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
  1257. if (status != VXGE_HW_OK)
  1258. goto exit;
  1259. status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
  1260. if (status != VXGE_HW_OK)
  1261. goto exit;
  1262. val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
  1263. /* Get MRRS value from device control */
  1264. status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
  1265. if (status == VXGE_HW_OK) {
  1266. val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
  1267. val64 &=
  1268. ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
  1269. val64 |=
  1270. VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
  1271. val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
  1272. }
  1273. val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
  1274. val64 |=
  1275. VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
  1276. VXGE_HW_MAX_PAYLOAD_SIZE_512);
  1277. val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
  1278. writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
  1279. exit:
  1280. return status;
  1281. }
  1282. /*
  1283. * __vxge_hw_vp_initialize - Initialize Virtual Path structure
  1284. * This routine is the initial phase of init which resets the vpath and
  1285. * initializes the software support structures.
  1286. */
  1287. enum vxge_hw_status
  1288. __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
  1289. struct __vxge_hw_virtualpath *vpath)
  1290. {
  1291. enum vxge_hw_status status = VXGE_HW_OK;
  1292. vxge_trace();
  1293. if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
  1294. status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
  1295. goto exit;
  1296. }
  1297. vpath->vp_id = vp_id;
  1298. vpath->vp_open = VXGE_HW_VP_OPEN;
  1299. vpath->hldev = hldev;
  1300. vpath->vp_reg = hldev->vpath_reg[vp_id];
  1301. vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
  1302. __vxge_hw_vpath_reset(hldev, vp_id);
  1303. status = __vxge_hw_vpath_reset_check(vpath);
  1304. if (status != VXGE_HW_OK) {
  1305. memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
  1306. goto exit;
  1307. }
  1308. VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
  1309. hldev->tim_int_mask1, vp_id);
  1310. status = __vxge_hw_vpath_initialize(hldev, vp_id);
  1311. if (status != VXGE_HW_OK) {
  1312. __vxge_hw_vp_terminate(hldev, vpath);
  1313. goto exit;
  1314. }
  1315. status = __vxge_hw_vpath_mgmt_read(vpath);
  1316. exit:
  1317. return status;
  1318. }
  1319. /*
  1320. * __vxge_hw_vp_terminate - Terminate Virtual Path structure
  1321. * This routine closes all the channels it opened and frees up the memory
  1322. */
  1323. void
  1324. __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev,
  1325. struct __vxge_hw_virtualpath *vpath)
  1326. {
  1327. vxge_trace();
  1328. if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
  1329. return;
  1330. VXGE_HW_DEVICE_TIM_INT_MASK_RESET(hldev->tim_int_mask0,
  1331. hldev->tim_int_mask1, vpath->vp_id);
  1332. memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
  1333. }
  1334. /*
  1335. * vxge_hw_vpath_mtu_set - Set MTU.
  1336. * Set a new MTU value. For example, to use jumbo frames:
  1337. * vxge_hw_vpath_mtu_set(my_device, 9600);
  1338. */
  1339. enum vxge_hw_status
  1340. vxge_hw_vpath_mtu_set(struct __vxge_hw_virtualpath *vpath, u32 new_mtu)
  1341. {
  1342. u64 val64;
  1343. enum vxge_hw_status status = VXGE_HW_OK;
  1344. vxge_trace();
  1345. new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
  1346. if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu))
  1347. status = VXGE_HW_ERR_INVALID_MTU_SIZE;
  1348. val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
  1349. val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
  1350. val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
  1351. writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
  1352. return status;
  1353. }
  1354. /*
  1355. * vxge_hw_vpath_open - Open a virtual path on a given adapter
  1356. * This function is used to open access to a virtual path of an
  1357. * adapter for offload and GRO operations. This function returns
  1358. * synchronously.
  1359. */
  1360. enum vxge_hw_status
  1361. vxge_hw_vpath_open(struct __vxge_hw_device *hldev, struct vxge_vpath *vpath)
  1362. {
  1363. struct __vxge_hw_virtualpath *vpathh;
  1364. enum vxge_hw_status status;
  1365. vxge_trace();
  1366. vpathh = &hldev->virtual_path;
  1367. if (vpath->vp_open == VXGE_HW_VP_OPEN) {
  1368. status = VXGE_HW_ERR_INVALID_STATE;
  1369. goto vpath_open_exit1;
  1370. }
  1371. status = __vxge_hw_vp_initialize(hldev, hldev->first_vp_id, vpathh);
  1372. if (status != VXGE_HW_OK)
  1373. goto vpath_open_exit1;
  1374. status = __vxge_hw_fifo_create(vpathh, &vpathh->fifoh);
  1375. if (status != VXGE_HW_OK)
  1376. goto vpath_open_exit2;
  1377. status = __vxge_hw_ring_create(vpathh, &vpathh->ringh);
  1378. if (status != VXGE_HW_OK)
  1379. goto vpath_open_exit3;
  1380. __vxge_hw_vpath_prc_configure(hldev);
  1381. return VXGE_HW_OK;
  1382. vpath_open_exit3:
  1383. __vxge_hw_fifo_delete(&vpathh->fifoh);
  1384. vpath_open_exit2:
  1385. __vxge_hw_vp_terminate(hldev, vpathh);
  1386. vpath_open_exit1:
  1387. return status;
  1388. }
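/*
 * Usage sketch (illustrative only): a typical open path pairs
 * vxge_hw_vpath_open() with the doorbell initialization and enable calls
 * defined below, and with vxge_hw_vpath_close() on teardown. The
 * vdev->vpath argument is an assumption about the caller's private data.
 *
 *	if (vxge_hw_vpath_open(hldev, &vdev->vpath) != VXGE_HW_OK)
 *		return -EIO;
 *	vxge_hw_vpath_rx_doorbell_init(&hldev->virtual_path);
 *	vxge_hw_vpath_enable(&hldev->virtual_path);
 *	...
 *	vxge_hw_vpath_close(&hldev->virtual_path);
 */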
  1389. /*
  1390. * vxge_hw_vpath_rx_doorbell_init - Post the count of the refreshed region
  1391. * of RxD list
  1392. * @vpath: vpath handle
  1393. *
  1394. * This function decides on the Rxd replenish count depending on the
  1395. * descriptor memory that has been allocated to this VPath.
  1396. */
  1397. void
  1398. vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_virtualpath *vpath)
  1399. {
  1400. u64 new_count, val64;
  1401. vxge_trace();
  1402. if (vpath->hldev->titan1) {
  1403. new_count = readq(&vpath->vp_reg->rxdmem_size);
  1404. new_count &= 0x1fff;
  1405. } else
  1406. new_count = VXGE_HW_RING_RXD_QWORDS_MODE_1 * 4;
  1407. val64 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
  1408. writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val64),
  1409. &vpath->vp_reg->prc_rxd_doorbell);
  1410. }
  1411. /*
  1412. * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
  1413. * This function is used to close access to virtual path opened
  1414. * earlier.
  1415. */
  1416. enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_virtualpath *vpath)
  1417. {
  1418. struct __vxge_hw_device *devh = NULL;
  1419. u32 vp_id = vpath->vp_id;
  1420. enum vxge_hw_status status = VXGE_HW_OK;
  1421. vxge_trace();
  1422. devh = vpath->hldev;
  1423. if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
  1424. status = VXGE_HW_ERR_VPATH_NOT_OPEN;
  1425. goto vpath_close_exit;
  1426. }
  1427. devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
  1428. __vxge_hw_ring_delete(&vpath->ringh);
  1429. __vxge_hw_fifo_delete(&vpath->fifoh);
  1430. __vxge_hw_vp_terminate(devh, vpath);
  1431. vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
  1432. vpath_close_exit:
  1433. return status;
  1434. }
  1435. /*
  1436. * vxge_hw_vpath_reset - Resets vpath
  1437. * This function is used to request a reset of vpath
  1438. */
  1439. enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_virtualpath *vpath)
  1440. {
  1441. enum vxge_hw_status status;
  1442. u32 vp_id;
  1443. vxge_trace();
  1444. vp_id = vpath->vp_id;
  1445. if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
  1446. status = VXGE_HW_ERR_VPATH_NOT_OPEN;
  1447. goto exit;
  1448. }
  1449. status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
  1450. exit:
  1451. return status;
  1452. }
  1453. /*
  1454. * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
  1455. * This function polls for the vpath reset completion and re-initializes
  1456. * the vpath.
  1457. */
  1458. enum vxge_hw_status
  1459. vxge_hw_vpath_recover_from_reset(struct __vxge_hw_virtualpath *vpath)
  1460. {
  1461. enum vxge_hw_status status;
  1462. struct __vxge_hw_device *hldev;
  1463. u32 vp_id;
  1464. vxge_trace();
  1465. vp_id = vpath->vp_id;
  1466. hldev = vpath->hldev;
  1467. if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
  1468. status = VXGE_HW_ERR_VPATH_NOT_OPEN;
  1469. goto exit;
  1470. }
  1471. status = __vxge_hw_vpath_reset_check(vpath);
  1472. if (status != VXGE_HW_OK)
  1473. goto exit;
  1474. status = __vxge_hw_vpath_initialize(hldev, vp_id);
  1475. if (status != VXGE_HW_OK)
  1476. goto exit;
  1477. __vxge_hw_vpath_prc_configure(hldev);
  1478. exit:
  1479. return status;
  1480. }
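/*
 * Usage sketch (illustrative only): recovering a stalled vpath combines the
 * reset request with the recovery poll above; re-enabling the vpath and
 * re-initializing the doorbell afterwards are assumptions about the
 * caller's reset path.
 *
 *	if (vxge_hw_vpath_reset(vpath) == VXGE_HW_OK &&
 *	    vxge_hw_vpath_recover_from_reset(vpath) == VXGE_HW_OK) {
 *		vxge_hw_vpath_enable(vpath);
 *		vxge_hw_vpath_rx_doorbell_init(vpath);
 *	}
 */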
  1481. /*
  1482. * vxge_hw_vpath_enable - Enable vpath.
  1483. * This routine clears the vpath reset thereby enabling a vpath
  1484. * to start forwarding frames and generating interrupts.
  1485. */
  1486. void
  1487. vxge_hw_vpath_enable(struct __vxge_hw_virtualpath *vpath)
  1488. {
  1489. struct __vxge_hw_device *hldev;
  1490. u64 val64;
  1491. vxge_trace();
  1492. hldev = vpath->hldev;
  1493. val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
  1494. 1 << (16 - vpath->vp_id));
  1495. __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
  1496. &hldev->common_reg->cmn_rsthdlr_cfg1);
  1497. }