
vxge_config.c

/*
 * vxge-config.c: gPXE driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 * Virtualized Server Adapter.
 *
 * Copyright(c) 2002-2010 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by
 * reference. Drivers based on or derived from this code fall under
 * the GPL and must retain the authorship, copyright and license
 * notice.
 *
 */

FILE_LICENCE(GPL2_ONLY);

#include <stdlib.h>
#include <stdio.h>
#include <gpxe/malloc.h>
#include <gpxe/iobuf.h>
#include <byteswap.h>

#include "vxge_traffic.h"
#include "vxge_config.h"
#include "vxge_main.h"

void
vxge_hw_vpath_set_zero_rx_frm_len(struct __vxge_hw_device *hldev)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        vpath = &hldev->virtual_path;
        vp_reg = vpath->vp_reg;

        val64 = readq(&vp_reg->rxmac_vcfg0);
        val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
        writeq(val64, &vp_reg->rxmac_vcfg0);
        val64 = readq(&vp_reg->rxmac_vcfg0);

        return;
}
enum vxge_hw_status
vxge_hw_set_fw_api(struct __vxge_hw_device *hldev,
                u64 vp_id,
                u32 action,
                u32 offset,
                u64 data0,
                u64 data1)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        u64 val64;
        u32 fw_memo = VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        vp_reg = (struct vxge_hw_vpath_reg __iomem *)hldev->vpath_reg[vp_id];

        writeq(data0, &vp_reg->rts_access_steer_data0);
        writeq(data1, &vp_reg->rts_access_steer_data1);
        wmb();

        val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
                VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
                VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
                VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE;

        writeq(val64, &vp_reg->rts_access_steer_ctrl);
        wmb();

        status = __vxge_hw_device_register_poll(
                        &vp_reg->rts_access_steer_ctrl,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
                        WAIT_FACTOR * VXGE_HW_DEF_DEVICE_POLL_MILLIS);
        if (status != VXGE_HW_OK)
                return VXGE_HW_FAIL;

        val64 = readq(&vp_reg->rts_access_steer_ctrl);
        if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
                status = VXGE_HW_OK;
        else
                status = VXGE_HW_FAIL;

        return status;
}
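/*
 * Illustrative usage sketch (not part of the driver): the FW "memo" API
 * above is driven by loading data0/data1, strobing rts_access_steer_ctrl,
 * and polling for strobe-clear. A hypothetical caller reading back the
 * function mode would look like this ("my_hldev" is assumed to be an
 * already-initialized device handle):
 *
 *      status = vxge_hw_set_fw_api(my_hldev, my_hldev->first_vp_id,
 *                      VXGE_HW_FW_API_GET_FUNC_MODE, 0, 0, 0);
 *      if (status == VXGE_HW_OK)
 *              data0 = readq(&my_hldev->vpath_reg[my_hldev->first_vp_id]->
 *                              rts_access_steer_data0);
 */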
/* Get function mode */
enum vxge_hw_status
vxge_hw_get_func_mode(struct __vxge_hw_device *hldev, u32 *func_mode)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_vpath_reg __iomem *vp_reg;
        u64 val64;
        int vp_id;

        /* get the first vpath number assigned to this function */
        vp_id = hldev->first_vp_id;

        vp_reg = (struct vxge_hw_vpath_reg __iomem *)hldev->vpath_reg[vp_id];

        status = vxge_hw_set_fw_api(hldev, vp_id,
                        VXGE_HW_FW_API_GET_FUNC_MODE, 0, 0, 0);
        if (status == VXGE_HW_OK) {
                val64 = readq(&vp_reg->rts_access_steer_data0);
                *func_mode = VXGE_HW_GET_FUNC_MODE_VAL(val64);
        }

        return status;
}
/*
 * __vxge_hw_device_pci_e_init
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
        u16 cmd = 0;
        struct pci_device *pdev = hldev->pdev;

        vxge_trace();

        /* Set the PErr Response bit and SERR in PCI command register. */
        pci_read_config_word(pdev, PCI_COMMAND, &cmd);
        cmd |= 0x140;
        pci_write_config_word(pdev, PCI_COMMAND, cmd);

        return;
}
/*
 * __vxge_hw_device_register_poll
 * Polls the given register for up to the specified amount of time,
 * until the masked bits are cleared.
 */
enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
        u64 val64;
        u32 i = 0;
        enum vxge_hw_status ret = VXGE_HW_FAIL;

        udelay(10);

        do {
                val64 = readq(reg);
                if (!(val64 & mask))
                        return VXGE_HW_OK;
                udelay(100);
        } while (++i <= 9);

        i = 0;
        do {
                val64 = readq(reg);
                if (!(val64 & mask))
                        return VXGE_HW_OK;
                udelay(1000);
        } while (++i <= max_millis);

        return ret;
}
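/*
 * Worked example of the polling budget above (illustrative): the routine
 * first spends 10us plus up to 10 * 100us in the fast loop, then falls
 * back to 1ms steps, so the total busy-wait is bounded by roughly
 * (max_millis + 1) milliseconds before VXGE_HW_FAIL is returned.
 */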
/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
 * is in progress
 * This routine polls until the vpath reset-in-progress register reads zero
 */
enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
        enum vxge_hw_status status;

        vxge_trace();

        status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
                        VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
                        VXGE_HW_DEF_DEVICE_POLL_MILLIS);
        return status;
}
/*
 * __vxge_hw_device_toc_get
 * This routine sets the swapper and reads the toc pointer and returns the
 * memory mapped address of the toc
 */
struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
        u64 val64;
        struct vxge_hw_toc_reg __iomem *toc = NULL;
        enum vxge_hw_status status;
        struct vxge_hw_legacy_reg __iomem *legacy_reg =
                (struct vxge_hw_legacy_reg __iomem *)bar0;

        status = __vxge_hw_legacy_swapper_set(legacy_reg);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&legacy_reg->toc_first_pointer);
        toc = (struct vxge_hw_toc_reg __iomem *)(bar0 + val64);
exit:
        return toc;
}

/*
 * __vxge_hw_device_reg_addr_get
 * This routine sets the swapper and reads the toc pointer and initializes the
 * register location pointers in the device object. It waits until the ric is
 * completed initializing registers.
 */
enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 i;
        enum vxge_hw_status status = VXGE_HW_OK;

        hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;

        hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
        if (hldev->toc_reg == NULL) {
                status = VXGE_HW_FAIL;
                goto exit;
        }

        val64 = readq(&hldev->toc_reg->toc_common_pointer);
        hldev->common_reg =
                (struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);

        val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
        hldev->mrpcim_reg =
                (struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);

        for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
                hldev->srpcim_reg[i] =
                        (struct vxge_hw_srpcim_reg __iomem *)
                                (hldev->bar0 + val64);
        }

        for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
                hldev->vpmgmt_reg[i] =
                        (struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
        }

        for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
                hldev->vpath_reg[i] =
                        (struct vxge_hw_vpath_reg __iomem *)
                                (hldev->bar0 + val64);
        }

        val64 = readq(&hldev->toc_reg->toc_kdfc);
        switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
        case 0:
                hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
                        VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
                break;
        default:
                break;
        }

        status = __vxge_hw_device_vpath_reset_in_prog_check(
                        (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
        return status;
}
/*
 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
 * This routine returns the Access Rights of the driver
 */
static u32
__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
{
        u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;

        switch (host_type) {
        case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
                if (func_id == 0) {
                        access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                        VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                }
                break;
        case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
        case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
        case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
                break;
        case VXGE_HW_SR_VH_FUNCTION0:
        case VXGE_HW_VH_NORMAL_FUNCTION:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        }

        return access_rights;
}

/*
 * __vxge_hw_device_host_info_get
 * This routine returns the host type assignments
 */
void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 i;

        val64 = readq(&hldev->common_reg->host_type_assignments);
        hldev->host_type =
                (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

        hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                if (!(hldev->vpath_assignments & vxge_mBIT(i)))
                        continue;

                hldev->func_id =
                        __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);

                hldev->access_rights = __vxge_hw_device_access_rights_get(
                        hldev->host_type, hldev->func_id);

                hldev->first_vp_id = i;
                break;
        }

        return;
}
/**
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information, and the first MAC address for
 * each vpath
 */
enum vxge_hw_status
vxge_hw_device_hw_info_get(void __iomem *bar0,
                struct vxge_hw_device_hw_info *hw_info)
{
        u32 i;
        u64 val64;
        struct vxge_hw_toc_reg __iomem *toc;
        struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
        struct vxge_hw_common_reg __iomem *common_reg;
        struct vxge_hw_vpath_reg __iomem *vpath_reg;
        struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
        enum vxge_hw_status status;

        vxge_trace();

        memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

        toc = __vxge_hw_device_toc_get(bar0);
        if (toc == NULL) {
                status = VXGE_HW_ERR_CRITICAL;
                goto exit;
        }

        val64 = readq(&toc->toc_common_pointer);
        common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);

        status = __vxge_hw_device_vpath_reset_in_prog_check(
                        (u64 __iomem *)&common_reg->vpath_rst_in_prog);
        if (status != VXGE_HW_OK)
                goto exit;

        hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

        val64 = readq(&common_reg->host_type_assignments);
        hw_info->host_type =
                (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
                        continue;

                val64 = readq(&toc->toc_vpmgmt_pointer[i]);
                vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
                                (bar0 + val64);

                hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
                if (__vxge_hw_device_access_rights_get(hw_info->host_type,
                                hw_info->func_id) &
                                VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
                        val64 = readq(&toc->toc_mrpcim_pointer);
                        mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
                                        (bar0 + val64);

                        writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
                        wmb();
                }

                val64 = readq(&toc->toc_vpath_pointer[i]);
                vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

                status = __vxge_hw_vpath_fw_ver_get(vpath_reg, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;

                status = __vxge_hw_vpath_card_info_get(vpath_reg, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;

                break;
        }

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
                        continue;

                val64 = readq(&toc->toc_vpath_pointer[i]);
                vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

                status = __vxge_hw_vpath_addr_get(vpath_reg,
                                hw_info->mac_addrs[i],
                                hw_info->mac_addr_masks[i]);
                if (status != VXGE_HW_OK)
                        goto exit;
        }
exit:
        return status;
}
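/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * this call is made before vxge_hw_device_initialize(), directly on the
 * mapped BAR0, typically from the PCI probe routine:
 *
 *      struct vxge_hw_device_hw_info hw_info;
 *      enum vxge_hw_status status;
 *
 *      status = vxge_hw_device_hw_info_get(bar0, &hw_info);
 *      if (status != VXGE_HW_OK)
 *              return -EIO;
 *      // hw_info.vpath_mask, hw_info.fw_version and hw_info.mac_addrs[]
 *      // are now valid
 */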
/*
 * vxge_hw_device_initialize - Initialize Titan device.
 * Initialize Titan device. Note that all the arguments of this public API
 * are 'IN', including @hldev. Driver cooperates with
 * OS to find new Titan device, locate its PCI and memory spaces.
 *
 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
 * to enable the latter to perform Titan hardware initialization.
 */
enum vxge_hw_status
vxge_hw_device_initialize(
        struct __vxge_hw_device **devh,
        void *bar0,
        struct pci_device *pdev,
        u8 titan1)
{
        struct __vxge_hw_device *hldev = NULL;
        enum vxge_hw_status status = VXGE_HW_OK;

        vxge_trace();

        hldev = (struct __vxge_hw_device *)
                        zalloc(sizeof(struct __vxge_hw_device));
        if (hldev == NULL) {
                vxge_debug(VXGE_ERR, "hldev allocation failed\n");
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        hldev->magic = VXGE_HW_DEVICE_MAGIC;

        hldev->bar0 = bar0;
        hldev->pdev = pdev;
        hldev->titan1 = titan1;

        __vxge_hw_device_pci_e_init(hldev);

        status = __vxge_hw_device_reg_addr_get(hldev);
        if (status != VXGE_HW_OK) {
                vxge_debug(VXGE_ERR, "%s:%d __vxge_hw_device_reg_addr_get "
                                "failed\n", __func__, __LINE__);
                vxge_hw_device_terminate(hldev);
                goto exit;
        }

        __vxge_hw_device_host_info_get(hldev);

        *devh = hldev;
exit:
        return status;
}
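/*
 * Illustrative probe-time sequence (hypothetical caller, not part of this
 * file): hw_info_get() first, then device_initialize() with the same BAR,
 * and terminate() on teardown:
 *
 *      struct __vxge_hw_device *hldev;
 *      enum vxge_hw_status status;
 *
 *      status = vxge_hw_device_initialize(&hldev, bar0, pdev, titan1);
 *      if (status != VXGE_HW_OK)
 *              return -EIO;
 *      // ... use hldev ...
 *      vxge_hw_device_terminate(hldev);
 */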
/*
 * vxge_hw_device_terminate - Terminate Titan device.
 * Terminate HW device.
 */
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
        vxge_trace();

        assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

        hldev->magic = VXGE_HW_DEVICE_DEAD;
        free(hldev);
}
/*
 * vxge_hw_ring_replenish - Initial replenish of RxDs
 * This function replenishes the RxDs from the reserve array to the work array
 */
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
        struct __vxge_hw_device *hldev;
        struct vxge_hw_ring_rxd_1 *rxd;
        enum vxge_hw_status status = VXGE_HW_OK;
        u8 offset = 0;
        struct __vxge_hw_ring_block *block;
        u8 i, iob_off;

        vxge_trace();

        hldev = ring->vpathh->hldev;
        /*
         * We allocate all the dma buffers first and then share these
         * buffers among all the rx descriptors in the block.
         */
        for (i = 0; i < ARRAY_SIZE(ring->iobuf); i++) {
                ring->iobuf[i] = alloc_iob(VXGE_LL_MAX_FRAME_SIZE(hldev->vdev));
                if (!ring->iobuf[i]) {
                        while (i) {
                                free_iob(ring->iobuf[--i]);
                                ring->iobuf[i] = NULL;
                        }
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
                        goto iobuf_err;
                }
        }

        for (offset = 0; offset < VXGE_HW_MAX_RXDS_PER_BLOCK_1; offset++) {
                rxd = &ring->rxdl->rxd[offset];
                if (offset == (VXGE_HW_MAX_RXDS_PER_BLOCK_1 - 1))
                        iob_off = VXGE_HW_RING_BUF_PER_BLOCK;
                else
                        iob_off = offset % ring->buf_per_block;

                rxd->control_0 = rxd->control_1 = 0;
                vxge_hw_ring_rxd_1b_set(rxd, ring->iobuf[iob_off],
                                VXGE_LL_MAX_FRAME_SIZE(hldev->vdev));

                vxge_hw_ring_rxd_post(ring, rxd);
        }
        /* linking the block to itself as we use only one rx block */
        block = ring->rxdl;
        block->reserved_2_pNext_RxD_block = (unsigned long)block;
        block->pNext_RxD_Blk_physical = (u64)virt_to_bus(block);

        ring->rxd_offset = 0;
iobuf_err:
        return status;
}
/*
 * __vxge_hw_ring_create - Create a Ring
 * This function creates a ring and initializes it.
 */
enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_virtualpath *vpath,
                struct __vxge_hw_ring *ring)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_device *hldev;
        u32 vp_id;

        vxge_trace();

        hldev = vpath->hldev;
        vp_id = vpath->vp_id;

        ring->rxdl = malloc_dma(sizeof(struct __vxge_hw_ring_block),
                        sizeof(struct __vxge_hw_ring_block));
        if (!ring->rxdl) {
                vxge_debug(VXGE_ERR, "%s:%d malloc_dma error\n",
                                __func__, __LINE__);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }
        ring->rxd_offset = 0;
        ring->vpathh = vpath;
        ring->buf_per_block = VXGE_HW_RING_BUF_PER_BLOCK;
        ring->rx_poll_weight = VXGE_HW_RING_RX_POLL_WEIGHT;
        ring->vp_id = vp_id;
        ring->vp_reg = vpath->vp_reg;
        ring->common_reg = hldev->common_reg;

        ring->rxd_qword_limit = VXGE_HW_RING_RXD_QWORD_LIMIT;

        status = vxge_hw_ring_replenish(ring);
        if (status != VXGE_HW_OK) {
                __vxge_hw_ring_delete(ring);
                goto exit;
        }
exit:
        return status;
}
/*
 * __vxge_hw_ring_delete - Removes the ring
 * This function frees up the memory pool and removes the ring
 */
enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_ring *ring)
{
        u8 i;

        vxge_trace();

        for (i = 0; (i < ARRAY_SIZE(ring->iobuf)) && ring->iobuf[i]; i++) {
                free_iob(ring->iobuf[i]);
                ring->iobuf[i] = NULL;
        }

        if (ring->rxdl) {
                free_dma(ring->rxdl, sizeof(struct __vxge_hw_ring_block));
                ring->rxdl = NULL;
        }
        ring->rxd_offset = 0;

        return VXGE_HW_OK;
}
/*
 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
 * Set the swapper bits appropriately for the legacy section.
 */
enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        vxge_trace();

        val64 = readq(&legacy_reg->toc_swapper_fb);
        wmb();

        switch (val64) {
        case VXGE_HW_SWAPPER_INITIAL_VALUE:
                return status;

        case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
                writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
                        &legacy_reg->pifm_rd_swap_en);
                writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
                        &legacy_reg->pifm_rd_flip_en);
                writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
                        &legacy_reg->pifm_wr_swap_en);
                writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
                        &legacy_reg->pifm_wr_flip_en);
                break;

        case VXGE_HW_SWAPPER_BYTE_SWAPPED:
                writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
                        &legacy_reg->pifm_rd_swap_en);
                writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
                        &legacy_reg->pifm_wr_swap_en);
                break;

        case VXGE_HW_SWAPPER_BIT_FLIPPED:
                writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
                        &legacy_reg->pifm_rd_flip_en);
                writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
                        &legacy_reg->pifm_wr_flip_en);
                break;
        }

        wmb();

        val64 = readq(&legacy_reg->toc_swapper_fb);
        if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
                status = VXGE_HW_ERR_SWAPPER_CTRL;

        return status;
}
/*
 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
 * Set the swapper bits appropriately for the vpath.
 */
enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
        vxge_trace();
#if (__BYTE_ORDER != __BIG_ENDIAN)
        u64 val64;

        val64 = readq(&vpath_reg->vpath_general_cfg1);
        wmb();
        val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
        writeq(val64, &vpath_reg->vpath_general_cfg1);
        wmb();
#endif
        return VXGE_HW_OK;
}
/*
 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
 * Set the swapper bits appropriately for the kdfc.
 */
enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(
        struct vxge_hw_legacy_reg __iomem *legacy_reg,
        struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
        u64 val64;

        vxge_trace();

        val64 = readq(&legacy_reg->pifm_wr_swap_en);

        if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
                val64 = readq(&vpath_reg->kdfcctl_cfg0);
                wmb();

                val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
                        VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
                        VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;

                writeq(val64, &vpath_reg->kdfcctl_cfg0);
                wmb();
        }

        return VXGE_HW_OK;
}
/*
 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
 */
enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
{
        struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
        enum vxge_hw_status status = VXGE_HW_OK;
        int i = 0, j = 0;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                if (!((vpath_mask) & vxge_mBIT(i)))
                        continue;

                vpmgmt_reg = hldev->vpmgmt_reg[i];
                for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
                        if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
                                & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
                                return VXGE_HW_FAIL;
                }
        }
        return status;
}
/*
 * __vxge_hw_fifo_create - Create a FIFO
 * This function creates a FIFO and initializes it.
 */
enum vxge_hw_status
__vxge_hw_fifo_create(struct __vxge_hw_virtualpath *vpath,
                struct __vxge_hw_fifo *fifo)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        vxge_trace();

        fifo->vpathh = vpath;
        fifo->depth = VXGE_HW_FIFO_TXD_DEPTH;
        fifo->hw_offset = fifo->sw_offset = 0;
        fifo->nofl_db = vpath->nofl_db;
        fifo->vp_id = vpath->vp_id;
        fifo->vp_reg = vpath->vp_reg;
        fifo->tx_intr_num = (vpath->vp_id * VXGE_HW_MAX_INTR_PER_VP)
                                + VXGE_HW_VPATH_INTR_TX;

        fifo->txdl = malloc_dma(sizeof(struct vxge_hw_fifo_txd)
                                * fifo->depth, fifo->depth);
        if (!fifo->txdl) {
                vxge_debug(VXGE_ERR, "%s:%d malloc_dma error\n",
                                __func__, __LINE__);
                return VXGE_HW_ERR_OUT_OF_MEMORY;
        }
        memset(fifo->txdl, 0, sizeof(struct vxge_hw_fifo_txd) * fifo->depth);
        return status;
}

/*
 * __vxge_hw_fifo_delete - Removes the FIFO
 * This function frees up the memory pool and removes the FIFO
 */
enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_fifo *fifo)
{
        vxge_trace();

        if (fifo->txdl)
                free_dma(fifo->txdl,
                        sizeof(struct vxge_hw_fifo_txd) * fifo->depth);

        fifo->txdl = NULL;
        fifo->hw_offset = fifo->sw_offset = 0;

        return VXGE_HW_OK;
}
/*
 * __vxge_hw_vpath_pci_read - Read the content of given address
 * in pci config space.
 * Read from the vpath pci config space.
 */
enum vxge_hw_status
__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
                u32 phy_func_0, u32 offset, u32 *val)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;

        val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);

        if (phy_func_0)
                val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;

        writeq(val64, &vp_reg->pci_config_access_cfg1);
        wmb();
        writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
                &vp_reg->pci_config_access_cfg2);
        wmb();

        status = __vxge_hw_device_register_poll(
                        &vp_reg->pci_config_access_cfg2,
                        VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&vp_reg->pci_config_access_status);
        if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
                status = VXGE_HW_FAIL;
                *val = 0;
        } else
                *val = (u32)vxge_bVALn(val64, 32, 32);
exit:
        return status;
}
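/*
 * Illustrative usage (mirrors the MRRS read in __vxge_hw_vpath_initialize()
 * below): read PCI Express Device Control at config offset 0x78 through the
 * vpath window and extract the Max_Read_Request_Size field:
 *
 *      u32 val32;
 *
 *      status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
 *      if (status == VXGE_HW_OK)
 *              val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
 */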
/*
 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
 * Returns the function number of the vpath.
 */
u32
__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
{
        u64 val64;

        val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);

        return
        (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
}

/*
 * __vxge_hw_read_rts_ds - Program RTS steering criteria
 */
static inline void
__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
                u64 dta_struct_sel)
{
        writeq(0, &vpath_reg->rts_access_steer_ctrl);
        wmb();
        writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
        writeq(0, &vpath_reg->rts_access_steer_data1);
        wmb();
        return;
}
/*
 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
 * part number and product description.
 */
enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
        struct vxge_hw_vpath_reg __iomem *vpath_reg,
        struct vxge_hw_device_hw_info *hw_info)
{
        u32 i, j;
        u64 val64;
        u64 data1 = 0ULL;
        u64 data2 = 0ULL;
        enum vxge_hw_status status = VXGE_HW_OK;
        u8 *serial_number = hw_info->serial_number;
        u8 *part_number = hw_info->part_number;
        u8 *product_desc = hw_info->product_desc;

        __vxge_hw_read_rts_ds(vpath_reg,
                VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);

        val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
                VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
                VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
                VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

        status = __vxge_hw_pio_mem_write64(val64,
                        &vpath_reg->rts_access_steer_ctrl,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
                        VXGE_HW_DEF_DEVICE_POLL_MILLIS);
        if (status != VXGE_HW_OK)
                return status;

        val64 = readq(&vpath_reg->rts_access_steer_ctrl);
        if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
                data1 = readq(&vpath_reg->rts_access_steer_data0);
                ((u64 *)serial_number)[0] = be64_to_cpu(data1);

                data2 = readq(&vpath_reg->rts_access_steer_data1);
                ((u64 *)serial_number)[1] = be64_to_cpu(data2);
                status = VXGE_HW_OK;
        } else
                *serial_number = 0;

        __vxge_hw_read_rts_ds(vpath_reg,
                VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);

        val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
                VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
                VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
                VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

        status = __vxge_hw_pio_mem_write64(val64,
                        &vpath_reg->rts_access_steer_ctrl,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
                        VXGE_HW_DEF_DEVICE_POLL_MILLIS);
        if (status != VXGE_HW_OK)
                return status;

        val64 = readq(&vpath_reg->rts_access_steer_ctrl);
        if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
                data1 = readq(&vpath_reg->rts_access_steer_data0);
                ((u64 *)part_number)[0] = be64_to_cpu(data1);

                data2 = readq(&vpath_reg->rts_access_steer_data1);
                ((u64 *)part_number)[1] = be64_to_cpu(data2);

                status = VXGE_HW_OK;
        } else
                *part_number = 0;

        j = 0;

        for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
                i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {

                __vxge_hw_read_rts_ds(vpath_reg, i);

                val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

                status = __vxge_hw_pio_mem_write64(val64,
                                &vpath_reg->rts_access_steer_ctrl,
                                VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
                                VXGE_HW_DEF_DEVICE_POLL_MILLIS);
                if (status != VXGE_HW_OK)
                        return status;

                val64 = readq(&vpath_reg->rts_access_steer_ctrl);
                if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
                        data1 = readq(&vpath_reg->rts_access_steer_data0);
                        ((u64 *)product_desc)[j++] = be64_to_cpu(data1);

                        data2 = readq(&vpath_reg->rts_access_steer_data1);
                        ((u64 *)product_desc)[j++] = be64_to_cpu(data2);

                        status = VXGE_HW_OK;
                } else
                        *product_desc = 0;
        }

        return status;
}
/*
 * __vxge_hw_vpath_fw_ver_get - Get the fw version
 * Returns FW Version
 */
enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(
        struct vxge_hw_vpath_reg __iomem *vpath_reg,
        struct vxge_hw_device_hw_info *hw_info)
{
        u64 val64;
        u64 data1 = 0ULL;
        u64 data2 = 0ULL;
        struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
        struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
        struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
        struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
        enum vxge_hw_status status = VXGE_HW_OK;

        val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
                VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
                VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
                VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
                VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
                VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

        status = __vxge_hw_pio_mem_write64(val64,
                        &vpath_reg->rts_access_steer_ctrl,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
                        VXGE_HW_DEF_DEVICE_POLL_MILLIS);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&vpath_reg->rts_access_steer_ctrl);
        if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
                data1 = readq(&vpath_reg->rts_access_steer_data0);
                data2 = readq(&vpath_reg->rts_access_steer_data1);

                fw_date->day =
                        (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data1);
                fw_date->month =
                        (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data1);
                fw_date->year =
                        (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data1);

                snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%d/%d/%d",
                                fw_date->month, fw_date->day, fw_date->year);

                fw_version->major =
                        (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
                fw_version->minor =
                        (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
                fw_version->build =
                        (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);

                snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
                        fw_version->major, fw_version->minor, fw_version->build);

                flash_date->day =
                        (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
                flash_date->month =
                        (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
                flash_date->year =
                        (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);

                snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%d/%d/%d",
                        flash_date->month, flash_date->day, flash_date->year);

                flash_version->major =
                        (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
                flash_version->minor =
                        (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
                flash_version->build =
                        (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);

                snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
                        flash_version->major, flash_version->minor,
                        flash_version->build);

                status = VXGE_HW_OK;
        } else
                status = VXGE_HW_FAIL;
exit:
        return status;
}
/*
 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
 * from MAC address table.
 */
enum vxge_hw_status
__vxge_hw_vpath_addr_get(
        struct vxge_hw_vpath_reg *vpath_reg,
        u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
{
        u32 i;
        u64 val64;
        u64 data1 = 0ULL;
        u64 data2 = 0ULL;
        u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY;
        enum vxge_hw_status status = VXGE_HW_OK;

        while (1) {
                val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

                status = __vxge_hw_pio_mem_write64(val64,
                                &vpath_reg->rts_access_steer_ctrl,
                                VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
                                VXGE_HW_DEF_DEVICE_POLL_MILLIS);
                if (status != VXGE_HW_OK)
                        break;

                val64 = readq(&vpath_reg->rts_access_steer_ctrl);
                if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
                        data1 = readq(&vpath_reg->rts_access_steer_data0);
                        data2 = readq(&vpath_reg->rts_access_steer_data1);

                        data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
                        data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

                        for (i = ETH_ALEN; i > 0; i--) {
                                macaddr[i-1] = (u8)(data1 & 0xFF);
                                data1 >>= 8;

                                macaddr_mask[i-1] = (u8)(data2 & 0xFF);
                                data2 >>= 8;
                        }
                        if (is_valid_ether_addr(macaddr)) {
                                status = VXGE_HW_OK;
                                break;
                        }
                        action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
                } else
                        status = VXGE_HW_FAIL;
        }

        return status;
}
/*
 * __vxge_hw_vpath_mgmt_read
 * This routine reads the vpath_mgmt registers
 */
static enum vxge_hw_status
__vxge_hw_vpath_mgmt_read(
        struct __vxge_hw_virtualpath *vpath)
{
        u32 i, mtu = 0, max_pyld = 0;
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
                val64 = readq(&vpath->vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[i]);
                max_pyld = (u32)
                        VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN(val64);
                if (mtu < max_pyld)
                        mtu = max_pyld;
        }

        vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;

        val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
        if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
                VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
        else
                VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);

        return status;
}

/*
 * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
 * This routine checks the vpath_rst_in_prog register to see if the
 * adapter completed the reset process for the vpath
 */
enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
{
        enum vxge_hw_status status;

        vxge_trace();

        status = __vxge_hw_device_register_poll(
                        &vpath->hldev->common_reg->vpath_rst_in_prog,
                        VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
                                1 << (16 - vpath->vp_id)),
                        VXGE_HW_DEF_DEVICE_POLL_MILLIS);

        return status;
}
/*
 * __vxge_hw_vpath_reset
 * This routine resets the vpath on the device
 */
enum vxge_hw_status
__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        vxge_trace();

        val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                        &hldev->common_reg->cmn_rsthdlr_cfg0);

        return status;
}
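/*
 * Illustrative reading of the bit numbering above: vpath n is mapped to
 * bit (16 - n) of the argument, so for vp_id 0 the argument is 1 << 16
 * and for vp_id 16 it is 1 << 0. Only the upper 32 bits of the assembled
 * 64-bit register image are then written via
 * __vxge_hw_pio_mem_write32_upper(). The same encoding appears in
 * __vxge_hw_vpath_reset_check() and vxge_hw_vpath_enable().
 */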
/*
 * __vxge_hw_vpath_prc_configure
 * This routine configures the prc registers of virtual path using the config
 * passed
 */
void
__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        vxge_trace();

        vpath = &hldev->virtual_path;
        vp_reg = vpath->vp_reg;

        val64 = readq(&vp_reg->prc_cfg1);
        val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
        writeq(val64, &vp_reg->prc_cfg1);

        val64 = readq(&vpath->vp_reg->prc_cfg6);
        val64 &= ~VXGE_HW_PRC_CFG6_RXD_CRXDT(0x1ff);
        val64 &= ~VXGE_HW_PRC_CFG6_RXD_SPAT(0x1ff);
        val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
        val64 |= VXGE_HW_PRC_CFG6_RXD_CRXDT(0x3);
        val64 |= VXGE_HW_PRC_CFG6_RXD_SPAT(0xf);
        writeq(val64, &vpath->vp_reg->prc_cfg6);

        writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
                (u64)virt_to_bus(vpath->ringh.rxdl) >> 3),
                &vp_reg->prc_cfg5);

        val64 = readq(&vp_reg->prc_cfg4);
        val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
        val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
        val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
                VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
        val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;

        writeq(val64, &vp_reg->prc_cfg4);
        return;
}

/*
 * __vxge_hw_vpath_kdfc_configure
 * This routine configures the kdfc registers of virtual path using the
 * config passed
 */
enum vxge_hw_status
__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
        u64 val64;
        u64 vpath_stride;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_virtualpath *vpath;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        vxge_trace();

        vpath = &hldev->virtual_path;
        vp_reg = vpath->vp_reg;

        status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&vp_reg->kdfc_drbl_triplet_total);

        vpath->max_kdfc_db =
                (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
                        val64 + 1) / 2;

        vpath->max_nofl_db = vpath->max_kdfc_db;

        val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
                        (vpath->max_nofl_db * 2) - 1);

        writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);

        writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
                &vp_reg->kdfc_fifo_trpl_ctrl);

        val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
        val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
                        VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));

        val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
                VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
#if (__BYTE_ORDER != __BIG_ENDIAN)
                VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
#endif
                VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);

        writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
        writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
        wmb();

        vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);

        vpath->nofl_db =
                (struct __vxge_hw_non_offload_db_wrapper __iomem *)
                (hldev->kdfc + (vp_id *
                        VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
                                vpath_stride)));
exit:
        return status;
}
/*
 * __vxge_hw_vpath_mac_configure
 * This routine configures the mac of virtual path using the config passed
 */
enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_virtualpath *vpath;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        vxge_trace();

        vpath = &hldev->virtual_path;
        vp_reg = vpath->vp_reg;

        writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
                vpath->vsport_number), &vp_reg->xmac_vsport_choice);

        val64 = readq(&vp_reg->rxmac_vcfg1);
        val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
                VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
        writeq(val64, &vp_reg->rxmac_vcfg1);

        return status;
}
/*
 * __vxge_hw_vpath_tim_configure
 * This routine configures the tim registers of virtual path using the config
 * passed
 */
enum vxge_hw_status
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_virtualpath *vpath;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        vxge_trace();

        vpath = &hldev->virtual_path;
        vp_reg = vpath->vp_reg;

        writeq((u64)0, &vp_reg->tim_dest_addr);
        writeq((u64)0, &vp_reg->tim_vpath_map);
        writeq((u64)0, &vp_reg->tim_bitmap);
        writeq((u64)0, &vp_reg->tim_remap);

        writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
                (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
                VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);

        val64 = readq(&vp_reg->tim_pci_cfg);
        val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
        writeq(val64, &vp_reg->tim_pci_cfg);

        /* TX configuration */
        val64 = VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
                        (VXGE_TTI_BTIMER_VAL * 1000) / 272);
        val64 |= (VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC |
                VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI |
                VXGE_HW_TIM_CFG1_INT_NUM_TXFRM_CNT_EN);
        val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(TTI_TX_URANGE_A) |
                VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(TTI_TX_URANGE_B) |
                VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(TTI_TX_URANGE_C);
        writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);

        val64 = VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(TTI_TX_UFC_A) |
                VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(TTI_TX_UFC_B) |
                VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(TTI_TX_UFC_C) |
                VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(TTI_TX_UFC_D);
        writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);

        val64 = VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
                VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL);
        val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
                        (VXGE_TTI_LTIMER_VAL * 1000) / 272);
        writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);

        /* RX configuration */
        val64 = VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
                        (VXGE_RTI_BTIMER_VAL * 1000) / 272);
        val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
        val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(RTI_RX_URANGE_A) |
                VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(RTI_RX_URANGE_B) |
                VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(RTI_RX_URANGE_C);
        writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);

        val64 = VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(RTI_RX_UFC_A) |
                VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(RTI_RX_UFC_B) |
                VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(RTI_RX_UFC_C) |
                VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(RTI_RX_UFC_D);
        writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);

        val64 = VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
                VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL);
        val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
                        (VXGE_RTI_LTIMER_VAL * 1000) / 272);
        writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);

        val64 = 0;
        writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
        writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
        writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
        writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
        writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
        writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);

        return status;
}
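/*
 * Worked example for the timer arithmetic above (illustrative; assumes the
 * TIM timer tick is roughly 272ns, which is what the divide-by-272 appears
 * to encode): (VXGE_TTI_BTIMER_VAL * 1000) / 272 converts a microsecond
 * setting into device ticks, e.g. a 250us timer becomes
 * 250000 / 272 ~= 919 ticks.
 */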
/*
 * __vxge_hw_vpath_initialize
 * This routine is the final phase of init which initializes the
 * registers of the vpath using the configuration passed.
 */
enum vxge_hw_status
__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
{
        u64 val64;
        u32 val32;
        int i;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_virtualpath *vpath;
        struct vxge_hw_vpath_reg *vp_reg;

        vxge_trace();

        vpath = &hldev->virtual_path;

        if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
                status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
                goto exit;
        }
        vp_reg = vpath->vp_reg;

        status = __vxge_hw_legacy_swapper_set(hldev->legacy_reg);
        if (status != VXGE_HW_OK)
                goto exit;

        status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                if (val64 & vxge_mBIT(i))
                        vpath->vsport_number = i;
        }

        status = __vxge_hw_vpath_mac_configure(hldev);
        if (status != VXGE_HW_OK)
                goto exit;

        status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
        if (status != VXGE_HW_OK)
                goto exit;

        status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);

        /* Get MRRS value from device control */
        status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
        if (status == VXGE_HW_OK) {
                val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
                val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
                val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
                val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
        }

        val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
        val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
                        VXGE_HW_MAX_PAYLOAD_SIZE_512);

        val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
        writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
exit:
        return status;
}
/*
 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
 * This routine is the initial phase of init which resets the vpath and
 * initializes the software support structures.
 */
enum vxge_hw_status
__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
                struct __vxge_hw_virtualpath *vpath)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        vxge_trace();

        if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
                status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
                goto exit;
        }

        vpath->vp_id = vp_id;
        vpath->vp_open = VXGE_HW_VP_OPEN;
        vpath->hldev = hldev;
        vpath->vp_reg = hldev->vpath_reg[vp_id];
        vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];

        __vxge_hw_vpath_reset(hldev, vp_id);

        status = __vxge_hw_vpath_reset_check(vpath);
        if (status != VXGE_HW_OK) {
                memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
                goto exit;
        }

        VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
                hldev->tim_int_mask1, vp_id);

        status = __vxge_hw_vpath_initialize(hldev, vp_id);
        if (status != VXGE_HW_OK) {
                __vxge_hw_vp_terminate(hldev, vpath);
                goto exit;
        }

        status = __vxge_hw_vpath_mgmt_read(vpath);
exit:
        return status;
}
/*
 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
 * This routine closes all channels it opened and frees up the memory
 */
void
__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev,
                struct __vxge_hw_virtualpath *vpath)
{
        vxge_trace();

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
                return;

        VXGE_HW_DEVICE_TIM_INT_MASK_RESET(hldev->tim_int_mask0,
                hldev->tim_int_mask1, vpath->vp_id);

        memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
}
/*
 * vxge_hw_vpath_mtu_set - Set MTU.
 * Set new MTU value. Example, to use jumbo frames:
 * vxge_hw_vpath_mtu_set(my_vpath, 9600);
 */
enum vxge_hw_status
vxge_hw_vpath_mtu_set(struct __vxge_hw_virtualpath *vpath, u32 new_mtu)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        vxge_trace();

        new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;

        /* Reject out-of-range values without touching the hardware */
        if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
                status = VXGE_HW_ERR_INVALID_MTU_SIZE;
                return status;
        }

        val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

        val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
        val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);

        writeq(val64, &vpath->vp_reg->rxmac_vcfg0);

        return status;
}
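/*
 * Example (from the comment above): to switch an open vpath to jumbo
 * frames, assuming the link partner supports them:
 *
 *      status = vxge_hw_vpath_mtu_set(vpath, 9600);
 *      if (status == VXGE_HW_ERR_INVALID_MTU_SIZE)
 *              ; // requested size outside [VXGE_HW_MIN_MTU, vpath->max_mtu]
 */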
/*
 * vxge_hw_vpath_open - Open a virtual path on a given adapter
 * This function is used to open access to virtual path of an
 * adapter for offload, GRO operations. This function returns
 * synchronously.
 */
enum vxge_hw_status
vxge_hw_vpath_open(struct __vxge_hw_device *hldev, struct vxge_vpath *vpath)
{
        struct __vxge_hw_virtualpath *vpathh;
        enum vxge_hw_status status;

        vxge_trace();

        vpathh = &hldev->virtual_path;

        if (vpath->vp_open == VXGE_HW_VP_OPEN) {
                status = VXGE_HW_ERR_INVALID_STATE;
                goto vpath_open_exit1;
        }

        status = __vxge_hw_vp_initialize(hldev, hldev->first_vp_id, vpathh);
        if (status != VXGE_HW_OK)
                goto vpath_open_exit1;

        status = __vxge_hw_fifo_create(vpathh, &vpathh->fifoh);
        if (status != VXGE_HW_OK)
                goto vpath_open_exit2;

        status = __vxge_hw_ring_create(vpathh, &vpathh->ringh);
        if (status != VXGE_HW_OK)
                goto vpath_open_exit3;

        __vxge_hw_vpath_prc_configure(hldev);

        return VXGE_HW_OK;

vpath_open_exit3:
        __vxge_hw_fifo_delete(&vpathh->fifoh);
vpath_open_exit2:
        __vxge_hw_vp_terminate(hldev, vpathh);
vpath_open_exit1:
        return status;
}
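/*
 * Illustrative open/close lifecycle (hypothetical caller; "vdev->vpath" is
 * an assumed name for the driver-side vxge_vpath structure): open the
 * vpath, prime the RxD doorbell, and close it again on shutdown:
 *
 *      status = vxge_hw_vpath_open(hldev, &vdev->vpath);
 *      if (status != VXGE_HW_OK)
 *              return status;
 *      vxge_hw_vpath_rx_doorbell_init(&hldev->virtual_path);
 *      // ... traffic ...
 *      vxge_hw_vpath_close(&hldev->virtual_path);
 */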
/*
 * vxge_hw_vpath_rx_doorbell_init - Post the count of the refreshed region
 * of RxD list
 * @vp: vpath handle
 *
 * This function decides on the Rxd replenish count depending on the
 * descriptor memory that has been allocated to this VPath.
 */
void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_virtualpath *vpath)
{
        u64 new_count, val64;

        vxge_trace();

        if (vpath->hldev->titan1) {
                new_count = readq(&vpath->vp_reg->rxdmem_size);
                new_count &= 0x1fff;
        } else
                new_count = VXGE_HW_RING_RXD_QWORDS_MODE_1 * 4;

        val64 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));

        writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val64),
                &vpath->vp_reg->prc_rxd_doorbell);
}
/*
 * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
 * This function is used to close access to a virtual path opened
 * earlier.
 */
enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_virtualpath *vpath)
{
        struct __vxge_hw_device *devh = NULL;
        u32 vp_id = vpath->vp_id;
        enum vxge_hw_status status = VXGE_HW_OK;

        vxge_trace();

        devh = vpath->hldev;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto vpath_close_exit;
        }

        devh->vpaths_deployed &= ~vxge_mBIT(vp_id);

        __vxge_hw_ring_delete(&vpath->ringh);
        __vxge_hw_fifo_delete(&vpath->fifoh);

        __vxge_hw_vp_terminate(devh, vpath);

        vpath->vp_open = VXGE_HW_VP_NOT_OPEN;

vpath_close_exit:
        return status;
}
/*
 * vxge_hw_vpath_reset - Resets vpath
 * This function is used to request a reset of the vpath
 */
enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_virtualpath *vpath)
{
        enum vxge_hw_status status;
        u32 vp_id;

        vxge_trace();

        vp_id = vpath->vp_id;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }

        status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
exit:
        return status;
}

/*
 * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
 * This function polls for the vpath reset completion and re-initializes
 * the vpath.
 */
enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(struct __vxge_hw_virtualpath *vpath)
{
        enum vxge_hw_status status;
        struct __vxge_hw_device *hldev;
        u32 vp_id;

        vxge_trace();

        vp_id = vpath->vp_id;
        hldev = vpath->hldev;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }

        status = __vxge_hw_vpath_reset_check(vpath);
        if (status != VXGE_HW_OK)
                goto exit;

        status = __vxge_hw_vpath_initialize(hldev, vp_id);
        if (status != VXGE_HW_OK)
                goto exit;

        __vxge_hw_vpath_prc_configure(hldev);
exit:
        return status;
}
/*
 * vxge_hw_vpath_enable - Enable vpath.
 * This routine clears the vpath reset thereby enabling a vpath
 * to start forwarding frames and generating interrupts.
 */
void
vxge_hw_vpath_enable(struct __vxge_hw_virtualpath *vpath)
{
        struct __vxge_hw_device *hldev;
        u64 val64;

        vxge_trace();

        hldev = vpath->hldev;

        val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
                1 << (16 - vpath->vp_id));

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                &hldev->common_reg->cmn_rsthdlr_cfg1);
}