
golan.c

/*
 * Copyright (C) 2013-2015 Mellanox Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

FILE_LICENCE ( GPL2_OR_LATER );

#include <errno.h>
#include <strings.h>
#include <ipxe/malloc.h>
#include <ipxe/umalloc.h>
#include <ipxe/infiniband.h>
#include <ipxe/ib_smc.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>
#include "flexboot_nodnic.h"
#include <ipxe/ethernet.h>
#include <ipxe/if_ether.h>
#include <usr/ifmgmt.h>
#include <ipxe/in.h>
#include <byteswap.h>
#include "mlx_utils/include/public/mlx_pci_gw.h"
#include <config/general.h>
#include <ipxe/ipoib.h>
#include "mlx_nodnic/include/mlx_port.h"
#include "nodnic_shomron_prm.h"
#include "golan.h"
#include "mlx_utils/include/public/mlx_bail.h"
#include "mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.h"

#define DEVICE_IS_CIB( device ) ( device == 0x1011 )

/******************************************************************************/
/************* Very simple memory management for umalloced pages **************/
/******* Temporary solution until full memory management is implemented *******/
/******************************************************************************/

struct golan_page {
        struct list_head list;
        userptr_t addr;
};

static void golan_free_fw_areas ( struct golan *golan ) {
        int i;

        for ( i = 0; i < GOLAN_FW_AREAS_NUM; i++ ) {
                if ( golan->fw_areas[i].area ) {
                        ufree ( golan->fw_areas[i].area );
                        golan->fw_areas[i].area = UNULL;
                }
        }
}

static int golan_init_fw_areas ( struct golan *golan ) {
        int rc = 0, i = 0;

        if ( ! golan ) {
                rc = -EINVAL;
                goto err_golan_init_fw_areas_bad_param;
        }

        for ( i = 0; i < GOLAN_FW_AREAS_NUM; i++ )
                golan->fw_areas[i].area = UNULL;

        return rc;

err_golan_init_fw_areas_bad_param:
        return rc;
}

/******************************************************************************/

const char *golan_qp_state_as_string[] = {
        "RESET",
        "INIT",
        "RTR",
        "RTS",
        "SQD",
        "SQE",
        "ERR"
};

static inline int golan_check_rc_and_cmd_status ( struct golan_cmd_layout *cmd, int rc ) {
        struct golan_outbox_hdr *out_hdr = ( struct golan_outbox_hdr * ) ( cmd->out );

        if ( rc == -EBUSY ) {
                DBG ( "HCA is busy (rc = -EBUSY)\n" );
                return rc;
        } else if ( out_hdr->status ) {
                DBG ( "%s status = 0x%x - syndrome = 0x%x\n", __FUNCTION__,
                      out_hdr->status, be32_to_cpu ( out_hdr->syndrome ) );
                return out_hdr->status;
        }
        return 0;
}

#define GOLAN_CHECK_RC_AND_CMD_STATUS(_label) \
        do { \
                if ( ( rc = golan_check_rc_and_cmd_status ( cmd, rc ) ) ) \
                        goto _label; \
        } while (0)

#define GOLAN_PRINT_RC_AND_CMD_STATUS golan_check_rc_and_cmd_status ( cmd, rc )

struct mbox {
        union {
                struct golan_cmd_prot_block mblock;
                u8 data[MAILBOX_STRIDE];
                __be64 qdata[MAILBOX_STRIDE >> 3];
        };
};

static inline uint32_t ilog2 ( uint32_t mem )
{
        return ( fls ( mem ) - 1 );
}
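/*
 * Note: this local ilog2() is a floor-log2 built on fls() ("find last
 * set"), so it is exact only for powers of two. Illustrative values
 * (not part of the driver):
 *
 *	ilog2 ( 1 )    == 0
 *	ilog2 ( 4096 ) == 12	// one GOLAN_PAGE_SIZE page
 *	ilog2 ( 6 )    == 2	// rounds down for non-powers of two
 *
 * The queue sizes passed to the firmware below are therefore expected
 * to be powers of two.
 */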
#define CTRL_SIG_SZ (sizeof(mailbox->mblock) - sizeof(mailbox->mblock.bdata) - 2)

static inline u8 xor8_buf ( void *buf, int len )
{
        u8 sum = 0;
        int i;
        u8 *ptr = buf;

        for ( i = 0; i < len; ++i )
                sum ^= ptr[i];

        return sum;
}
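/*
 * Each command block and mailbox block carries an XOR-8 signature: the
 * stored value is the bitwise NOT of the XOR of the covered bytes, so a
 * checker recomputes xor8_buf() over the same span and compares. For
 * the command layout itself the signature byte lies inside the covered
 * span (and is zero when the XOR is taken), so XOR-ing the whole block
 * afterwards yields 0xff. Illustrative check only, not driver code:
 *
 *	if ( xor8_buf ( cmd, sizeof ( *cmd ) ) != 0xff )
 *		DBG ( "Corrupted command block\n" );
 *
 * golan_calc_sig() below writes signatures of exactly this form into
 * cmd->sig and each mailbox's ctrl_sig.
 */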
static inline const char *cmd_status_str ( u8 status )
{
        switch ( status ) {
        case 0x0:       return "OK";
        case 0x1:       return "internal error";
        case 0x2:       return "bad operation";
        case 0x3:       return "bad parameter";
        case 0x4:       return "bad system state";
        case 0x5:       return "bad resource";
        case 0x6:       return "resource busy";
        case 0x8:       return "limits exceeded";
        case 0x9:       return "bad resource state";
        case 0xa:       return "bad index";
        case 0xf:       return "no resources";
        case 0x50:      return "bad input length";
        case 0x51:      return "bad output length";
        case 0x10:      return "bad QP state";
        case 0x30:      return "bad packet (discarded)";
        case 0x40:      return "bad size too many outstanding CQEs";
        case 0xff:      return "command timed out";
        default:        return "unknown status";
        }
}

static inline uint16_t fw_rev_maj ( struct golan *golan )
{
        return be32_to_cpu ( readl ( &golan->iseg->fw_rev ) ) & 0xffff;
}

static inline u16 fw_rev_min ( struct golan *golan )
{
        return be32_to_cpu ( readl ( &golan->iseg->fw_rev ) ) >> 16;
}

static inline u16 fw_rev_sub ( struct golan *golan )
{
        return be32_to_cpu ( readl ( &golan->iseg->cmdif_rev_fw_sub ) ) & 0xffff;
}

static inline u16 cmdif_rev ( struct golan *golan )
{
        return be32_to_cpu ( readl ( &golan->iseg->cmdif_rev_fw_sub ) ) >> 16;
}

static inline struct golan_cmd_layout *get_cmd ( struct golan *golan, int idx )
{
        return golan->cmd.addr + ( idx << golan->cmd.log_stride );
}

static inline void golan_calc_sig ( struct golan *golan, uint32_t cmd_idx,
                                    uint32_t inbox_idx, uint32_t outbox_idx )
{
        struct golan_cmd_layout *cmd = get_cmd ( golan, cmd_idx );
        struct mbox *mailbox = NULL;

        if ( inbox_idx != NO_MBOX ) {
                mailbox = GET_INBOX ( golan, inbox_idx );
                mailbox->mblock.token = cmd->token;
                mailbox->mblock.ctrl_sig = ~xor8_buf ( mailbox->mblock.rsvd0,
                                                       CTRL_SIG_SZ );
        }

        if ( outbox_idx != NO_MBOX ) {
                mailbox = GET_OUTBOX ( golan, outbox_idx );
                mailbox->mblock.token = cmd->token;
                mailbox->mblock.ctrl_sig = ~xor8_buf ( mailbox->mblock.rsvd0,
                                                       CTRL_SIG_SZ );
        }

        cmd->sig = ~xor8_buf ( cmd, sizeof ( *cmd ) );
}

static inline void show_out_status ( uint32_t *out )
{
        DBG ( "%x\n", be32_to_cpu ( out[0] ) );
        DBG ( "%x\n", be32_to_cpu ( out[1] ) );
        DBG ( "%x\n", be32_to_cpu ( out[2] ) );
        DBG ( "%x\n", be32_to_cpu ( out[3] ) );
}

/**
 * Check if a command has finished (i.e. ownership returned to software)
 */
static inline uint32_t is_command_finished ( struct golan *golan, int idx )
{
        wmb();
        return ! ( get_cmd ( golan, idx )->status_own & CMD_OWNER_HW );
}

/**
 * Wait for Golan command completion
 *
 * @v golan		Golan device
 * @v idx		Command queue entry index
 * @v command		Command name (for debug messages)
 * @ret rc		Return status code
 */
static inline int golan_cmd_wait ( struct golan *golan, int idx,
                                   const char *command )
{
        unsigned int wait;
        int rc = -EBUSY;

        for ( wait = GOLAN_HCR_MAX_WAIT_MS ; wait ; --wait ) {
                if ( is_command_finished ( golan, idx ) ) {
                        rc = CMD_STATUS ( golan, idx );
                        rmb();
                        break;
                } else {
                        mdelay ( 1 );
                }
        }

        if ( rc ) {
                DBGC ( golan, "[%s] RC is %s[%x]\n", command,
                       cmd_status_str ( rc ), rc );
        }

        golan->cmd_bm &= ~( 1 << idx );
        return rc;
}
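/*
 * Note: completion is detected purely by polling the ownership bit;
 * there is no interrupt path here. With mdelay(1) per iteration,
 * GOLAN_HCR_MAX_WAIT_MS bounds the total wait in milliseconds, and the
 * final CMD_STATUS() value doubles as the return code (0 on success, a
 * cmd_status_str() code otherwise). If the hardware never returns
 * ownership, the initial -EBUSY is what the caller sees.
 */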
/**
 * Notify the HW that commands are ready
 */
static inline void send_command ( struct golan *golan )
{
        wmb(); /* Make sure the command is visible in "memory" */
        writel ( cpu_to_be32 ( golan->cmd_bm ), &golan->iseg->cmd_dbell );
}

static inline int send_command_and_wait ( struct golan *golan, uint32_t cmd_idx,
                                          uint32_t inbox_idx, uint32_t outbox_idx,
                                          const char *command )
{
        golan_calc_sig ( golan, cmd_idx, inbox_idx, outbox_idx );
        send_command ( golan );
        return golan_cmd_wait ( golan, cmd_idx, command );
}

/**
 * Prepare a FW command: takes a command index (which must be valid) and
 * writes the command parameters into the command queue entry.
 */
static inline struct golan_cmd_layout *write_cmd ( struct golan *golan, int idx,
                                                   uint16_t opcode, uint16_t opmod,
                                                   uint16_t inbox_idx,
                                                   uint16_t outbox_idx, uint16_t inlen,
                                                   uint16_t outlen )
{
        struct golan_cmd_layout *cmd = get_cmd ( golan, idx );
        struct golan_inbox_hdr *hdr = ( struct golan_inbox_hdr * ) cmd->in;
        static uint8_t token;

        memset ( cmd, 0, sizeof ( *cmd ) );

        cmd->type = GOLAN_PCI_CMD_XPORT;
        cmd->status_own = CMD_OWNER_HW;
        cmd->outlen = cpu_to_be32 ( outlen );
        cmd->inlen = cpu_to_be32 ( inlen );
        hdr->opcode = cpu_to_be16 ( opcode );
        hdr->opmod = cpu_to_be16 ( opmod );

        if ( inbox_idx != NO_MBOX ) {
                memset ( GET_INBOX ( golan, inbox_idx ), 0, MAILBOX_SIZE );
                cmd->in_ptr = VIRT_2_BE64_BUS ( GET_INBOX ( golan, inbox_idx ) );
                cmd->token = ++token;
        }
        if ( outbox_idx != NO_MBOX ) {
                memset ( GET_OUTBOX ( golan, outbox_idx ), 0, MAILBOX_SIZE );
                cmd->out_ptr = VIRT_2_BE64_BUS ( GET_OUTBOX ( golan, outbox_idx ) );
        }

        golan->cmd_bm |= 1 << idx;

        assert ( cmd != NULL );
        return cmd;
}
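/*
 * Every firmware command in this file follows the same pattern:
 * write_cmd() fills a command queue entry (pointing it at the shared
 * in/out mailboxes when the payload does not fit inline),
 * send_command_and_wait() signs the blocks, rings the doorbell and
 * polls for completion, and the GOLAN_* status macros decode the
 * outcome. A minimal sketch, with a hypothetical opcode and mailbox
 * structs standing in for the real ones:
 *
 *	cmd = write_cmd ( golan, DEF_CMD_IDX, SOME_OPCODE, 0x0,
 *			  NO_MBOX, NO_MBOX,
 *			  sizeof ( struct some_mbox_in ),
 *			  sizeof ( struct some_mbox_out ) );
 *	rc = send_command_and_wait ( golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX,
 *				     __FUNCTION__ );
 *	GOLAN_CHECK_RC_AND_CMD_STATUS ( err_some_cmd );
 *	// success: inline results are available in cmd->out
 */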
static inline int golan_core_enable_hca ( struct golan *golan )
{
        struct golan_cmd_layout *cmd;
        int rc = 0;

        DBGC ( golan, "%s\n", __FUNCTION__ );

        cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_ENABLE_HCA, 0x0,
                          NO_MBOX, NO_MBOX,
                          sizeof ( struct golan_enable_hca_mbox_in ),
                          sizeof ( struct golan_enable_hca_mbox_out ) );

        rc = send_command_and_wait ( golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX,
                                     __FUNCTION__ );
        GOLAN_PRINT_RC_AND_CMD_STATUS;
        return rc;
}

static inline void golan_disable_hca ( struct golan *golan )
{
        struct golan_cmd_layout *cmd;
        int rc;

        cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_DISABLE_HCA, 0x0,
                          NO_MBOX, NO_MBOX,
                          sizeof ( struct golan_disable_hca_mbox_in ),
                          sizeof ( struct golan_disable_hca_mbox_out ) );
        rc = send_command_and_wait ( golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX,
                                     __FUNCTION__ );
        GOLAN_PRINT_RC_AND_CMD_STATUS;
}

static inline int golan_set_hca_cap ( struct golan *golan )
{
        struct golan_cmd_layout *cmd;
        int rc;

        DBGC ( golan, "%s\n", __FUNCTION__ );

        cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_SET_HCA_CAP, 0x0,
                          GEN_MBOX, NO_MBOX,
                          sizeof ( struct golan_cmd_set_hca_cap_mbox_in ),
                          sizeof ( struct golan_cmd_set_hca_cap_mbox_out ) );

        golan->caps.flags &= ~GOLAN_DEV_CAP_FLAG_CMDIF_CSUM;
        DBGC ( golan, "%s caps.uar_sz = %d\n", __FUNCTION__, golan->caps.uar_sz );
        DBGC ( golan, "%s caps.log_pg_sz = %d\n", __FUNCTION__, golan->caps.log_pg_sz );
        DBGC ( golan, "%s caps.uar_page_sz = %d\n", __FUNCTION__,
               be32_to_cpu ( golan->caps.uar_page_sz ) );
        golan->caps.uar_page_sz = 0;
        golan->caps.log_max_qp = GOLAN_LOG_MAX_QP;

        memcpy ( ( ( struct golan_hca_cap * ) GET_INBOX ( golan, GEN_MBOX ) ),
                 &(golan->caps),
                 sizeof ( struct golan_hca_cap ) );

        /* if the command fails we should reset the caps in golan->caps */
        rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX,
                                     __FUNCTION__ );
        GOLAN_PRINT_RC_AND_CMD_STATUS;
        return rc;
}

static inline int golan_qry_hca_cap ( struct golan *golan )
{
        struct golan_cmd_layout *cmd;
        int rc = 0;

        cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_CAP, 0x1,
                          NO_MBOX, GEN_MBOX,
                          sizeof ( struct golan_cmd_query_hca_cap_mbox_in ),
                          sizeof ( struct golan_cmd_query_hca_cap_mbox_out ) );

        rc = send_command_and_wait ( golan, DEF_CMD_IDX, NO_MBOX, GEN_MBOX,
                                     __FUNCTION__ );
        GOLAN_CHECK_RC_AND_CMD_STATUS ( err_query_hca_cap );

        memcpy ( &(golan->caps),
                 ( ( struct golan_hca_cap * ) GET_OUTBOX ( golan, GEN_MBOX ) ),
                 sizeof ( struct golan_hca_cap ) );

err_query_hca_cap:
        return rc;
}

static inline int golan_take_pages ( struct golan *golan, uint32_t pages,
                                     __be16 func_id ) {
        uint32_t out_num_entries = 0;
        int size_ibox = 0;
        int size_obox = 0;
        int rc = 0;

        DBGC ( golan, "%s\n", __FUNCTION__ );

        while ( pages > 0 ) {
                uint32_t pas_num = min ( pages, MAX_PASE_MBOX );
                struct golan_cmd_layout *cmd;
                struct golan_manage_pages_inbox *in;

                size_ibox = sizeof ( struct golan_manage_pages_inbox ) +
                            ( pas_num * GOLAN_PAS_SIZE );
                size_obox = sizeof ( struct golan_manage_pages_outbox ) +
                            ( pas_num * GOLAN_PAS_SIZE );

                cmd = write_cmd ( golan, MEM_CMD_IDX, GOLAN_CMD_OP_MANAGE_PAGES,
                                  GOLAN_PAGES_TAKE,
                                  MEM_MBOX, MEM_MBOX,
                                  size_ibox,
                                  size_obox );

                in = ( struct golan_manage_pages_inbox * ) cmd->in;
                /* Warning: we can't use the last 2 fields */
                in->func_id = func_id;          /* Already BE */
                in->num_entries = cpu_to_be32 ( pas_num );

                if ( ( rc = send_command_and_wait ( golan, MEM_CMD_IDX, MEM_MBOX,
                                                    MEM_MBOX, __FUNCTION__ ) ) == 0 ) {
                        out_num_entries = be32_to_cpu (
                                ( ( struct golan_manage_pages_outbox * )
                                  ( cmd->out ) )->num_entries );
                } else {
                        if ( rc == -EBUSY ) {
                                DBGC ( golan, "HCA is busy (rc = -EBUSY)\n" );
                        } else {
                                DBGC ( golan, "%s: rc = 0x%x[%s]<%x> syn 0x%x[0x%x] for %d pages\n",
                                       __FUNCTION__, rc, cmd_status_str ( rc ),
                                       CMD_SYND ( golan, MEM_CMD_IDX ),
                                       get_cmd ( golan, MEM_CMD_IDX )->status_own,
                                       be32_to_cpu ( CMD_SYND ( golan, MEM_CMD_IDX ) ),
                                       pas_num );
                        }
                        return rc;
                }

                pages -= out_num_entries;
        }

        DBGC ( golan, "%s Pages handled\n", __FUNCTION__ );
        return rc;
}

static inline int golan_provide_pages ( struct golan *golan, uint32_t pages,
                                        __be16 func_id,
                                        struct golan_firmware_area *fw_area ) {
        struct mbox *mailbox;
        int size_ibox = 0;
        int size_obox = 0;
        int rc = 0;
        userptr_t next_page_addr = UNULL;

        DBGC ( golan, "%s\n", __FUNCTION__ );
        if ( ! fw_area->area ) {
                fw_area->area = umalloc ( GOLAN_PAGE_SIZE * pages );
                if ( fw_area->area == UNULL ) {
                        rc = -ENOMEM;
                        DBGC ( golan, "Failed to allocate %d pages\n", pages );
                        goto err_golan_alloc_fw_area;
                }
                fw_area->npages = pages;
        }
        assert ( fw_area->npages == pages );
        next_page_addr = fw_area->area;

        while ( pages > 0 ) {
                uint32_t pas_num = min ( pages, MAX_PASE_MBOX );
                unsigned i, j;
                struct golan_cmd_layout *cmd;
                struct golan_manage_pages_inbox *in;
                userptr_t addr = 0;

                mailbox = GET_INBOX ( golan, MEM_MBOX );
                size_ibox = sizeof ( struct golan_manage_pages_inbox ) +
                            ( pas_num * GOLAN_PAS_SIZE );
                size_obox = sizeof ( struct golan_manage_pages_outbox ) +
                            ( pas_num * GOLAN_PAS_SIZE );

                cmd = write_cmd ( golan, MEM_CMD_IDX, GOLAN_CMD_OP_MANAGE_PAGES,
                                  GOLAN_PAGES_GIVE,
                                  MEM_MBOX, MEM_MBOX,
                                  size_ibox,
                                  size_obox );

                in = ( struct golan_manage_pages_inbox * ) cmd->in;
                /* Warning: we can't use the last 2 fields */
                in->func_id = func_id;          /* Already BE */
                in->num_entries = cpu_to_be32 ( pas_num );

                for ( i = 0, j = MANAGE_PAGES_PSA_OFFSET; i < pas_num;
                      ++i, ++j, next_page_addr += GOLAN_PAGE_SIZE ) {
                        addr = next_page_addr;
                        if ( GOLAN_PAGE_MASK & user_to_phys ( addr, 0 ) ) {
                                DBGC ( golan, "Address is not page aligned [%lx %lx]\n",
                                       user_to_phys ( addr, 0 ), addr );
                        }
                        mailbox->mblock.data[j] = USR_2_BE64_BUS ( addr );
                }

                if ( ( rc = send_command_and_wait ( golan, MEM_CMD_IDX, MEM_MBOX,
                                                    MEM_MBOX, __FUNCTION__ ) ) == 0 ) {
                        pages -= pas_num;
                        golan->total_dma_pages += pas_num;
                } else {
                        if ( rc == -EBUSY ) {
                                DBGC ( golan, "HCA is busy (rc = -EBUSY)\n" );
                        } else {
                                DBGC ( golan, "%s: rc = 0x%x[%s]<%x> syn 0x%x[0x%x] for %d pages\n",
                                       __FUNCTION__, rc, cmd_status_str ( rc ),
                                       CMD_SYND ( golan, MEM_CMD_IDX ),
                                       get_cmd ( golan, MEM_CMD_IDX )->status_own,
                                       be32_to_cpu ( CMD_SYND ( golan, MEM_CMD_IDX ) ),
                                       pas_num );
                        }
                        goto err_send_command;
                }
        }
        DBGC ( golan, "%s Pages handled\n", __FUNCTION__ );
        return 0;

err_send_command:
err_golan_alloc_fw_area:
        /* Go over the inbox and free pages */
        /* Send error to FW */
        /* What is next - disable HCA? */
        DBGC ( golan, "%s Failed (rc = 0x%x)\n", __FUNCTION__, rc );
        return rc;
}

static inline int golan_handle_pages ( struct golan *golan,
                                       enum golan_qry_pages_mode qry,
                                       enum golan_manage_pages_mode mode )
{
        struct golan_cmd_layout *cmd;
        int rc = 0;
        int32_t pages;
        uint16_t total_pages;
        __be16 func_id;

        DBGC ( golan, "%s\n", __FUNCTION__ );

        cmd = write_cmd ( golan, MEM_CMD_IDX, GOLAN_CMD_OP_QUERY_PAGES, qry,
                          NO_MBOX, NO_MBOX,
                          sizeof ( struct golan_query_pages_inbox ),
                          sizeof ( struct golan_query_pages_outbox ) );

        rc = send_command_and_wait ( golan, MEM_CMD_IDX, NO_MBOX, NO_MBOX,
                                     __FUNCTION__ );
        GOLAN_CHECK_RC_AND_CMD_STATUS ( err_handle_pages_query );

        pages = be32_to_cpu ( QRY_PAGES_OUT ( golan, MEM_CMD_IDX )->num_pages );
        DBGC ( golan, "%s pages needed: %d\n", __FUNCTION__, pages );
        func_id = QRY_PAGES_OUT ( golan, MEM_CMD_IDX )->func_id;

        /* A negative count means the firmware has pages to return */
        total_pages = ( ( pages >= 0 ) ? pages : ( pages * ( -1 ) ) );

        if ( mode == GOLAN_PAGES_GIVE ) {
                rc = golan_provide_pages ( golan, total_pages, func_id,
                                           &( golan->fw_areas[qry-1] ) );
        } else {
                rc = golan_take_pages ( golan, golan->total_dma_pages, func_id );
                golan->total_dma_pages = 0;
        }

        if ( rc ) {
                DBGC ( golan, "Failed to %s pages (rc = %d) - DMA pages allocated = %d\n",
                       ( ( mode == GOLAN_PAGES_GIVE ) ? "give" : "take" ), rc,
                       golan->total_dma_pages );
                return rc;
        }

        return 0;

err_handle_pages_query:
        DBGC ( golan, "%s Query pages failed (rc = 0x%x)\n", __FUNCTION__, rc );
        return rc;
}
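/*
 * Page-handling protocol, as implemented above: QUERY_PAGES returns a
 * signed page count (positive: the firmware needs more pages; negative:
 * the firmware has pages to return), so total_pages is its absolute
 * value. The give path then feeds pages to the firmware in chunks of at
 * most MAX_PASE_MBOX page addresses per MANAGE_PAGES command, while the
 * take path reclaims everything previously given
 * (golan->total_dma_pages). The fw_areas[qry-1] indexing assumes the
 * query modes map to consecutive firmware-area slots.
 */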
static inline int golan_set_access_reg ( struct golan *golan __attribute__ (( unused )),
                                         uint32_t reg __attribute__ (( unused )) )
{
#if 0
        write_cmd ( golan, _CMD_IDX, GOLAN_CMD_OP_QUERY_PAGES, 0x0,
                    NO_MBOX, NO_MBOX,
                    sizeof ( struct golan_reg_host_endianess ),
                    sizeof ( struct golan_reg_host_endianess ) );
        in->arg = cpu_to_be32 ( arg );
        in->register_id = cpu_to_be16 ( reg_num );
#endif
        DBGC ( golan, "%s Not implemented yet\n", __FUNCTION__ );
        return 0;
}

static inline void golan_cmd_uninit ( struct golan *golan )
{
        free_dma ( golan->mboxes.outbox, GOLAN_PAGE_SIZE );
        free_dma ( golan->mboxes.inbox, GOLAN_PAGE_SIZE );
        free_dma ( golan->cmd.addr, GOLAN_PAGE_SIZE );
}

/**
 * Initialise Golan command queue parameters
 * -- Allocate a 4KB page for the command queue
 * -- Read the stride and log number of commands available
 * -- Write the address to cmdq_phy_addr in iseg
 *
 * @v golan		Golan device
 */
static inline int golan_cmd_init ( struct golan *golan )
{
        int rc = 0;
        uint32_t addr_l_sz;

        if ( ! ( golan->cmd.addr = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE ) ) ) {
                rc = -ENOMEM;
                goto malloc_dma_failed;
        }
        if ( ! ( golan->mboxes.inbox = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE ) ) ) {
                rc = -ENOMEM;
                goto malloc_dma_inbox_failed;
        }
        if ( ! ( golan->mboxes.outbox = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE ) ) ) {
                rc = -ENOMEM;
                goto malloc_dma_outbox_failed;
        }
        addr_l_sz = be32_to_cpu ( readl ( &golan->iseg->cmdq_addr_l_sz ) );
        golan->cmd.log_stride = addr_l_sz & 0xf;
        golan->cmd.size = 1 << ( ( addr_l_sz >> 4 ) & 0xf );

        addr_l_sz = virt_to_bus ( golan->cmd.addr );
        writel ( 0 /* cpu_to_be32(golan->cmd.addr) >> 32 */, &golan->iseg->cmdq_addr_h );
        writel ( cpu_to_be32 ( addr_l_sz ), &golan->iseg->cmdq_addr_l_sz );
        wmb(); /* Make sure the address is visible in "memory" */

        addr_l_sz = be32_to_cpu ( readl ( &golan->iseg->cmdq_addr_l_sz ) );

        DBGC ( golan, "%s Command interface was initialized\n", __FUNCTION__ );
        return 0;

malloc_dma_outbox_failed:
        free_dma ( golan->mboxes.inbox, GOLAN_PAGE_SIZE );
malloc_dma_inbox_failed:
        free_dma ( golan->cmd.addr, GOLAN_PAGE_SIZE );
malloc_dma_failed:
        DBGC ( golan, "%s Failed to initialize command interface (rc = 0x%x)\n",
               __FUNCTION__, rc );
        return rc;
}
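/*
 * Note on cmdq_addr_l_sz as golan_cmd_init() uses it: the low 4 bits
 * are read as log2 of the command entry stride and bits 7:4 as log2 of
 * the number of command queue entries. The same register is then
 * written with the low 32 bits of the page-aligned command queue
 * address; since a page-aligned address has its low 12 bits clear, the
 * code relies on that write not corrupting the size fields. For example
 * (hypothetical value): a read of 0x45 would mean 2^4 = 16 entries with
 * a stride of 2^5 = 32 bytes.
 */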
static inline int golan_hca_init ( struct golan *golan )
{
        struct golan_cmd_layout *cmd;
        int rc = 0;

        DBGC ( golan, "%s\n", __FUNCTION__ );

        cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_INIT_HCA, 0x0,
                          NO_MBOX, NO_MBOX,
                          sizeof ( struct golan_cmd_init_hca_mbox_in ),
                          sizeof ( struct golan_cmd_init_hca_mbox_out ) );

        rc = send_command_and_wait ( golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX,
                                     __FUNCTION__ );
        GOLAN_PRINT_RC_AND_CMD_STATUS;
        return rc;
}

static inline void golan_teardown_hca ( struct golan *golan, enum golan_teardown op_mod )
{
        struct golan_cmd_layout *cmd;
        int rc;

        DBGC ( golan, "%s in\n", __FUNCTION__ );

        cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_TEARDOWN_HCA, op_mod,
                          NO_MBOX, NO_MBOX,
                          sizeof ( struct golan_cmd_teardown_hca_mbox_in ),
                          sizeof ( struct golan_cmd_teardown_hca_mbox_out ) );
        rc = send_command_and_wait ( golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX,
                                     __FUNCTION__ );
        GOLAN_PRINT_RC_AND_CMD_STATUS;

        DBGC ( golan, "%s HCA teardown completed\n", __FUNCTION__ );
}

static inline int golan_alloc_uar ( struct golan *golan )
{
        struct golan_uar *uar = &golan->uar;
        struct golan_cmd_layout *cmd;
        struct golan_alloc_uar_mbox_out *out;
        int rc;

        cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_ALLOC_UAR, 0x0,
                          NO_MBOX, NO_MBOX,
                          sizeof ( struct golan_alloc_uar_mbox_in ),
                          sizeof ( struct golan_alloc_uar_mbox_out ) );

        rc = send_command_and_wait ( golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX,
                                     __FUNCTION__ );
        GOLAN_CHECK_RC_AND_CMD_STATUS ( err_alloc_uar_cmd );
        out = ( struct golan_alloc_uar_mbox_out * ) ( cmd->out );

        uar->index = be32_to_cpu ( out->uarn ) & 0xffffff;
        uar->phys = ( pci_bar_start ( golan->pci, GOLAN_HCA_BAR ) +
                      ( uar->index << GOLAN_PAGE_SHIFT ) );
        uar->virt = ( void * ) ( ioremap ( uar->phys, GOLAN_PAGE_SIZE ) );

        DBGC ( golan, "%s: UAR allocated with index 0x%x\n", __FUNCTION__, uar->index );
        return 0;

err_alloc_uar_cmd:
        DBGC ( golan, "%s [%d] out\n", __FUNCTION__, rc );
        return rc;
}

static void golan_dealloc_uar ( struct golan *golan )
{
        struct golan_cmd_layout *cmd;
        uint32_t uar_index = golan->uar.index;
        int rc;

        DBGC ( golan, "%s in\n", __FUNCTION__ );

        cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_DEALLOC_UAR, 0x0,
                          NO_MBOX, NO_MBOX,
                          sizeof ( struct golan_free_uar_mbox_in ),
                          sizeof ( struct golan_free_uar_mbox_out ) );

        ( ( struct golan_free_uar_mbox_in * ) ( cmd->in ) )->uarn =
                cpu_to_be32 ( uar_index );
        rc = send_command_and_wait ( golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX,
                                     __FUNCTION__ );
        GOLAN_PRINT_RC_AND_CMD_STATUS;
        golan->uar.index = 0;

        DBGC ( golan, "%s UAR (0x%x) was destroyed\n", __FUNCTION__, uar_index );
}

static void golan_eq_update_ci ( struct golan_event_queue *eq, int arm )
{
        __be32 *addr = eq->doorbell + ( arm ? 0 : 2 );
        u32 val = ( eq->cons_index & 0xffffff ) | ( eq->eqn << 24 );

        writel ( cpu_to_be32 ( val ), addr );
        /* We still want ordering, just not swabbing, so add a barrier */
        wmb();
}
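/*
 * Event queue doorbell layout, as implied by golan_eq_update_ci(): the
 * EQ doorbell is a pair of 32-bit words. Writing the first word (offset
 * 0) arms the EQ while updating the consumer index; writing the second
 * (doorbell + 2 in __be32 units, i.e. offset 8) only updates the
 * consumer index. Each write encodes the 24-bit consumer index in the
 * low bits and the EQ number in the top byte. golan_create_eq() below
 * passes GOLAN_EQ_UNARMED; any nonzero arm value selects the arming
 * word instead.
 */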
static int golan_create_eq ( struct golan *golan )
{
        struct golan_event_queue *eq = &golan->eq;
        struct golan_create_eq_mbox_in_data *in;
        struct golan_cmd_layout *cmd;
        struct golan_create_eq_mbox_out *out;
        int rc, i;

        eq->cons_index = 0;
        eq->size = GOLAN_NUM_EQES * sizeof ( eq->eqes[0] );
        eq->eqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
        if ( ! eq->eqes ) {
                rc = -ENOMEM;
                goto err_create_eq_eqe_alloc;
        }

        /* Set EQEs ownership bit to HW ownership */
        for ( i = 0; i < GOLAN_NUM_EQES; ++i ) {
                eq->eqes[i].owner = GOLAN_EQE_HW_OWNERSHIP;
        }

        cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_EQ, 0x0,
                          GEN_MBOX, NO_MBOX,
                          sizeof ( struct golan_create_eq_mbox_in ) + GOLAN_PAS_SIZE,
                          sizeof ( struct golan_create_eq_mbox_out ) );

        in = ( struct golan_create_eq_mbox_in_data * ) GET_INBOX ( golan, GEN_MBOX );

        /* Fill the physical address of the page */
        in->pas[0] = VIRT_2_BE64_BUS ( eq->eqes );
        in->ctx.log_sz_usr_page = cpu_to_be32 ( ( ilog2 ( GOLAN_NUM_EQES ) ) << 24 |
                                                golan->uar.index );
        DBGC ( golan, "UAR idx %x (BE %x)\n", golan->uar.index,
               in->ctx.log_sz_usr_page );
        in->events_mask = cpu_to_be64 ( 1 << GOLAN_EVENT_TYPE_PORT_CHANGE );

        rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX,
                                     __FUNCTION__ );
        GOLAN_CHECK_RC_AND_CMD_STATUS ( err_create_eq_cmd );
        out = ( struct golan_create_eq_mbox_out * ) cmd->out;

        eq->eqn = out->eq_number;
        eq->doorbell = ( ( void * ) golan->uar.virt ) + GOLAN_EQ_DOORBELL_OFFSET;

        /* EQs are created in ARMED state */
        golan_eq_update_ci ( eq, GOLAN_EQ_UNARMED );

        DBGC ( golan, "%s: Event queue created (EQN = 0x%x)\n", __FUNCTION__, eq->eqn );
        return 0;

err_create_eq_cmd:
        free_dma ( eq->eqes, GOLAN_PAGE_SIZE );
err_create_eq_eqe_alloc:
        DBGC ( golan, "%s [%d] out\n", __FUNCTION__, rc );
        return rc;
}

static void golan_destory_eq ( struct golan *golan )
{
        struct golan_cmd_layout *cmd;
        struct golan_destroy_eq_mbox_in *in;
        uint8_t eqn = golan->eq.eqn;
        int rc;

        DBGC ( golan, "%s in\n", __FUNCTION__ );

        cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_EQ, 0x0,
                          NO_MBOX, NO_MBOX,
                          sizeof ( struct golan_destroy_eq_mbox_in ),
                          sizeof ( struct golan_destroy_eq_mbox_out ) );

        in = GOLAN_MBOX_IN ( cmd, in );
        in->eqn = eqn;

        rc = send_command_and_wait ( golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX,
                                     __FUNCTION__ );
        GOLAN_PRINT_RC_AND_CMD_STATUS;

        free_dma ( golan->eq.eqes, GOLAN_PAGE_SIZE );
        golan->eq.eqn = 0;

        DBGC ( golan, "%s Event queue (0x%x) was destroyed\n", __FUNCTION__, eqn );
}

static int golan_alloc_pd ( struct golan *golan )
{
        struct golan_cmd_layout *cmd;
        struct golan_alloc_pd_mbox_out *out;
        int rc;

        cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_ALLOC_PD, 0x0,
                          NO_MBOX, NO_MBOX,
                          sizeof ( struct golan_alloc_pd_mbox_in ),
                          sizeof ( struct golan_alloc_pd_mbox_out ) );

        rc = send_command_and_wait ( golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX,
                                     __FUNCTION__ );
        GOLAN_CHECK_RC_AND_CMD_STATUS ( err_alloc_pd_cmd );
        out = ( struct golan_alloc_pd_mbox_out * ) ( cmd->out );

        golan->pdn = ( be32_to_cpu ( out->pdn ) & 0xffffff );
        DBGC ( golan, "%s: Protection domain created (PDN = 0x%x)\n", __FUNCTION__,
               golan->pdn );
        return 0;

err_alloc_pd_cmd:
        DBGC ( golan, "%s [%d] out\n", __FUNCTION__, rc );
        return rc;
}

static void golan_dealloc_pd ( struct golan *golan )
{
        struct golan_cmd_layout *cmd;
        uint32_t pdn = golan->pdn;
        int rc;

        DBGC ( golan, "%s in\n", __FUNCTION__ );

        cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_DEALLOC_PD, 0x0,
                          NO_MBOX, NO_MBOX,
                          sizeof ( struct golan_alloc_pd_mbox_in ),
                          sizeof ( struct golan_alloc_pd_mbox_out ) );
        ( ( struct golan_dealloc_pd_mbox_in * ) ( cmd->in ) )->pdn = cpu_to_be32 ( pdn );
        rc = send_command_and_wait ( golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX,
                                     __FUNCTION__ );
        GOLAN_PRINT_RC_AND_CMD_STATUS;
        golan->pdn = 0;

        DBGC ( golan, "%s Protection domain (0x%x) was destroyed\n", __FUNCTION__, pdn );
}

static int golan_create_mkey ( struct golan *golan )
{
        struct golan_create_mkey_mbox_in_data *in;
        struct golan_cmd_layout *cmd;
        struct golan_create_mkey_mbox_out *out;
        int rc;

        cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_MKEY, 0x0,
                          GEN_MBOX, NO_MBOX,
                          sizeof ( struct golan_create_mkey_mbox_in ),
                          sizeof ( struct golan_create_mkey_mbox_out ) );

        in = ( struct golan_create_mkey_mbox_in_data * ) GET_INBOX ( golan, GEN_MBOX );

        in->seg.flags = GOLAN_IB_ACCESS_LOCAL_WRITE | GOLAN_IB_ACCESS_LOCAL_READ;
        in->seg.flags_pd = cpu_to_be32 ( golan->pdn | GOLAN_MKEY_LEN64 );
        in->seg.qpn_mkey7_0 = cpu_to_be32 ( 0xffffff << GOLAN_CREATE_MKEY_SEG_QPN_BIT );

        rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX,
                                     __FUNCTION__ );
        GOLAN_CHECK_RC_AND_CMD_STATUS ( err_create_mkey_cmd );
        out = ( struct golan_create_mkey_mbox_out * ) ( cmd->out );

        golan->mkey = ( ( be32_to_cpu ( out->mkey ) & 0xffffff ) << 8 );
        DBGC ( golan, "%s: Got DMA Key for local access read/write (MKEY = 0x%x)\n",
               __FUNCTION__, golan->mkey );
        return 0;

err_create_mkey_cmd:
        DBGC ( golan, "%s [%d] out\n", __FUNCTION__, rc );
        return rc;
}
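/*
 * MKey format note, as used above and below: the firmware returns a
 * 24-bit MKey index; the driver shifts it left by 8 to leave room for
 * the 8-bit variant/key portion in the low byte (kept at zero here),
 * and golan_destroy_mkey() shifts it back down before handing it to the
 * firmware. Data-path lkey fields (see golan_post_send()) carry the
 * full 32-bit value.
 */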
static void golan_destroy_mkey ( struct golan *golan )
{
        struct golan_cmd_layout *cmd;
        u32 mkey = golan->mkey;
        int rc;

        cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_MKEY, 0x0,
                          NO_MBOX, NO_MBOX,
                          sizeof ( struct golan_destroy_mkey_mbox_in ),
                          sizeof ( struct golan_destroy_mkey_mbox_out ) );
        ( ( struct golan_destroy_mkey_mbox_in * ) ( cmd->in ) )->mkey =
                cpu_to_be32 ( mkey >> 8 );
        rc = send_command_and_wait ( golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX,
                                     __FUNCTION__ );
        GOLAN_PRINT_RC_AND_CMD_STATUS;
        golan->mkey = 0;

        DBGC ( golan, "%s DMA Key (0x%x) for local access write was destroyed\n",
               __FUNCTION__, mkey );
}

/**
 * Initialise Golan PCI parameters
 *
 * @v golan		Golan device
 */
static inline void golan_pci_init ( struct golan *golan )
{
        struct pci_device *pci = golan->pci;

        /* Fix up PCI device */
        adjust_pci_device ( pci );

        /* Get HCA BAR */
        golan->iseg = ioremap ( pci_bar_start ( pci, GOLAN_HCA_BAR ),
                                GOLAN_PCI_CONFIG_BAR_SIZE );
}

static inline struct golan *golan_alloc ( void )
{
        void *golan = zalloc ( sizeof ( struct golan ) );

        if ( ! golan )
                goto err_zalloc;

        return golan;

err_zalloc:
        return NULL;
}

/**
 * Create completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @ret rc		Return status code
 */
static int golan_create_cq ( struct ib_device *ibdev,
                             struct ib_completion_queue *cq )
{
        struct golan *golan = ib_get_drvdata ( ibdev );
        struct golan_completion_queue *golan_cq;
        struct golan_cmd_layout *cmd;
        struct golan_create_cq_mbox_in_data *in;
        struct golan_create_cq_mbox_out *out;
        int rc;
        unsigned int i;

        golan_cq = zalloc ( sizeof ( *golan_cq ) );
        if ( ! golan_cq ) {
                rc = -ENOMEM;
                goto err_create_cq;
        }
        golan_cq->size = sizeof ( golan_cq->cqes[0] ) * cq->num_cqes;
        golan_cq->doorbell_record = malloc_dma ( GOLAN_CQ_DB_RECORD_SIZE,
                                                 GOLAN_CQ_DB_RECORD_SIZE );
        if ( ! golan_cq->doorbell_record ) {
                rc = -ENOMEM;
                goto err_create_cq_db_alloc;
        }

        golan_cq->cqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
        if ( ! golan_cq->cqes ) {
                rc = -ENOMEM;
                goto err_create_cq_cqe_alloc;
        }

        /* Set CQEs ownership bit to HW ownership */
        for ( i = 0; i < cq->num_cqes; ++i ) {
                golan_cq->cqes[i].op_own = ( ( GOLAN_CQE_OPCODE_NOT_VALID <<
                                               GOLAN_CQE_OPCODE_BIT ) |
                                             GOLAN_CQE_HW_OWNERSHIP );
        }

        cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_CQ, 0x0,
                          GEN_MBOX, NO_MBOX,
                          sizeof ( struct golan_create_cq_mbox_in ) + GOLAN_PAS_SIZE,
                          sizeof ( struct golan_create_cq_mbox_out ) );

        in = ( struct golan_create_cq_mbox_in_data * ) GET_INBOX ( golan, GEN_MBOX );

        /* Fill the physical address of the page */
        in->pas[0] = VIRT_2_BE64_BUS ( golan_cq->cqes );
        in->ctx.cqe_sz_flags = GOLAN_CQE_SIZE_64 << 5;
        in->ctx.log_sz_usr_page = cpu_to_be32 ( ( ( ilog2 ( cq->num_cqes ) ) << 24 ) |
                                                golan->uar.index );
        in->ctx.c_eqn = cpu_to_be16 ( golan->eq.eqn );
        in->ctx.db_record_addr = VIRT_2_BE64_BUS ( golan_cq->doorbell_record );

        rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX,
                                     __FUNCTION__ );
        GOLAN_CHECK_RC_AND_CMD_STATUS ( err_create_cq_cmd );
        out = ( struct golan_create_cq_mbox_out * ) ( cmd->out );

        cq->cqn = ( be32_to_cpu ( out->cqn ) & 0xffffff );

        ib_cq_set_drvdata ( cq, golan_cq );

        DBGC ( golan, "%s CQ created successfully (CQN = 0x%lx)\n",
               __FUNCTION__, cq->cqn );
        return 0;

err_create_cq_cmd:
        free_dma ( golan_cq->cqes, GOLAN_PAGE_SIZE );
err_create_cq_cqe_alloc:
        free_dma ( golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE );
err_create_cq_db_alloc:
        free ( golan_cq );
err_create_cq:
        DBGC ( golan, "%s out rc = 0x%x\n", __FUNCTION__, rc );
        return rc;
}

/**
 * Destroy completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 */
static void golan_destroy_cq ( struct ib_device *ibdev,
                               struct ib_completion_queue *cq )
{
        struct golan *golan = ib_get_drvdata ( ibdev );
        struct golan_completion_queue *golan_cq = ib_cq_get_drvdata ( cq );
        struct golan_cmd_layout *cmd;
        uint32_t cqn = cq->cqn;
        int rc;

        DBGC ( golan, "%s in\n", __FUNCTION__ );

        cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_CQ, 0x0,
                          NO_MBOX, NO_MBOX,
                          sizeof ( struct golan_destroy_cq_mbox_in ),
                          sizeof ( struct golan_destroy_cq_mbox_out ) );
        ( ( struct golan_destroy_cq_mbox_in * ) ( cmd->in ) )->cqn = cpu_to_be32 ( cqn );
        rc = send_command_and_wait ( golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX,
                                     __FUNCTION__ );
        GOLAN_PRINT_RC_AND_CMD_STATUS;
        cq->cqn = 0;

        ib_cq_set_drvdata ( cq, NULL );
        free_dma ( golan_cq->cqes, GOLAN_PAGE_SIZE );
        free_dma ( golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE );
        free ( golan_cq );

        DBGC ( golan, "%s CQ number 0x%x was destroyed\n", __FUNCTION__, cqn );
}

static void golan_cq_clean ( struct ib_completion_queue *cq )
{
        ib_poll_cq ( cq->ibdev, cq );
}

static int golan_qp_type_to_st ( enum ib_queue_pair_type type )
{
        int qpt = type;

        switch ( qpt ) {
        case IB_QPT_RC:
                return GOLAN_QP_ST_RC;
        case IB_QPT_UD:
                return GOLAN_QP_ST_UD;
        case IB_QPT_SMI:
                return GOLAN_QP_ST_QP0;
        case IB_QPT_GSI:
                return GOLAN_QP_ST_QP1;
        case IB_QPT_ETH:
        default:
                return -EINVAL;
        }
}

#if 0
static int golan_is_special_qp ( enum ib_queue_pair_type type )
{
        return ( type == IB_QPT_GSI || type == IB_QPT_SMI );
}
#endif

static int golan_create_qp_aux ( struct ib_device *ibdev,
                                 struct ib_queue_pair *qp,
                                 int *qpn )
{
        struct golan *golan = ib_get_drvdata ( ibdev );
        struct golan_queue_pair *golan_qp;
        struct golan_create_qp_mbox_in_data *in;
        struct golan_cmd_layout *cmd;
        struct golan_wqe_data_seg *data;
        struct golan_create_qp_mbox_out *out;
        uint32_t wqe_size_in_bytes;
        uint32_t max_qp_size_in_wqes;
        unsigned int i;
        int rc;

        golan_qp = zalloc ( sizeof ( *golan_qp ) );
        if ( ! golan_qp ) {
                rc = -ENOMEM;
                goto err_create_qp;
        }

        if ( ( qp->type == IB_QPT_SMI ) || ( qp->type == IB_QPT_GSI ) ||
             ( qp->type == IB_QPT_UD ) ) {
                golan_qp->rq.grh_size = ( qp->recv.num_wqes *
                                          sizeof ( golan_qp->rq.grh[0] ) );
        }

        /* Calculate receive queue size */
        golan_qp->rq.size = qp->recv.num_wqes * GOLAN_RECV_WQE_SIZE;
        if ( GOLAN_RECV_WQE_SIZE > be16_to_cpu ( golan->caps.max_wqe_sz_rq ) ) {
                DBGC ( golan, "%s receive WQE size [%zd] > max WQE size [%d]\n",
                       __FUNCTION__, GOLAN_RECV_WQE_SIZE,
                       be16_to_cpu ( golan->caps.max_wqe_sz_rq ) );
                rc = -EINVAL;
                goto err_create_qp_rq_size;
        }

        wqe_size_in_bytes = sizeof ( golan_qp->sq.wqes[0] );
        /* Calculate send queue size */
        if ( wqe_size_in_bytes > be16_to_cpu ( golan->caps.max_wqe_sz_sq ) ) {
                DBGC ( golan, "%s send WQE size [%d] > max WQE size [%d]\n",
                       __FUNCTION__, wqe_size_in_bytes,
                       be16_to_cpu ( golan->caps.max_wqe_sz_sq ) );
                rc = -EINVAL;
                goto err_create_qp_sq_wqe_size;
        }
        golan_qp->sq.size = ( qp->send.num_wqes * wqe_size_in_bytes );
        max_qp_size_in_wqes = ( 1 << ( ( uint32_t ) ( golan->caps.log_max_qp_sz ) ) );
        if ( qp->send.num_wqes > max_qp_size_in_wqes ) {
                DBGC ( golan, "%s send WQ size [%d] > max WQ size [%d]\n",
                       __FUNCTION__, golan_qp->sq.size, max_qp_size_in_wqes );
                rc = -EINVAL;
                goto err_create_qp_sq_size;
        }

        golan_qp->size = golan_qp->sq.size + golan_qp->rq.size;

        /* allocate DMA memory for the WQEs (1 page is enough) - should change it */
        golan_qp->wqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
        if ( ! golan_qp->wqes ) {
                rc = -ENOMEM;
                goto err_create_qp_wqe_alloc;
        }
        golan_qp->rq.wqes = golan_qp->wqes;
        golan_qp->sq.wqes = golan_qp->wqes + golan_qp->rq.size;
        // (union golan_send_wqe *)
        // &(((struct golan_recv_wqe_ud *)(golan_qp->wqes))[qp->recv.num_wqes]);

        if ( golan_qp->rq.grh_size ) {
                golan_qp->rq.grh = ( golan_qp->wqes +
                                     golan_qp->sq.size +
                                     golan_qp->rq.size );
        }

        /* Invalidate all WQEs */
        data = &golan_qp->rq.wqes[0].data[0];
        for ( i = 0 ; i < ( golan_qp->rq.size / sizeof ( *data ) ) ; i++ ) {
                data->lkey = cpu_to_be32 ( GOLAN_INVALID_LKEY );
                data++;
        }

        golan_qp->doorbell_record = malloc_dma ( sizeof ( struct golan_qp_db ),
                                                 sizeof ( struct golan_qp_db ) );
        if ( ! golan_qp->doorbell_record ) {
                rc = -ENOMEM;
                goto err_create_qp_db_alloc;
        }
        memset ( golan_qp->doorbell_record, 0, sizeof ( struct golan_qp_db ) );

        cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_QP, 0x0,
                          GEN_MBOX, NO_MBOX,
                          sizeof ( struct golan_create_qp_mbox_in ) + GOLAN_PAS_SIZE,
                          sizeof ( struct golan_create_qp_mbox_out ) );

        in = ( struct golan_create_qp_mbox_in_data * ) GET_INBOX ( golan, GEN_MBOX );

        /* Fill the physical address of the page */
        in->pas[0] = VIRT_2_BE64_BUS ( golan_qp->wqes );
        in->ctx.qp_counter_set_usr_page = cpu_to_be32 ( golan->uar.index );

        in->ctx.flags_pd = cpu_to_be32 ( golan->pdn );
        in->ctx.flags = cpu_to_be32 ( ( golan_qp_type_to_st ( qp->type )
                                        << GOLAN_QP_CTX_ST_BIT ) |
                                      ( GOLAN_QP_PM_MIGRATED <<
                                        GOLAN_QP_CTX_PM_STATE_BIT ) );
        // cgs set to 0 initially
        // atomic mode
        in->ctx.rq_size_stride = ( ( ilog2 ( qp->recv.num_wqes ) <<
                                     GOLAN_QP_CTX_RQ_SIZE_BIT ) |
                                   ( sizeof ( golan_qp->rq.wqes[0] ) /
                                     GOLAN_RECV_WQE_SIZE ) );
        in->ctx.sq_crq_size = cpu_to_be16 ( ilog2 ( golan_qp->sq.size /
                                                    GOLAN_SEND_WQE_BB_SIZE )
                                            << GOLAN_QP_CTX_SQ_SIZE_BIT );
        in->ctx.cqn_send = cpu_to_be32 ( qp->send.cq->cqn );
        in->ctx.cqn_recv = cpu_to_be32 ( qp->recv.cq->cqn );
        in->ctx.db_rec_addr = VIRT_2_BE64_BUS ( golan_qp->doorbell_record );

        rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX,
                                     __FUNCTION__ );
        GOLAN_CHECK_RC_AND_CMD_STATUS ( err_create_qp_cmd );

        out = ( struct golan_create_qp_mbox_out * ) cmd->out;
        *qpn = ( be32_to_cpu ( out->qpn ) & 0xffffff );
        /*
         * Hardware wants QPN written in big-endian order (after
         * shifting) for send doorbell.  Precompute this value to save
         * a little bit when posting sends.
         */
        golan_qp->doorbell_qpn = cpu_to_be32 ( *qpn << 8 );
        golan_qp->state = GOLAN_IB_QPS_RESET;

        ib_qp_set_drvdata ( qp, golan_qp );
        return 0;

err_create_qp_cmd:
        free_dma ( golan_qp->doorbell_record, sizeof ( struct golan_qp_db ) );
err_create_qp_db_alloc:
        free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
err_create_qp_wqe_alloc:
err_create_qp_sq_size:
err_create_qp_sq_wqe_size:
err_create_qp_rq_size:
        free ( golan_qp );
err_create_qp:
        return rc;
}
/**
 * Create queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int golan_create_qp ( struct ib_device *ibdev,
                             struct ib_queue_pair *qp )
{
        int rc, qpn = -1;

        switch ( qp->type ) {
        case IB_QPT_UD:
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                rc = golan_create_qp_aux ( ibdev, qp, &qpn );
                if ( rc ) {
                        DBG ( "%s Failed to create QP (rc = 0x%x)\n",
                              __FUNCTION__, rc );
                        return rc;
                }
                qp->qpn = qpn;
                break;
        case IB_QPT_ETH:
        case IB_QPT_RC:
        default:
                DBG ( "%s unsupported QP type (0x%x)\n", __FUNCTION__, qp->type );
                return -EINVAL;
        }

        return 0;
}

static int golan_modify_qp_rst_to_init ( struct ib_device *ibdev,
                                         struct ib_queue_pair *qp,
                                         struct golan_modify_qp_mbox_in_data *in )
{
        int rc = 0;

        in->ctx.qkey = cpu_to_be32 ( ( uint32_t ) ( qp->qkey ) );
        in->ctx.pri_path.port = ibdev->port;
        in->ctx.flags |= cpu_to_be32 ( GOLAN_QP_PM_MIGRATED <<
                                       GOLAN_QP_CTX_PM_STATE_BIT );
        in->ctx.pri_path.pkey_index = 0;
        /* QK is 0 */
        /* QP counter set 0 */
        return rc;
}

static int golan_modify_qp_init_to_rtr ( struct ib_device *ibdev __unused,
                                         struct ib_queue_pair *qp __unused,
                                         struct golan_modify_qp_mbox_in_data *in )
{
        int rc = 0;

        in->optparam = 0;
        return rc;
}

static int golan_modify_qp_rtr_to_rts ( struct ib_device *ibdev __unused,
                                        struct ib_queue_pair *qp __unused,
                                        struct golan_modify_qp_mbox_in_data *in )
{
        int rc = 0;

        in->optparam = 0;
        /* In the good flow the PSN is 0 */
        return rc;
}

static int golan_modify_qp_to_rst ( struct ib_device *ibdev,
                                    struct ib_queue_pair *qp )
{
        struct golan *golan = ib_get_drvdata ( ibdev );
        struct golan_queue_pair *golan_qp = ib_qp_get_drvdata ( qp );
        struct golan_cmd_layout *cmd;
        int rc;

        cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_2RST_QP, 0x0,
                          NO_MBOX, NO_MBOX,
                          sizeof ( struct golan_modify_qp_mbox_in ),
                          sizeof ( struct golan_modify_qp_mbox_out ) );
        ( ( struct golan_modify_qp_mbox_in * ) ( cmd->in ) )->qpn =
                cpu_to_be32 ( qp->qpn );
        rc = send_command_and_wait ( golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX,
                                     __FUNCTION__ );
        GOLAN_CHECK_RC_AND_CMD_STATUS ( err_modify_qp_2rst_cmd );

        golan_qp->state = GOLAN_IB_QPS_RESET;
        DBGC ( golan, "%s QP number 0x%lx was modified to RESET\n",
               __FUNCTION__, qp->qpn );
        return 0;

err_modify_qp_2rst_cmd:
        DBGC ( golan, "%s Failed to modify QP number 0x%lx (rc = 0x%x)\n",
               __FUNCTION__, qp->qpn, rc );
        return rc;
}

static int ( *golan_modify_qp_methods[] ) ( struct ib_device *ibdev,
                                            struct ib_queue_pair *qp,
                                            struct golan_modify_qp_mbox_in_data *in ) = {
        [GOLAN_IB_QPS_RESET]    = golan_modify_qp_rst_to_init,
        [GOLAN_IB_QPS_INIT]     = golan_modify_qp_init_to_rtr,
        [GOLAN_IB_QPS_RTR]      = golan_modify_qp_rtr_to_rts
};
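/*
 * QP bring-up walks the standard InfiniBand state machine, one firmware
 * command per transition:
 *
 *	RESET --RST2INIT_QP--> INIT --INIT2RTR_QP--> RTR --RTR2RTS_QP--> RTS
 *
 * golan_modify_qp() below loops until GOLAN_IB_QPS_RTS is reached,
 * calling the matching handler from this table to fill in the
 * transition-specific context fields before issuing each command.
 */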
  1112. static int golan_modify_qp(struct ib_device *ibdev,
  1113. struct ib_queue_pair *qp)
  1114. {
  1115. struct golan *golan = ib_get_drvdata(ibdev);
  1116. struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
  1117. struct golan_modify_qp_mbox_in_data *in;
  1118. struct golan_cmd_layout *cmd;
  1119. enum golan_ib_qp_state prev_state;
  1120. int rc;
  1121. int modify_cmd[] = {GOLAN_CMD_OP_RST2INIT_QP,
  1122. GOLAN_CMD_OP_INIT2RTR_QP,
  1123. GOLAN_CMD_OP_RTR2RTS_QP};
  1124. while (golan_qp->state < GOLAN_IB_QPS_RTS) {
  1125. prev_state = golan_qp->state;
  1126. cmd = write_cmd(golan, DEF_CMD_IDX, modify_cmd[golan_qp->state], 0x0,
  1127. GEN_MBOX, NO_MBOX,
  1128. sizeof(struct golan_modify_qp_mbox_in),
  1129. sizeof(struct golan_modify_qp_mbox_out));
  1130. in = (struct golan_modify_qp_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
  1131. ((struct golan_modify_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);
  1132. rc = golan_modify_qp_methods[golan_qp->state](ibdev, qp, in);
  1133. if (rc) {
  1134. goto err_modify_qp_fill_inbox;
  1135. }
  1136. // in->ctx.qp_counter_set_usr_page = cpu_to_be32(golan->uar.index);
  1137. rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
  1138. GOLAN_CHECK_RC_AND_CMD_STATUS( err_modify_qp_cmd );
  1139. ++(golan_qp->state);
  1140. DBGC( golan , "%s QP number 0x%lx was modified from %s to %s\n",
  1141. __FUNCTION__, qp->qpn, golan_qp_state_as_string[prev_state],
  1142. golan_qp_state_as_string[golan_qp->state]);
  1143. }
  1144. DBGC( golan , "%s QP number 0x%lx is ready to receive/send packets.\n",
  1145. __FUNCTION__, qp->qpn);
  1146. return 0;
  1147. err_modify_qp_cmd:
  1148. err_modify_qp_fill_inbox:
  1149. DBGC (golan ,"%s Failed to modify QP number 0x%lx (rc = 0x%x)\n",
  1150. __FUNCTION__, qp->qpn, rc);
  1151. return rc;
  1152. }
/**
 * Destroy queue pair
 *
 * @v ibdev        Infiniband device
 * @v qp           Queue pair
 */
static void golan_destroy_qp(struct ib_device *ibdev,
                             struct ib_queue_pair *qp)
{
    struct golan *golan = ib_get_drvdata(ibdev);
    struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
    struct golan_cmd_layout *cmd;
    unsigned long qpn = qp->qpn;
    int rc;

    DBGC (golan, "%s in\n", __FUNCTION__);

    if (golan_qp->state != GOLAN_IB_QPS_RESET) {
        if (golan_modify_qp_to_rst(ibdev, qp)) {
            DBGC (golan ,"%s Failed to modify QP 0x%lx to RESET\n", __FUNCTION__,
                  qp->qpn);
        }
    }

    if (qp->recv.cq) {
        golan_cq_clean(qp->recv.cq);
    }
    if (qp->send.cq && (qp->send.cq != qp->recv.cq)) {
        golan_cq_clean(qp->send.cq);
    }

    cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_QP, 0x0,
                    NO_MBOX, NO_MBOX,
                    sizeof(struct golan_destroy_qp_mbox_in),
                    sizeof(struct golan_destroy_qp_mbox_out));
    ((struct golan_destroy_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qpn);
    rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
    GOLAN_PRINT_RC_AND_CMD_STATUS;

    qp->qpn = 0;
    ib_qp_set_drvdata(qp, NULL);

    free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
    free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
    free(golan_qp);

    DBGC( golan ,"%s QP 0x%lx was destroyed\n", __FUNCTION__, qpn);
}
/**
 * Calculate transmission rate
 *
 * @v rate         Infiniband static rate
 * @ret golan_rate Golan rate
 */
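/*
 * Note: adding 5 appears to translate the IBA static rate encoding into
 * the device's stat_rate field encoding; rates outside the supported
 * 2.5-120 Gbps range map to 0, which (by the usual ConnectX convention,
 * an assumption here) means "no static rate limit".
 */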
static unsigned int golan_rate(enum ib_rate rate) {
    return (((rate >= IB_RATE_2_5) && (rate <= IB_RATE_120)) ? (rate + 5) : 0);
}

/**
 * Post send work queue entry
 *
 * @v ibdev        Infiniband device
 * @v qp           Queue pair
 * @v av           Address vector
 * @v iobuf        I/O buffer
 * @ret rc         Return status code
 */
static int golan_post_send(struct ib_device *ibdev,
                           struct ib_queue_pair *qp,
                           struct ib_address_vector *av,
                           struct io_buffer *iobuf)
{
    struct golan *golan = ib_get_drvdata(ibdev);
    struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
    struct golan_send_wqe_ud *wqe = NULL;
    struct golan_av *datagram = NULL;
    unsigned long wqe_idx_mask;
    unsigned long wqe_idx;
    struct golan_wqe_data_seg *data = NULL;
    struct golan_wqe_ctrl_seg *ctrl = NULL;

    wqe_idx_mask = (qp->send.num_wqes - 1);
    wqe_idx = (qp->send.next_idx & wqe_idx_mask);
    if (qp->send.iobufs[wqe_idx]) {
        DBGC (golan ,"%s Send queue of QPN 0x%lx is full\n", __FUNCTION__, qp->qpn);
        return -ENOMEM;
    }

    qp->send.iobufs[wqe_idx] = iobuf;

    // change to this
    //wqe_size_in_octa_words = golan_qp->sq.wqe_size_in_wqebb >> 4;

    wqe = &golan_qp->sq.wqes[wqe_idx].ud;

    //CHECK HW OWNERSHIP BIT ???

    memset(wqe, 0, sizeof(*wqe));

    ctrl = &wqe->ctrl;
    ctrl->opmod_idx_opcode = cpu_to_be32(GOLAN_SEND_OPCODE |
                                         ((u32)(golan_qp->sq.next_idx) <<
                                          GOLAN_WQE_CTRL_WQE_IDX_BIT));
    ctrl->qpn_ds = cpu_to_be32(GOLAN_SEND_UD_WQE_SIZE >> 4) |
                   golan_qp->doorbell_qpn;
    ctrl->fm_ce_se = 0x8; /* CE field = 0b10 (always generate a CQE); fence and solicited-event bits clear */
    data = &wqe->data;
    data->byte_count = cpu_to_be32(iob_len(iobuf));
    data->lkey = cpu_to_be32(golan->mkey);
    data->addr = VIRT_2_BE64_BUS(iobuf->data);

    datagram = &wqe->datagram;
    datagram->key.qkey.qkey = cpu_to_be32(av->qkey);
    datagram->dqp_dct = cpu_to_be32((1 << 31) | av->qpn);
    datagram->stat_rate_sl = ((golan_rate(av->rate) << 4) | av->sl);
    datagram->fl_mlid = (ibdev->lid & 0x007f); /* take only the 7 low bits of the LID */
    datagram->rlid = cpu_to_be16(av->lid);
    datagram->grh_gid_fl = cpu_to_be32(av->gid_present << 30);
    memcpy(datagram->rgid, av->gid.bytes, 16 /* sizeof(datagram->rgid) */);

    /*
     * Make sure that descriptors are written before
     * updating doorbell record and ringing the doorbell
     */
    ++(qp->send.next_idx);
    golan_qp->sq.next_idx = (golan_qp->sq.next_idx + GOLAN_WQEBBS_PER_SEND_UD_WQE);
    golan_qp->doorbell_record->send_db = cpu_to_be16(golan_qp->sq.next_idx);
    wmb();
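    /*
     * Ring the doorbell by copying the first 8 bytes of the control
     * segment to the UAR.  Alternating between the even and odd doorbell
     * buffers on successive posts avoids overwriting a doorbell the
     * device may still be reading (assuming DB_BUFFER0_EVEN_OFFSET and
     * DB_BUFFER0_ODD_OFFSET select the two UAR doorbell buffers).
     */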
    writeq(*((__be64 *)ctrl), golan->uar.virt
           + ( ( golan_qp->sq.next_idx & 0x1 ) ? DB_BUFFER0_EVEN_OFFSET
                                               : DB_BUFFER0_ODD_OFFSET ) );
    return 0;
}

/**
 * Post receive work queue entry
 *
 * @v ibdev        Infiniband device
 * @v qp           Queue pair
 * @v iobuf        I/O buffer
 * @ret rc         Return status code
 */
static int golan_post_recv(struct ib_device *ibdev,
                           struct ib_queue_pair *qp,
                           struct io_buffer *iobuf)
{
    struct golan *golan = ib_get_drvdata(ibdev);
    struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
    struct ib_work_queue *wq = &qp->recv;
    struct golan_recv_wqe_ud *wqe;
    struct ib_global_route_header *grh;
    struct golan_wqe_data_seg *data;
    unsigned int wqe_idx_mask;

    /* Allocate work queue entry */
    wqe_idx_mask = (wq->num_wqes - 1);
    if (wq->iobufs[wq->next_idx & wqe_idx_mask]) {
        DBGC (golan ,"%s Receive queue of QPN 0x%lx is full\n", __FUNCTION__, qp->qpn);
        return -ENOMEM;
    }
    wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
    wqe = & golan_qp->rq.wqes[wq->next_idx & wqe_idx_mask];

    memset(wqe, 0, sizeof(*wqe));
    data = &wqe->data[0];
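    /*
     * For UD QPs the device scatters the 40-byte GRH ahead of the
     * payload, so when a separate GRH buffer exists the first scatter
     * entry points at it and the packet data lands in the I/O buffer.
     */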
    if ( golan_qp->rq.grh ) {
        grh = &golan_qp->rq.grh[wq->next_idx & wqe_idx_mask];
        data->byte_count = cpu_to_be32 ( sizeof ( *grh ) );
        data->lkey = cpu_to_be32 ( golan->mkey );
        data->addr = VIRT_2_BE64_BUS ( grh );
        data++;
    }
    data->byte_count = cpu_to_be32(iob_tailroom(iobuf));
    data->lkey = cpu_to_be32(golan->mkey);
    data->addr = VIRT_2_BE64_BUS(iobuf->data);

    ++wq->next_idx;

    /*
     * Make sure that descriptors are written before
     * updating doorbell record and ringing the doorbell
     */
    wmb();
    golan_qp->doorbell_record->recv_db = cpu_to_be16(qp->recv.next_idx & 0xffff);

    return 0;
}

static int golan_query_vport_context ( struct ib_device *ibdev ) {
    struct golan *golan = ib_get_drvdata ( ibdev );
    struct golan_cmd_layout *cmd;
    struct golan_query_hca_vport_context_inbox *in;
    struct golan_query_hca_vport_context_data *context_data;
    int rc;

    cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_VPORT_CONTEXT,
                      0x0, GEN_MBOX, GEN_MBOX,
                      sizeof(struct golan_query_hca_vport_context_inbox),
                      sizeof(struct golan_query_hca_vport_context_outbox) );

    in = GOLAN_MBOX_IN ( cmd, in );
    in->port_num = (u8)ibdev->port;

    rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
    GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_context_cmd );

    context_data = (struct golan_query_hca_vport_context_data *)( GET_OUTBOX ( golan, GEN_MBOX ) );

    ibdev->node_guid.dwords[0] = context_data->node_guid[0];
    ibdev->node_guid.dwords[1] = context_data->node_guid[1];
    ibdev->lid = be16_to_cpu( context_data->lid );
    ibdev->sm_lid = be16_to_cpu( context_data->sm_lid );
    ibdev->sm_sl = context_data->sm_sl;
    ibdev->port_state = context_data->port_state;

    return 0;

err_query_vport_context_cmd:
    DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
    return rc;
}

static int golan_query_vport_gid ( struct ib_device *ibdev ) {
    struct golan *golan = ib_get_drvdata( ibdev );
    struct golan_cmd_layout *cmd;
    struct golan_query_hca_vport_gid_inbox *in;
    union ib_gid *ib_gid;
    int rc;

    cmd = write_cmd( golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_VPORT_GID,
                     0x0, GEN_MBOX, GEN_MBOX,
                     sizeof(struct golan_query_hca_vport_gid_inbox),
                     sizeof(struct golan_query_hca_vport_gid_outbox) );

    in = GOLAN_MBOX_IN ( cmd, in );
    in->port_num = (u8)ibdev->port;
    in->gid_index = 0;
    rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
    GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_gid_cmd );

    ib_gid = (union ib_gid *)( GET_OUTBOX ( golan, GEN_MBOX ) );
    memcpy ( &ibdev->gid, ib_gid, sizeof(ibdev->gid) );

    return 0;

err_query_vport_gid_cmd:
    DBGC ( golan, "%s [%d] out\n", __FUNCTION__, rc);
    return rc;
}

static int golan_query_vport_pkey ( struct ib_device *ibdev ) {
    struct golan *golan = ib_get_drvdata ( ibdev );
    struct golan_cmd_layout *cmd;
    struct golan_query_hca_vport_pkey_inbox *in;
    int pkey_table_size_in_entries = (1 << (7 + golan->caps.pkey_table_size));
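    /*
     * caps.pkey_table_size is a log2-style value: 1 << (7 + n) entries,
     * i.e. a minimum table of 128 P_Keys (an assumption based on the
     * usual ConnectX PRM convention for this capability field).
     */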
    int rc;

    cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_VPORT_PKEY,
                      0x0, GEN_MBOX, GEN_MBOX,
                      sizeof(struct golan_query_hca_vport_pkey_inbox),
                      sizeof(struct golan_outbox_hdr) + 8 +
                      sizeof(struct golan_query_hca_vport_pkey_data) * pkey_table_size_in_entries );

    in = GOLAN_MBOX_IN ( cmd, in );
    in->port_num = (u8)ibdev->port;
    in->pkey_index = 0xffff;
    rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
    GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_pkey_cmd );

    return 0;

err_query_vport_pkey_cmd:
    DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
    return rc;
}

static int golan_get_ib_info ( struct ib_device *ibdev ) {
    int rc;

    rc = golan_query_vport_context ( ibdev );
    if ( rc != 0 ) {
        DBG ( "golan_get_ib_info: golan_query_vport_context Failed (rc = %d)\n",rc );
        goto err_query_vport_context;
    }

    rc = golan_query_vport_gid ( ibdev );
    if ( rc != 0 ) {
        DBG ( "golan_get_ib_info: golan_query_vport_gid Failed (rc = %d)\n",rc );
        goto err_query_vport_gid;
    }

    rc = golan_query_vport_pkey ( ibdev );
    if ( rc != 0 ) {
        DBG ( "golan_get_ib_info: golan_query_vport_pkey Failed (rc = %d)\n",rc );
        goto err_query_vport_pkey;
    }
    return rc;

err_query_vport_pkey:
err_query_vport_gid:
err_query_vport_context:
    DBG ( "%s [%d] out\n", __FUNCTION__, rc);
    return rc;
}

static int golan_complete(struct ib_device *ibdev,
                          struct ib_completion_queue *cq,
                          struct golan_cqe64 *cqe64)
{
    struct golan *golan = ib_get_drvdata(ibdev);
    struct ib_work_queue *wq;
    struct golan_queue_pair *golan_qp;
    struct ib_queue_pair *qp;
    struct io_buffer *iobuf = NULL;
    struct ib_address_vector recv_dest;
    struct ib_address_vector recv_source;
    struct ib_global_route_header *grh;
    struct golan_err_cqe *err_cqe64;
    int gid_present, idx;
    u16 wqe_ctr;
    uint8_t opcode;
    static int error_state;
    uint32_t qpn = be32_to_cpu(cqe64->sop_drop_qpn) & 0xffffff;
    int is_send = 0;
    size_t len;

    opcode = cqe64->op_own >> GOLAN_CQE_OPCODE_BIT;
    DBGC2( golan , "%s completion with opcode 0x%x\n", __FUNCTION__, opcode);

    if (opcode == GOLAN_CQE_REQ || opcode == GOLAN_CQE_REQ_ERR) {
        is_send = 1;
    } else {
        is_send = 0;
    }
    if (opcode == GOLAN_CQE_REQ_ERR || opcode == GOLAN_CQE_RESP_ERR) {
        err_cqe64 = (struct golan_err_cqe *)cqe64;
        int i = 0;
        if (!error_state++) {
            DBGC (golan ,"\n");
            for ( i = 0 ; i < 16 ; i += 2 ) {
                DBGC (golan ,"%x %x\n",
                      be32_to_cpu(((uint32_t *)(err_cqe64))[i]),
                      be32_to_cpu(((uint32_t *)(err_cqe64))[i + 1]));
            }
            DBGC (golan ,"CQE with error: Syndrome(0x%x), VendorSynd(0x%x), HW_SYN(0x%x)\n",
                  err_cqe64->syndrome, err_cqe64->vendor_err_synd,
                  err_cqe64->hw_syndrom);
        }
    }

    /* Identify work queue */
    wq = ib_find_wq(cq, qpn, is_send);
    if (!wq) {
        DBGC (golan ,"%s unknown %s QPN 0x%x in CQN 0x%lx\n",
              __FUNCTION__, (is_send ? "send" : "recv"), qpn, cq->cqn);
        return -EINVAL;
    }

    qp = wq->qp;
    golan_qp = ib_qp_get_drvdata ( qp );

    wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
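    /*
     * Send CQEs report a WQEBB counter rather than a WQE index, so
     * convert WQEBBs to a WQE slot; receive CQEs report the WQE index
     * directly and only need masking.
     */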
    if (is_send) {
        wqe_ctr &= ((GOLAN_WQEBBS_PER_SEND_UD_WQE * wq->num_wqes) - 1);
        idx = wqe_ctr / GOLAN_WQEBBS_PER_SEND_UD_WQE;
    } else {
        idx = wqe_ctr & (wq->num_wqes - 1);
    }

    iobuf = wq->iobufs[idx];
    if (!iobuf) {
        DBGC (golan ,"%s IO Buffer 0x%x not found in QPN 0x%x\n",
              __FUNCTION__, idx, qpn);
        return -EINVAL;
    }
    wq->iobufs[idx] = NULL;

    if (is_send) {
        ib_complete_send(ibdev, qp, iobuf, (opcode == GOLAN_CQE_REQ_ERR));
    } else {
        len = be32_to_cpu(cqe64->byte_cnt);
        memset(&recv_dest, 0, sizeof(recv_dest));
        recv_dest.qpn = qpn;
        /* Construct address vector */
        memset(&recv_source, 0, sizeof(recv_source));
        switch (qp->type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case IB_QPT_UD:
            /* Locate corresponding GRH */
            assert ( golan_qp->rq.grh != NULL );
            grh = &golan_qp->rq.grh[ idx ];

            recv_source.qpn = be32_to_cpu(cqe64->flags_rqpn) & 0xffffff;
            recv_source.lid = be16_to_cpu(cqe64->slid);
            recv_source.sl = (be32_to_cpu(cqe64->flags_rqpn) >> 24) & 0xf;
            gid_present = (be32_to_cpu(cqe64->flags_rqpn) >> 28) & 3;
            if (!gid_present) {
                recv_dest.gid_present = recv_source.gid_present = 0;
            } else {
                recv_dest.gid_present = recv_source.gid_present = 1;
                //if (recv_source.gid_present == 0x1) {
                memcpy(&recv_source.gid, &grh->sgid, sizeof(recv_source.gid));
                memcpy(&recv_dest.gid, &grh->dgid, sizeof(recv_dest.gid));
                //} else { // recv_source.gid_present = 0x3
                /* GRH is located in the upper 64 bytes of the CQE128 -
                 * currently not supported */
                //;
                //}
            }
            len -= sizeof ( *grh );
            break;
        case IB_QPT_RC:
        case IB_QPT_ETH:
        default:
            DBGC (golan ,"%s Unsupported QP type (0x%x)\n", __FUNCTION__, qp->type);
            return -EINVAL;
        }
        assert(len <= iob_tailroom(iobuf));
        iob_put(iobuf, len);
        ib_complete_recv(ibdev, qp, &recv_dest, &recv_source, iobuf, (opcode == GOLAN_CQE_RESP_ERR));
    }
    return 0;
}
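/*
 * Software owns a CQE when its ownership bit matches the parity of the
 * current pass over the CQ ring (the bit toggles on every wrap, and
 * next_idx >> ilog2(num_cqes) gives the expected parity); this helper
 * returns true while the entry still belongs to the hardware.
 */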
static int golan_is_hw_ownership(struct ib_completion_queue *cq,
                                 struct golan_cqe64 *cqe64)
{
    return ((cqe64->op_own & GOLAN_CQE_OWNER_MASK) !=
            ((cq->next_idx >> ilog2(cq->num_cqes)) & 1));
}

static void golan_poll_cq(struct ib_device *ibdev,
                          struct ib_completion_queue *cq)
{
    unsigned int i;
    int rc = 0;
    unsigned int cqe_idx_mask;
    struct golan_cqe64 *cqe64;
    struct golan_completion_queue *golan_cq = ib_cq_get_drvdata(cq);
    struct golan *golan = ib_get_drvdata(ibdev);

    for (i = 0; i < cq->num_cqes; ++i) {
        /* Look for completion entry */
        cqe_idx_mask = (cq->num_cqes - 1);
        cqe64 = &golan_cq->cqes[cq->next_idx & cqe_idx_mask];
        /* temporary: valid only for 64 byte CQE */
        if (golan_is_hw_ownership(cq, cqe64) ||
            ((cqe64->op_own >> GOLAN_CQE_OPCODE_BIT) ==
             GOLAN_CQE_OPCODE_NOT_VALID)) {
            break; /* HW ownership */
        }

        DBGC2( golan , "%s CQN 0x%lx [%ld]\n", __FUNCTION__, cq->cqn, cq->next_idx);
        /*
         * Make sure we read CQ entry contents after we've checked the
         * ownership bit. (PRM - 6.5.3.2)
         */
        rmb();

        rc = golan_complete(ibdev, cq, cqe64);
        if (rc != 0) {
            DBGC (golan ,"%s CQN 0x%lx failed to complete\n", __FUNCTION__, cq->cqn);
        }

        /* Update completion queue's index */
        cq->next_idx++;

        /* Update doorbell record */
        *(golan_cq->doorbell_record) = cpu_to_be32(cq->next_idx & 0xffffff);
    }
}

static const char *golan_eqe_type_str(u8 type)
{
    switch (type) {
    case GOLAN_EVENT_TYPE_COMP:
        return "GOLAN_EVENT_TYPE_COMP";
    case GOLAN_EVENT_TYPE_PATH_MIG:
        return "GOLAN_EVENT_TYPE_PATH_MIG";
    case GOLAN_EVENT_TYPE_COMM_EST:
        return "GOLAN_EVENT_TYPE_COMM_EST";
    case GOLAN_EVENT_TYPE_SQ_DRAINED:
        return "GOLAN_EVENT_TYPE_SQ_DRAINED";
    case GOLAN_EVENT_TYPE_SRQ_LAST_WQE:
        return "GOLAN_EVENT_TYPE_SRQ_LAST_WQE";
    case GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT:
        return "GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT";
    case GOLAN_EVENT_TYPE_CQ_ERROR:
        return "GOLAN_EVENT_TYPE_CQ_ERROR";
    case GOLAN_EVENT_TYPE_WQ_CATAS_ERROR:
        return "GOLAN_EVENT_TYPE_WQ_CATAS_ERROR";
    case GOLAN_EVENT_TYPE_PATH_MIG_FAILED:
        return "GOLAN_EVENT_TYPE_PATH_MIG_FAILED";
    case GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
        return "GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
    case GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR:
        return "GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR";
    case GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR:
        return "GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR";
    case GOLAN_EVENT_TYPE_INTERNAL_ERROR:
        return "GOLAN_EVENT_TYPE_INTERNAL_ERROR";
    case GOLAN_EVENT_TYPE_PORT_CHANGE:
        return "GOLAN_EVENT_TYPE_PORT_CHANGE";
    case GOLAN_EVENT_TYPE_GPIO_EVENT:
        return "GOLAN_EVENT_TYPE_GPIO_EVENT";
    case GOLAN_EVENT_TYPE_REMOTE_CONFIG:
        return "GOLAN_EVENT_TYPE_REMOTE_CONFIG";
    case GOLAN_EVENT_TYPE_DB_BF_CONGESTION:
        return "GOLAN_EVENT_TYPE_DB_BF_CONGESTION";
    case GOLAN_EVENT_TYPE_STALL_EVENT:
        return "GOLAN_EVENT_TYPE_STALL_EVENT";
    case GOLAN_EVENT_TYPE_CMD:
        return "GOLAN_EVENT_TYPE_CMD";
    case GOLAN_EVENT_TYPE_PAGE_REQUEST:
        return "GOLAN_EVENT_TYPE_PAGE_REQUEST";
    default:
        return "Unrecognized event";
    }
}

static const char *golan_eqe_port_subtype_str(u8 subtype)
{
    switch (subtype) {
    case GOLAN_PORT_CHANGE_SUBTYPE_DOWN:
        return "GOLAN_PORT_CHANGE_SUBTYPE_DOWN";
    case GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE:
        return "GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE";
    case GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED:
        return "GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED";
    case GOLAN_PORT_CHANGE_SUBTYPE_LID:
        return "GOLAN_PORT_CHANGE_SUBTYPE_LID";
    case GOLAN_PORT_CHANGE_SUBTYPE_PKEY:
        return "GOLAN_PORT_CHANGE_SUBTYPE_PKEY";
    case GOLAN_PORT_CHANGE_SUBTYPE_GUID:
        return "GOLAN_PORT_CHANGE_SUBTYPE_GUID";
    case GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
        return "GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG";
    default:
        return "Unrecognized event";
    }
}

/**
 * Update Infiniband parameters using Commands
 *
 * @v ibdev        Infiniband device
 * @ret rc         Return status code
 */
static int golan_ib_update ( struct ib_device *ibdev ) {
    int rc;

    /* Get IB parameters */
    if ( ( rc = golan_get_ib_info ( ibdev ) ) != 0 )
        return rc;

    /* Notify Infiniband core of potential link state change */
    ib_link_state_changed ( ibdev );

    return 0;
}
static inline void golan_handle_port_event(struct golan *golan, struct golan_eqe *eqe)
{
    struct ib_device *ibdev;
    u8 port;

    port = (eqe->data.port.port >> 4) & 0xf;
    ibdev = golan->ports[port - 1].ibdev;

    if ( ! ib_is_open ( ibdev ) )
        return;

    switch (eqe->sub_type) {
    case GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
    case GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE:
        golan_ib_update ( ibdev );
        /* Fall through */
    case GOLAN_PORT_CHANGE_SUBTYPE_DOWN:
    case GOLAN_PORT_CHANGE_SUBTYPE_LID:
    case GOLAN_PORT_CHANGE_SUBTYPE_PKEY:
    case GOLAN_PORT_CHANGE_SUBTYPE_GUID:
    case GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED:
        DBGC( golan , "%s event %s(%d) (sub event %s(%d)) arrived on port %d\n",
              __FUNCTION__, golan_eqe_type_str(eqe->type), eqe->type,
              golan_eqe_port_subtype_str(eqe->sub_type),
              eqe->sub_type, port);
        break;
    default:
        DBGC (golan ,"%s Port event with unrecognized subtype: port %d, sub_type %d\n",
              __FUNCTION__, port, eqe->sub_type);
    }
}
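/*
 * As with CQEs, an EQE belongs to software only when its owner bit
 * matches the parity of the current pass over the EQ ring; otherwise the
 * entry still belongs to the hardware and NULL is returned so polling
 * stops.
 */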
static struct golan_eqe *golan_next_eqe_sw(struct golan_event_queue *eq)
{
    uint32_t entry = (eq->cons_index & (GOLAN_NUM_EQES - 1));
    struct golan_eqe *eqe = &(eq->eqes[entry]);

    return ((eqe->owner != ((eq->cons_index >> ilog2(GOLAN_NUM_EQES)) & 1)) ? NULL : eqe);
}
/**
 * Poll event queue
 *
 * @v ibdev        Infiniband device
 */
static void golan_poll_eq(struct ib_device *ibdev)
{
    struct golan *golan = ib_get_drvdata(ibdev);
    struct golan_event_queue *eq = &(golan->eq);
    struct golan_eqe *eqe;
    u32 cqn;
    int counter = 0;

    while ((eqe = golan_next_eqe_sw(eq)) && (counter < GOLAN_NUM_EQES)) {
        /*
         * Make sure we read EQ entry contents after we've
         * checked the ownership bit.
         */
        rmb();

        DBGC( golan , "%s eqn %d, eqe type %s\n", __FUNCTION__, eq->eqn,
              golan_eqe_type_str(eqe->type));
        switch (eqe->type) {
        case GOLAN_EVENT_TYPE_COMP:
            /* We don't need to handle completion events since we
             * poll all the CQs after polling the EQ */
            break;
        case GOLAN_EVENT_TYPE_PATH_MIG:
        case GOLAN_EVENT_TYPE_COMM_EST:
        case GOLAN_EVENT_TYPE_SQ_DRAINED:
        case GOLAN_EVENT_TYPE_SRQ_LAST_WQE:
        case GOLAN_EVENT_TYPE_WQ_CATAS_ERROR:
        case GOLAN_EVENT_TYPE_PATH_MIG_FAILED:
        case GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
        case GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR:
        case GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT:
        case GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR:
            DBGC( golan , "%s event %s(%d) arrived\n", __FUNCTION__,
                  golan_eqe_type_str(eqe->type), eqe->type);
            break;
        case GOLAN_EVENT_TYPE_CMD:
            // golan_cmd_comp_handler(be32_to_cpu(eqe->data.cmd.vector));
            break;
        case GOLAN_EVENT_TYPE_PORT_CHANGE:
            golan_handle_port_event(golan, eqe);
            break;
        case GOLAN_EVENT_TYPE_CQ_ERROR:
            cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
            DBGC (golan ,"CQ error on CQN 0x%x, syndrome 0x%x\n",
                  cqn, eqe->data.cq_err.syndrome);
            // mlx5_cq_event(dev, cqn, eqe->type);
            break;
        /*
         * The driver does not currently support dynamic memory requests
         * while the firmware is running; a follow-up change will allocate
         * FW pages once and never release them until driver shutdown, so
         * this request would remain unsupported (it is not issued anyway).
        case GOLAN_EVENT_TYPE_PAGE_REQUEST:
            {
                // we should check if we get this event while we
                // are waiting for a command
                u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
                s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);

                DBGC (golan ,"%s page request for func 0x%x, npages %d\n",
                      __FUNCTION__, func_id, npages);
                golan_provide_pages(golan, npages, func_id);
            }
            break;
        */
        default:
            DBGC (golan ,"%s Unhandled event 0x%x on EQ 0x%x\n", __FUNCTION__,
                  eqe->type, eq->eqn);
            break;
        }

        ++eq->cons_index;
        golan_eq_update_ci(eq, GOLAN_EQ_UNARMED);
        ++counter;
    }
}
/**
 * Attach to multicast group
 *
 * @v ibdev        Infiniband device
 * @v qp           Queue pair
 * @v gid          Multicast GID
 * @ret rc         Return status code
 */
static int golan_mcast_attach(struct ib_device *ibdev,
                              struct ib_queue_pair *qp,
                              union ib_gid *gid)
{
    struct golan *golan = ib_get_drvdata(ibdev);
    struct golan_cmd_layout *cmd;
    int rc;

    if ( qp == NULL ) {
        DBGC( golan, "%s: Invalid pointer, could not attach QPN to MCG\n",
              __FUNCTION__ );
        return -EFAULT;
    }

    cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ATTACH_TO_MCG, 0x0,
                    GEN_MBOX, NO_MBOX,
                    sizeof(struct golan_attach_mcg_mbox_in),
                    sizeof(struct golan_attach_mcg_mbox_out));
    ((struct golan_attach_mcg_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);

    memcpy(GET_INBOX(golan, GEN_MBOX), gid, sizeof(*gid));

    rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
    GOLAN_CHECK_RC_AND_CMD_STATUS( err_attach_to_mcg_cmd );

    DBGC( golan , "%s: QPN 0x%lx was attached to MCG\n", __FUNCTION__, qp->qpn);
    return 0;

err_attach_to_mcg_cmd:
    DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
    return rc;
}
/**
 * Detach from multicast group
 *
 * @v ibdev        Infiniband device
 * @v qp           Queue pair
 * @v gid          Multicast GID
 */
static void golan_mcast_detach(struct ib_device *ibdev,
                               struct ib_queue_pair *qp,
                               union ib_gid *gid)
{
    struct golan *golan = ib_get_drvdata(ibdev);
    struct golan_cmd_layout *cmd;
    int rc;

    cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DETACH_FROM_MCG, 0x0,
                    GEN_MBOX, NO_MBOX,
                    sizeof(struct golan_detach_mcg_mbox_in),
                    sizeof(struct golan_detach_mcg_mbox_out));
    ((struct golan_detach_mcg_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);

    memcpy(GET_INBOX(golan, GEN_MBOX), gid, sizeof(*gid));

    rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
    GOLAN_PRINT_RC_AND_CMD_STATUS;

    DBGC( golan , "%s: QPN 0x%lx was detached from MCG\n", __FUNCTION__, qp->qpn);
}
/**
 * Inform embedded subnet management agent of a received MAD
 *
 * @v ibdev        Infiniband device
 * @v mad          MAD
 * @ret rc         Return status code
 */
static int golan_inform_sma(struct ib_device *ibdev,
                            union ib_mad *mad)
{
    if (!ibdev || !mad) {
        return 1;
    }

    return 0;
}

static int golan_register_ibdev(struct golan_port *port)
{
    struct ib_device *ibdev = port->ibdev;
    int rc;

    golan_get_ib_info ( ibdev );
    /* Register Infiniband device */
    if ((rc = register_ibdev(ibdev)) != 0) {
        DBG ( "%s port %d could not register IB device: (rc = %d)\n",
              __FUNCTION__, ibdev->port, rc);
        return rc;
    }

    port->netdev = ipoib_netdev( ibdev );

    return 0;
}

static inline void golan_bring_down(struct golan *golan)
{
    DBGC(golan, "%s: start\n", __FUNCTION__);

    if (~golan->flags & GOLAN_OPEN) {
        DBGC(golan, "%s: end (already closed)\n", __FUNCTION__);
        return;
    }

    golan_destroy_mkey(golan);
    golan_dealloc_pd(golan);
    golan_destory_eq(golan);
    golan_dealloc_uar(golan);
    golan_teardown_hca(golan, GOLAN_TEARDOWN_GRACEFUL);
    golan_handle_pages(golan, GOLAN_REG_PAGES, GOLAN_PAGES_TAKE);
    golan_disable_hca(golan);
    golan_cmd_uninit(golan);
    golan->flags &= ~GOLAN_OPEN;
    DBGC(golan, "%s: end\n", __FUNCTION__);
}

static int golan_set_link_speed ( struct golan *golan ) {
    mlx_status status;
    int i = 0;
    int utils_inited = 0;

    if ( ! golan->utils ) {
        utils_inited = 1;
        status = init_mlx_utils ( & golan->utils, golan->pci );
        MLX_CHECK_STATUS ( golan->pci, status, utils_init_err, "mlx_utils_init failed" );
    }

    for ( i = 0; i < golan->caps.num_ports; ++i ) {
        status = mlx_set_link_speed ( golan->utils, i + 1, LINK_SPEED_IB, LINK_SPEED_SDR );
        MLX_CHECK_STATUS ( golan->pci, status, set_link_speed_err, "mlx_set_link_speed failed" );
    }

set_link_speed_err:
    if ( utils_inited )
        free_mlx_utils ( & golan->utils );
utils_init_err:
    return status;
}

static inline int golan_bring_up(struct golan *golan)
{
    int rc = 0;
    DBGC(golan, "%s\n", __FUNCTION__);

    if (golan->flags & GOLAN_OPEN)
        return 0;

    if (( rc = golan_cmd_init(golan) ))
        goto out;

    if (( rc = golan_core_enable_hca(golan) ))
        goto cmd_uninit;

    /* Query for need for boot pages */
    if (( rc = golan_handle_pages(golan, GOLAN_BOOT_PAGES, GOLAN_PAGES_GIVE) ))
        goto disable;

    if (( rc = golan_qry_hca_cap(golan) ))
        goto pages;

    if (( rc = golan_set_hca_cap(golan) ))
        goto pages;

    if (( rc = golan_handle_pages(golan, GOLAN_INIT_PAGES, GOLAN_PAGES_GIVE) ))
        goto pages;

    if (( rc = golan_set_link_speed ( golan ) ))
        goto pages_teardown;

    //Reg Init?
    if (( rc = golan_hca_init(golan) ))
        goto pages_2;

    if (( rc = golan_alloc_uar(golan) ))
        goto teardown;

    if (( rc = golan_create_eq(golan) ))
        goto de_uar;

    if (( rc = golan_alloc_pd(golan) ))
        goto de_eq;

    if (( rc = golan_create_mkey(golan) ))
        goto de_pd;

    golan->flags |= GOLAN_OPEN;
    return 0;
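    /*
     * Error unwind: the ladder below is reached only through the goto
     * labels above; each label undoes the corresponding bring-up step in
     * reverse order.  The unlabeled golan_destroy_mkey() call is
     * unreachable and appears to be kept only to document the full
     * teardown order.
     */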
    golan_destroy_mkey(golan);
de_pd:
    golan_dealloc_pd(golan);
de_eq:
    golan_destory_eq(golan);
de_uar:
    golan_dealloc_uar(golan);
teardown:
    golan_teardown_hca(golan, GOLAN_TEARDOWN_GRACEFUL);
pages_2:
pages_teardown:
    golan_handle_pages(golan, GOLAN_INIT_PAGES, GOLAN_PAGES_TAKE);
pages:
    golan_handle_pages(golan, GOLAN_BOOT_PAGES, GOLAN_PAGES_TAKE);
disable:
    golan_disable_hca(golan);
cmd_uninit:
    golan_cmd_uninit(golan);
out:
    return rc;
}

/**
 * Close Infiniband link
 *
 * @v ibdev        Infiniband device
 */
static void golan_ib_close ( struct ib_device *ibdev ) {
    struct golan *golan = NULL;

    DBG ( "%s start\n", __FUNCTION__ );
    if ( ! ibdev )
        return;
    golan = ib_get_drvdata ( ibdev );
    golan_bring_down ( golan );
    DBG ( "%s end\n", __FUNCTION__ );
}

/**
 * Initialise Infiniband link
 *
 * @v ibdev        Infiniband device
 * @ret rc         Return status code
 */
static int golan_ib_open ( struct ib_device *ibdev ) {
    struct golan *golan = NULL;

    DBG ( "%s start\n", __FUNCTION__ );
    if ( ! ibdev )
        return -EINVAL;
    golan = ib_get_drvdata ( ibdev );
    golan_bring_up ( golan );
    golan_ib_update ( ibdev );
    DBG ( "%s end\n", __FUNCTION__ );

    return 0;
}

/** Golan Infiniband operations */
static struct ib_device_operations golan_ib_operations = {
    .create_cq      = golan_create_cq,
    .destroy_cq     = golan_destroy_cq,
    .create_qp      = golan_create_qp,
    .modify_qp      = golan_modify_qp,
    .destroy_qp     = golan_destroy_qp,
    .post_send      = golan_post_send,
    .post_recv      = golan_post_recv,
    .poll_cq        = golan_poll_cq,
    .poll_eq        = golan_poll_eq,
    .open           = golan_ib_open,
    .close          = golan_ib_close,
    .mcast_attach   = golan_mcast_attach,
    .mcast_detach   = golan_mcast_detach,
    .set_port_info  = golan_inform_sma,
    .set_pkey_table = golan_inform_sma,
};
static int golan_probe_normal ( struct pci_device *pci ) {
    struct golan *golan;
    struct ib_device *ibdev;
    struct golan_port *port;
    int i;
    int rc = 0;

    golan = golan_alloc();
    if ( !golan ) {
        rc = -ENOMEM;
        goto err_golan_alloc;
    }

    /* at POST stage some BIOSes have limited available dynamic memory */
    if ( golan_init_fw_areas ( golan ) ) {
        rc = -ENOMEM;
        goto err_golan_golan_init_pages;
    }

    /* Setup PCI bus and HCA BAR */
    pci_set_drvdata( pci, golan );
    golan->pci = pci;
    golan_pci_init( golan );
    /* config command queues */
    if ( golan_bring_up( golan ) ) {
        DBGC (golan ,"golan bringup failed\n");
        rc = -1;
        goto err_golan_bringup;
    }

    if ( ! DEVICE_IS_CIB ( pci->device ) ) {
        if ( init_mlx_utils ( & golan->utils, pci ) ) {
            rc = -1;
            goto err_utils_init;
        }
    }
    /* Allocate Infiniband devices */
    for (i = 0; i < golan->caps.num_ports; ++i) {
        ibdev = alloc_ibdev( 0 );
        if ( !ibdev ) {
            rc = -ENOMEM;
            goto err_golan_probe_alloc_ibdev;
        }
        golan->ports[i].ibdev = ibdev;
        golan->ports[i].vep_number = 0;
        ibdev->op = &golan_ib_operations;
        ibdev->dev = &pci->dev;
        ibdev->port = (GOLAN_PORT_BASE + i);
        ib_set_drvdata( ibdev, golan );
    }

    /* Register devices */
    for ( i = 0; i < golan->caps.num_ports; ++i ) {
        port = &golan->ports[i];
        if ((rc = golan_register_ibdev ( port ) ) != 0 ) {
            goto err_golan_probe_register_ibdev;
        }
    }

    golan_bring_down ( golan );

    return 0;

    i = golan->caps.num_ports;
err_golan_probe_register_ibdev:
    for ( i-- ; ( signed int ) i >= 0 ; i-- )
        unregister_ibdev ( golan->ports[i].ibdev );

    i = golan->caps.num_ports;
err_golan_probe_alloc_ibdev:
    for ( i-- ; ( signed int ) i >= 0 ; i-- )
        ibdev_put ( golan->ports[i].ibdev );

    if ( ! DEVICE_IS_CIB ( pci->device ) ) {
        free_mlx_utils ( & golan->utils );
    }
err_utils_init:
    golan_bring_down ( golan );
err_golan_bringup:
    iounmap( golan->iseg );
    golan_free_fw_areas ( golan );
err_golan_golan_init_pages:
    free ( golan );
err_golan_alloc:
    DBGC (golan ,"%s rc = %d\n", __FUNCTION__, rc);
    return rc;
}

static void golan_remove_normal ( struct pci_device *pci ) {
    struct golan *golan = pci_get_drvdata(pci);
    struct golan_port *port;
    int i;

    DBGC(golan, "%s\n", __FUNCTION__);

    for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) {
        port = &golan->ports[i];
        unregister_ibdev ( port->ibdev );
    }
    for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) {
        netdev_nullify ( golan->ports[i].netdev );
    }
    for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) {
        ibdev_put ( golan->ports[i].ibdev );
    }
    if ( ! DEVICE_IS_CIB ( pci->device ) ) {
        free_mlx_utils ( & golan->utils );
    }
    iounmap( golan->iseg );
    golan_free_fw_areas ( golan );
    free(golan);
}
/***************************************************************************
 * NODNIC operations
 **************************************************************************/
static mlx_status shomron_tx_uar_send_db ( struct ib_device *ibdev,
                                           struct nodnic_send_wqbb *wqbb ) {
    mlx_status status = MLX_SUCCESS;
    struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
    struct shomron_nodnic_eth_send_wqe *eth_wqe =
        ( struct shomron_nodnic_eth_send_wqe * )wqbb;
    struct shomronprm_wqe_segment_ctrl_send *ctrl;

    if ( ! eth_wqe || ! flexboot_nodnic->device_priv.uar.virt ) {
        DBG("%s: Invalid parameters\n",__FUNCTION__);
        status = MLX_FAILED;
        goto err;
    }
    wmb();
    ctrl = & eth_wqe->ctrl;
    writeq(*((__be64 *)ctrl), flexboot_nodnic->device_priv.uar.virt +
           ( ( MLX_GET ( ctrl, wqe_index ) & 0x1 ) ? DB_BUFFER0_ODD_OFFSET
                                                   : DB_BUFFER0_EVEN_OFFSET ) );
err:
    return status;
}

static mlx_status shomron_fill_eth_send_wqe ( struct ib_device *ibdev,
        struct ib_queue_pair *qp, struct ib_address_vector *av __unused,
        struct io_buffer *iobuf, struct nodnic_send_wqbb *wqbb,
        unsigned long wqe_index ) {
    mlx_status status = MLX_SUCCESS;
    struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
    struct shomron_nodnic_eth_send_wqe *eth_wqe = NULL;
    struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
    struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp =
        ib_qp_get_drvdata ( qp );
    nodnic_qp *nodnic_qp = flexboot_nodnic_qp->nodnic_queue_pair;
    struct nodnic_send_ring *send_ring = &nodnic_qp->send;
    mlx_uint32 qpn = 0;

    eth_wqe = (struct shomron_nodnic_eth_send_wqe *)wqbb;
    memset ( ( ( ( void * ) eth_wqe ) ), 0,
             ( sizeof ( *eth_wqe ) ) );

    status = nodnic_port_get_qpn(&port->port_priv, &send_ring->nodnic_ring,
                                 &qpn);
    if ( status != MLX_SUCCESS ) {
        DBG("nodnic_port_get_qpn failed\n");
        goto err;
    }

#define SHOMRON_GENERATE_CQE 0x3
#define SHOMRON_INLINE_HEADERS_SIZE 18
#define SHOMRON_INLINE_HEADERS_OFFSET 32
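    /*
     * The first SHOMRON_INLINE_HEADERS_SIZE (18) bytes of the frame are
     * inlined into the WQE itself: the first two bytes go into the
     * control segment's inline_headers1 field and the remaining sixteen
     * are copied to SHOMRON_INLINE_HEADERS_OFFSET within the WQE, while
     * the rest of the packet is referenced through the data pointer below.
     */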
    MLX_FILL_2 ( &eth_wqe->ctrl, 0, opcode, FLEXBOOT_NODNIC_OPCODE_SEND,
                 wqe_index, wqe_index & 0xFFFF);
    MLX_FILL_2 ( &eth_wqe->ctrl, 1, ds, 0x4, qpn, qpn );
    MLX_FILL_1 ( &eth_wqe->ctrl, 2,
                 ce, SHOMRON_GENERATE_CQE /* generate completion */
                 );
    MLX_FILL_2 ( &eth_wqe->ctrl, 7,
                 inline_headers1,
                 cpu_to_be16(*(mlx_uint16 *)iobuf->data),
                 inline_headers_size, SHOMRON_INLINE_HEADERS_SIZE
                 );
    memcpy((void *)&eth_wqe->ctrl + SHOMRON_INLINE_HEADERS_OFFSET,
           iobuf->data + 2, SHOMRON_INLINE_HEADERS_SIZE - 2);
    iob_pull(iobuf, SHOMRON_INLINE_HEADERS_SIZE);
    MLX_FILL_1 ( &eth_wqe->data[0], 0,
                 byte_count, iob_len ( iobuf ) );
    MLX_FILL_1 ( &eth_wqe->data[0], 1, l_key,
                 flexboot_nodnic->device_priv.lkey );
    MLX_FILL_H ( &eth_wqe->data[0], 2,
                 local_address_h, virt_to_bus ( iobuf->data ) );
    MLX_FILL_1 ( &eth_wqe->data[0], 3,
                 local_address_l, virt_to_bus ( iobuf->data ) );
err:
    return status;
}

static mlx_status shomron_fill_completion( void *cqe, struct cqe_data *cqe_data ) {
    union shomronprm_completion_entry *cq_entry;
    uint32_t opcode;

    cq_entry = (union shomronprm_completion_entry *)cqe;
    cqe_data->owner = MLX_GET ( &cq_entry->normal, owner );
    opcode = MLX_GET ( &cq_entry->normal, opcode );
#define FLEXBOOT_NODNIC_OPCODE_CQ_SEND 0
#define FLEXBOOT_NODNIC_OPCODE_CQ_RECV 2
#define FLEXBOOT_NODNIC_OPCODE_CQ_SEND_ERR 13
#define FLEXBOOT_NODNIC_OPCODE_CQ_RECV_ERR 14
    cqe_data->is_error =
        ( opcode >= FLEXBOOT_NODNIC_OPCODE_CQ_RECV_ERR);
    if ( cqe_data->is_error ) {
        cqe_data->syndrome = MLX_GET ( &cq_entry->error, syndrome );
        cqe_data->vendor_err_syndrome =
            MLX_GET ( &cq_entry->error, vendor_error_syndrome );
        cqe_data->is_send =
            (opcode == FLEXBOOT_NODNIC_OPCODE_CQ_SEND_ERR);
    } else {
        cqe_data->is_send =
            (opcode == FLEXBOOT_NODNIC_OPCODE_CQ_SEND);
        cqe_data->wqe_counter = MLX_GET ( &cq_entry->normal, wqe_counter );
        cqe_data->byte_cnt = MLX_GET ( &cq_entry->normal, byte_cnt );
    }
    if ( cqe_data->is_send == TRUE )
        cqe_data->qpn = MLX_GET ( &cq_entry->normal, qpn );
    else
        cqe_data->qpn = MLX_GET ( &cq_entry->normal, srqn );

    return 0;
}
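/*
 * Initialise every CQE as hardware-owned (owner bit set) so that, on the
 * first pass over the ring, entries not yet written by the device are
 * never mistaken for valid completions.
 */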
static mlx_status shomron_cqe_set_owner ( void *cq, unsigned int num_cqes ) {
    unsigned int i = 0;
    union shomronprm_completion_entry *cq_list;

    cq_list = (union shomronprm_completion_entry *)cq;
    for ( ; i < num_cqes ; i++ )
        MLX_FILL_1 ( &cq_list[i].normal, 15, owner, 1 );
    return 0;
}

static mlx_size shomron_get_cqe_size ( void ) {
    return sizeof ( union shomronprm_completion_entry );
}
struct flexboot_nodnic_callbacks shomron_nodnic_callbacks = {
    .get_cqe_size            = shomron_get_cqe_size,
    .fill_send_wqe[IB_QPT_ETH] = shomron_fill_eth_send_wqe,
    .fill_completion         = shomron_fill_completion,
    .cqe_set_owner           = shomron_cqe_set_owner,
    .irq                     = flexboot_nodnic_eth_irq,
    .tx_uar_send_doorbell_fn = shomron_tx_uar_send_db,
};

static int shomron_nodnic_is_supported ( struct pci_device *pci ) {
    if ( DEVICE_IS_CIB ( pci->device ) )
        return 0;

    return flexboot_nodnic_is_supported ( pci );
}
/**************************************************************************/
static int golan_probe ( struct pci_device *pci ) {
    int rc = -ENOTSUP;

    DBG ( "%s: start\n", __FUNCTION__ );

    if ( ! pci ) {
        DBG ( "%s: PCI is NULL\n", __FUNCTION__ );
        rc = -EINVAL;
        goto probe_done;
    }

    if ( shomron_nodnic_is_supported ( pci ) ) {
        DBG ( "%s: Using NODNIC driver\n", __FUNCTION__ );
        rc = flexboot_nodnic_probe ( pci, &shomron_nodnic_callbacks, NULL );
    } else {
        DBG ( "%s: Using normal driver\n", __FUNCTION__ );
        rc = golan_probe_normal ( pci );
    }

probe_done:
    DBG ( "%s: rc = %d\n", __FUNCTION__, rc );
    return rc;
}

static void golan_remove ( struct pci_device *pci ) {
    DBG ( "%s: start\n", __FUNCTION__ );

    if ( ! shomron_nodnic_is_supported ( pci ) ) {
        DBG ( "%s: Using normal driver remove\n", __FUNCTION__ );
        golan_remove_normal ( pci );
        return;
    }

    DBG ( "%s: Using NODNIC driver remove\n", __FUNCTION__ );
    flexboot_nodnic_remove ( pci );

    DBG ( "%s: end\n", __FUNCTION__ );
}

static struct pci_device_id golan_nics[] = {
    PCI_ROM ( 0x15b3, 0x1011, "ConnectIB", "ConnectIB HCA driver: DevID 4113", 0 ),
    PCI_ROM ( 0x15b3, 0x1013, "ConnectX-4", "ConnectX-4 HCA driver, DevID 4115", 0 ),
    PCI_ROM ( 0x15b3, 0x1015, "ConnectX-4Lx", "ConnectX-4Lx HCA driver, DevID 4117", 0 ),
    PCI_ROM ( 0x15b3, 0x1017, "ConnectX-5", "ConnectX-5 HCA driver, DevID 4119", 0 ),
    PCI_ROM ( 0x15b3, 0x1019, "ConnectX-5EX", "ConnectX-5EX HCA driver, DevID 4121", 0 ),
    PCI_ROM ( 0x15b3, 0x101b, "ConnectX-6", "ConnectX-6 HCA driver, DevID 4123", 0 ),
    PCI_ROM ( 0x15b3, 0x101d, "ConnectX-6DX", "ConnectX-6DX HCA driver, DevID 4125", 0 ),
    PCI_ROM ( 0x15b3, 0xa2d2, "BlueField", "BlueField integrated ConnectX-5 network controller HCA driver, DevID 41682", 0 ),
};

struct pci_driver golan_driver __pci_driver = {
    .ids      = golan_nics,
    .id_count = (sizeof(golan_nics) / sizeof(golan_nics[0])),
    .probe    = golan_probe,
    .remove   = golan_remove,
};