You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

ib_mt25218.c 53KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929
  1. /*
  2. This software is available to you under a choice of one of two
  3. licenses. You may choose to be licensed under the terms of the GNU
  4. General Public License (GPL) Version 2, available at
  5. <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD
  6. license, available in the LICENSE.TXT file accompanying this
  7. software. These details are also available at
  8. <http://openib.org/license.html>.
  9. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  10. EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  11. MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  12. NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  13. BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  14. ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  15. CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  16. SOFTWARE.
  17. Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
  18. */
  19. #include "mt25218.h"
  20. #include "ib_driver.h"
  21. #include <gpxe/pci.h>
  22. #define MOD_INC(counter, max_count) (counter) = ((counter)+1) & ((max_count) - 1)
  23. #define breakpoint {volatile __u32 *p=(__u32 *)0x1234;printf("breakpoint\n");do {} while((*p) != 0x1234);}
  24. #define WRITE_BYTE_VOL(addr, off, val) \
  25. do { \
  26. (*((volatile __u8 *)(((volatile __u8 *)(addr)) + off))) = (val); \
  27. } while(0)
  28. #define WRITE_WORD_VOL(addr, off, val) \
  29. do { \
  30. (*((volatile __u16 *)(((volatile __u8 *)(addr)) + off))) = (val); \
  31. } while(0)
  32. #define WRITE_DWORD_VOL(addr, off, val) \
  33. do { \
  34. (*((volatile __u32 *)(((volatile __u8 *)(addr)) + off))) = (val); \
  35. } while(0)
/*
 * All queue/mailbox buffers the driver shares with the HCA, gathered into
 * one struct so that a single registered memory region can cover them
 * (see src_buf / memreg_size below and prep_sw2hw_mpt_buf()).
 */
struct device_buffers_st {
	/* inprm and outprm do not have alignment constraint since that
	   is achieved programmatically */
	u8 inprm_buf[INPRM_BUF_SZ];	/* command input-parameter buffer */
	u8 outprm_buf[OUTPRM_BUF_SZ];	/* command output-parameter buffer */
	union recv_wqe_u mads_qp_rcv_queue[NUM_MADS_RCV_WQES]
	    __attribute__ ((aligned(RECV_WQE_U_ALIGN)));
	union recv_wqe_u ipoib_qp_rcv_queue[NUM_IPOIB_RCV_WQES]
	    __attribute__ ((aligned(RECV_WQE_U_ALIGN)));
	union ud_send_wqe_u mads_qp_snd_queue[NUM_MADS_SND_WQES]
	    __attribute__ ((aligned(UD_SEND_WQE_U_ALIGN)));
	union ud_send_wqe_u ipoib_qp_snd_queue[NUM_IPOIB_SND_WQES]
	    __attribute__ ((aligned(UD_SEND_WQE_U_ALIGN)));
	/* event-queue ring; entry-size alignment keeps entries from
	   straddling their natural boundaries */
	struct eqe_t eq_buf[1 << LOG2_EQ_SZ]
	    __attribute__ ((aligned(sizeof(struct eqe_t))));
	union cqe_st mads_snd_cq_buf[NUM_MADS_SND_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	union cqe_st ipoib_snd_cq_buf[NUM_IPOIB_SND_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	union cqe_st mads_rcv_cq_buf[NUM_MADS_RCV_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	union cqe_st ipoib_rcv_cq_buf[NUM_IPOIB_RCV_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	union ud_av_u av_array[NUM_AVS];	/* UD address-vector pool */
} __attribute__ ((packed));

/* the struct is placed at the first 4KB boundary inside src_buf ... */
#define STRUCT_ALIGN_SZ 4096
/* ... so over-allocate to guarantee an aligned copy always fits */
#define SRC_BUF_SZ (sizeof(struct device_buffers_st) + STRUCT_ALIGN_SZ - 1)
/* The following objects must be kept in this order: the memory region
   registered with the HCA starts at the 4KB-aligned address inside
   src_buf and its length is computed up to &memreg_size (see
   init_dev_data()), so everything declared in between is covered. */
static u8 src_buf[SRC_BUF_SZ];
static struct ib_buffers_st ib_buffers;
static __u32 memreg_size;	/* length of the registered region */
/* end of order constraint */

/* physical-memory window handed to the firmware (MAP_FA/ICM mapping):
   base of the reserved area plus a running allocation offset */
struct phys_mem_desc_st {
	unsigned long base;
	unsigned long offset;
};
static struct phys_mem_desc_st phys_mem;

static struct dev_pci_struct memfree_pci_dev;	/* BARs, config snapshot, cr-space/UAR mappings */
static struct device_buffers_st *dev_buffers_p;	/* aligned view into src_buf */
static struct device_ib_data_st dev_ib_data;	/* global driver IB state */
  77. static int gw_write_cr(__u32 addr, __u32 data)
  78. {
  79. writel(htonl(data), memfree_pci_dev.cr_space + addr);
  80. return 0;
  81. }
  82. static int gw_read_cr(__u32 addr, __u32 * result)
  83. {
  84. *result = ntohl(readl(memfree_pci_dev.cr_space + addr));
  85. return 0;
  86. }
  87. static int reset_hca(void)
  88. {
  89. return gw_write_cr(MEMFREE_RESET_OFFSET, 1);
  90. }
  91. static int ib_device_init(struct pci_device *dev)
  92. {
  93. int i;
  94. int rc;
  95. tprintf("");
  96. memset(&dev_ib_data, 0, sizeof dev_ib_data);
  97. /* save bars */
  98. tprintf("bus=%d devfn=0x%x", dev->bus, dev->devfn);
  99. for (i = 0; i < 6; ++i) {
  100. memfree_pci_dev.dev.bar[i] =
  101. pci_bar_start(dev, PCI_BASE_ADDRESS_0 + (i << 2));
  102. tprintf("bar[%d]= 0x%08lx", i, memfree_pci_dev.dev.bar[i]);
  103. }
  104. tprintf("");
  105. /* save config space */
  106. for (i = 0; i < 64; ++i) {
  107. rc = pci_read_config_dword(dev, i << 2,
  108. &memfree_pci_dev.dev.
  109. dev_config_space[i]);
  110. if (rc) {
  111. eprintf("");
  112. return rc;
  113. }
  114. tprintf("config[%d]= 0x%08lx", i << 2,
  115. memfree_pci_dev.dev.dev_config_space[i]);
  116. }
  117. tprintf("");
  118. memfree_pci_dev.dev.dev = dev;
  119. /* map cr-space */
  120. memfree_pci_dev.cr_space =
  121. ioremap(memfree_pci_dev.dev.bar[0], 0x100000);
  122. if (!memfree_pci_dev.cr_space) {
  123. eprintf("");
  124. return -1;
  125. }
  126. /* map uar */
  127. memfree_pci_dev.uar =
  128. ioremap(memfree_pci_dev.dev.bar[2] + UAR_IDX * 0x1000, 0x1000);
  129. if (!memfree_pci_dev.uar) {
  130. eprintf("");
  131. return -1;
  132. }
  133. tprintf("uar_base (pa:va) = 0x%lx 0x%lx",
  134. memfree_pci_dev.dev.bar[2] + UAR_IDX * 0x1000,
  135. memfree_pci_dev.uar);
  136. tprintf("");
  137. return 0;
  138. }
  139. static inline unsigned long lalign(unsigned long buf, unsigned long align)
  140. {
  141. return (unsigned long)((buf + align - 1) &
  142. (~(((unsigned long)align) - 1)));
  143. }
  144. static int init_dev_data(void)
  145. {
  146. unsigned long tmp;
  147. unsigned long reserve_size = 32 * 1024 * 1024;
  148. tmp = lalign(virt_to_bus(src_buf), STRUCT_ALIGN_SZ);
  149. dev_buffers_p = bus_to_virt(tmp);
  150. memreg_size = (__u32) (&memreg_size) - (__u32) dev_buffers_p;
  151. tprintf("src_buf=0x%lx, dev_buffers_p=0x%lx, memreg_size=0x%x", src_buf,
  152. dev_buffers_p, memreg_size);
  153. tprintf("inprm: va=0x%lx, pa=0x%lx", dev_buffers_p->inprm_buf,
  154. virt_to_bus(dev_buffers_p->inprm_buf));
  155. tprintf("outprm: va=0x%lx, pa=0x%lx", dev_buffers_p->outprm_buf,
  156. virt_to_bus(dev_buffers_p->outprm_buf));
  157. phys_mem.base =
  158. (virt_to_phys(_text) - reserve_size) & (~(reserve_size - 1));
  159. phys_mem.offset = 0;
  160. return 0;
  161. }
  162. static int restore_config(void)
  163. {
  164. int i;
  165. int rc;
  166. for (i = 0; i < 64; ++i) {
  167. if (i != 22 && i != 23) {
  168. rc = pci_write_config_dword(memfree_pci_dev.dev.dev,
  169. i << 2,
  170. memfree_pci_dev.dev.
  171. dev_config_space[i]);
  172. if (rc) {
  173. return rc;
  174. }
  175. }
  176. }
  177. return 0;
  178. }
/*
 * Serialize the software INIT_HCA parameters (init_hca_p) into the
 * hardware INIT_HCA mailbox layout at buf.  The *_base_addr_l fields of
 * several contexts are stored right-shifted because the hardware field
 * is narrower than 32 bits (the low bits are implicitly zero / aligned).
 */
static void prep_init_hca_buf(struct init_hca_st *init_hca_p, void *buf)
{
	unsigned long ptr;
	__u8 shift;
	memset(buf, 0, MT_STRUCT_SIZE(arbelprm_init_hca_st));
	/* QPC/EEC/CQC/EQC/RDB parameter section */
	ptr = (unsigned long)buf +
	    MT_BYTE_OFFSET(arbelprm_init_hca_st,
			   qpc_eec_cqc_eqc_rdb_parameters);
	shift = 32 - MT_BIT_SIZE(arbelprm_qpcbaseaddr_st, qpc_base_addr_l);
	INS_FLD(init_hca_p->qpc_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		qpc_base_addr_h);
	INS_FLD(init_hca_p->qpc_base_addr_l >> shift, ptr,
		arbelprm_qpcbaseaddr_st, qpc_base_addr_l);
	INS_FLD(init_hca_p->log_num_of_qp, ptr, arbelprm_qpcbaseaddr_st,
		log_num_of_qp);
	shift = 32 - MT_BIT_SIZE(arbelprm_qpcbaseaddr_st, eec_base_addr_l);
	INS_FLD(init_hca_p->eec_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		eec_base_addr_h);
	INS_FLD(init_hca_p->eec_base_addr_l >> shift, ptr,
		arbelprm_qpcbaseaddr_st, eec_base_addr_l);
	INS_FLD(init_hca_p->log_num_of_ee, ptr, arbelprm_qpcbaseaddr_st,
		log_num_of_ee);
	shift = 32 - MT_BIT_SIZE(arbelprm_qpcbaseaddr_st, srqc_base_addr_l);
	INS_FLD(init_hca_p->srqc_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		srqc_base_addr_h);
	INS_FLD(init_hca_p->srqc_base_addr_l >> shift, ptr,
		arbelprm_qpcbaseaddr_st, srqc_base_addr_l);
	INS_FLD(init_hca_p->log_num_of_srq, ptr, arbelprm_qpcbaseaddr_st,
		log_num_of_srq);
	shift = 32 - MT_BIT_SIZE(arbelprm_qpcbaseaddr_st, cqc_base_addr_l);
	INS_FLD(init_hca_p->cqc_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		cqc_base_addr_h);
	INS_FLD(init_hca_p->cqc_base_addr_l >> shift, ptr,
		arbelprm_qpcbaseaddr_st, cqc_base_addr_l);
	INS_FLD(init_hca_p->log_num_of_cq, ptr, arbelprm_qpcbaseaddr_st,
		log_num_of_cq);
	/* extended QP/EE contexts: full 32-bit addresses, no shift */
	INS_FLD(init_hca_p->eqpc_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		eqpc_base_addr_h);
	INS_FLD(init_hca_p->eqpc_base_addr_l, ptr, arbelprm_qpcbaseaddr_st,
		eqpc_base_addr_l);
	INS_FLD(init_hca_p->eeec_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		eeec_base_addr_h);
	INS_FLD(init_hca_p->eeec_base_addr_l, ptr, arbelprm_qpcbaseaddr_st,
		eeec_base_addr_l);
	shift = 32 - MT_BIT_SIZE(arbelprm_qpcbaseaddr_st, eqc_base_addr_l);
	INS_FLD(init_hca_p->eqc_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		eqc_base_addr_h);
	INS_FLD(init_hca_p->eqc_base_addr_l >> shift, ptr,
		arbelprm_qpcbaseaddr_st, eqc_base_addr_l);
	/* note: the hardware field is named log_num_eq, not log_num_of_eq */
	INS_FLD(init_hca_p->log_num_of_eq, ptr, arbelprm_qpcbaseaddr_st,
		log_num_eq);
	INS_FLD(init_hca_p->rdb_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		rdb_base_addr_h);
	INS_FLD(init_hca_p->rdb_base_addr_l, ptr, arbelprm_qpcbaseaddr_st,
		rdb_base_addr_l);
	/* multicast parameter section */
	ptr = (unsigned long)buf +
	    MT_BYTE_OFFSET(arbelprm_init_hca_st, multicast_parameters);
	INS_FLD(init_hca_p->mc_base_addr_h, ptr, arbelprm_multicastparam_st,
		mc_base_addr_h);
	INS_FLD(init_hca_p->mc_base_addr_l, ptr, arbelprm_multicastparam_st,
		mc_base_addr_l);
	INS_FLD(init_hca_p->log_mc_table_entry_sz, ptr,
		arbelprm_multicastparam_st, log_mc_table_entry_sz);
	INS_FLD(init_hca_p->mc_table_hash_sz, ptr, arbelprm_multicastparam_st,
		mc_table_hash_sz);
	INS_FLD(init_hca_p->log_mc_table_sz, ptr, arbelprm_multicastparam_st,
		log_mc_table_sz);
	/* translation/protection table (MPT/MTT) section */
	ptr = (unsigned long)buf +
	    MT_BYTE_OFFSET(arbelprm_init_hca_st, tpt_parameters);
	INS_FLD(init_hca_p->mpt_base_addr_h, ptr, arbelprm_tptparams_st,
		mpt_base_adr_h);
	INS_FLD(init_hca_p->mpt_base_addr_l, ptr, arbelprm_tptparams_st,
		mpt_base_adr_l);
	INS_FLD(init_hca_p->log_mpt_sz, ptr, arbelprm_tptparams_st, log_mpt_sz);
	INS_FLD(init_hca_p->mtt_base_addr_h, ptr, arbelprm_tptparams_st,
		mtt_base_addr_h);
	INS_FLD(init_hca_p->mtt_base_addr_l, ptr, arbelprm_tptparams_st,
		mtt_base_addr_l);
	/* UAR parameter section */
	ptr = (unsigned long)buf +
	    MT_BYTE_OFFSET(arbelprm_init_hca_st, uar_parameters);
	INS_FLD(init_hca_p->log_max_uars, ptr, arbelprm_uar_params_st,
		log_max_uars);
}
/*
 * Fill a SW2HW_MPT mailbox describing the driver's single memory region:
 * local read/write, physical addressing, region (not window), protection
 * domain GLOBAL_PD, keyed by mkey, starting at the shared buffer struct
 * and memreg_size bytes long (set up by init_dev_data()).
 * NOTE(review): buf is not zeroed here, unlike the other prep_* helpers
 * -- presumably the caller provides a cleared mailbox; confirm.
 */
static void prep_sw2hw_mpt_buf(void *buf, __u32 mkey)
{
	INS_FLD(1, buf, arbelprm_mpt_st, lw);	/* local write */
	INS_FLD(1, buf, arbelprm_mpt_st, lr);	/* local read */
	INS_FLD(1, buf, arbelprm_mpt_st, pa);	/* physical addressing */
	INS_FLD(1, buf, arbelprm_mpt_st, r_w);	/* region, not window */
	INS_FLD(mkey, buf, arbelprm_mpt_st, mem_key);
	INS_FLD(GLOBAL_PD, buf, arbelprm_mpt_st, pd);
	INS_FLD(virt_to_bus(dev_buffers_p), buf, arbelprm_mpt_st,
		start_address_l);
	INS_FLD(memreg_size, buf, arbelprm_mpt_st, reg_wnd_len_l);
}
/*
 * Fill a SW2HW_EQ mailbox (event queue context) for the EQ whose ring of
 * 2^LOG2_EQ_SZ entries lives at eq_buf, accessed through the driver's
 * memory key.
 */
static void prep_sw2hw_eq_buf(void *buf, struct eqe_t *eq_buf)
{
	memset(buf, 0, MT_STRUCT_SIZE(arbelprm_eqc_st));
	INS_FLD(0xa, buf, arbelprm_eqc_st, st);	/* fired */
	INS_FLD(virt_to_bus(eq_buf), buf, arbelprm_eqc_st, start_address_l);
	INS_FLD(LOG2_EQ_SZ, buf, arbelprm_eqc_st, log_eq_size);
	INS_FLD(GLOBAL_PD, buf, arbelprm_eqc_st, pd);
	INS_FLD(dev_ib_data.mkey, buf, arbelprm_eqc_st, lkey);
}
  283. static void init_eq_buf(void *eq_buf)
  284. {
  285. struct eqe_t *eq = eq_buf;
  286. int i, num_eqes = 1 << LOG2_EQ_SZ;
  287. memset(eq, 0, num_eqes * sizeof eq[0]);
  288. for (i = 0; i < num_eqes; ++i)
  289. WRITE_BYTE_VOL(&eq[i], EQE_OWNER_OFFSET, EQE_OWNER_VAL_HW);
  290. }
/*
 * Fill an INIT_IB mailbox with the port capabilities advertised when the
 * IB port is brought up: 2048-byte MTU cap, port width cap 3, 1 VL,
 * a single GID and 64 pkeys.
 * NOTE(review): the literal 3 for port_width_cap presumably encodes
 * 1x+4x support -- confirm against the PRM.
 */
static void prep_init_ib_buf(void *buf)
{
	memset(buf, 0, MT_STRUCT_SIZE(arbelprm_init_ib_st));
	INS_FLD(MTU_2048, buf, arbelprm_init_ib_st, mtu_cap);
	INS_FLD(3, buf, arbelprm_init_ib_st, port_width_cap);
	INS_FLD(1, buf, arbelprm_init_ib_st, vl_cap);
	INS_FLD(1, buf, arbelprm_init_ib_st, max_gid);
	INS_FLD(64, buf, arbelprm_init_ib_st, max_pkey);
}
/*
 * Fill a SW2HW_CQ mailbox (completion queue context) for CQ number cqn
 * whose CQE ring is at cq_buf, reporting completion events to EQ eqn.
 * cq_ci_db_record / cq_state_db_record are the indices of this CQ's
 * consumer-index and arm/state doorbell records in the UAR context page
 * (see init_uar_context()).
 */
static void prep_sw2hw_cq_buf(void *buf, __u8 eqn,
			      __u32 cqn,
			      union cqe_st *cq_buf,
			      __u32 cq_ci_db_record, __u32 cq_state_db_record)
{
	memset(buf, 0, MT_STRUCT_SIZE(arbelprm_completion_queue_context_st));
	INS_FLD(0xA, buf, arbelprm_completion_queue_context_st, st);
	INS_FLD(virt_to_bus(cq_buf), buf, arbelprm_completion_queue_context_st,
		start_address_l);
	INS_FLD(LOG2_CQ_SZ, buf, arbelprm_completion_queue_context_st,
		log_cq_size);
	INS_FLD(dev_ib_data.uar_idx, buf, arbelprm_completion_queue_context_st,
		usr_page);
	INS_FLD(eqn, buf, arbelprm_completion_queue_context_st, c_eqn);
	INS_FLD(GLOBAL_PD, buf, arbelprm_completion_queue_context_st, pd);
	INS_FLD(dev_ib_data.mkey, buf, arbelprm_completion_queue_context_st,
		l_key);
	INS_FLD(cqn, buf, arbelprm_completion_queue_context_st, cqn);
	INS_FLD(cq_ci_db_record, buf, arbelprm_completion_queue_context_st,
		cq_ci_db_record);
	INS_FLD(cq_state_db_record, buf, arbelprm_completion_queue_context_st,
		cq_state_db_record);
}
/*
 * Fill the RST2INIT_QPEE mailbox that moves a UD QP from RESET to INIT:
 * service type UD, migrated path state, the given send/receive CQs,
 * queue sizes/strides, WQE base addresses (right-shifted to fit the
 * narrower hardware fields), UAR doorbell-record indices, qkey and the
 * driver's port number in the primary address path.
 */
static void prep_rst2init_qpee_buf(void *buf,
				   __u32 snd_cqn,
				   __u32 rcv_cqn,
				   __u32 qkey,
				   __u32 log_rq_size,
				   __u32 log_rq_stride,
				   __u32 log_sq_size,
				   __u32 log_sq_stride,
				   __u32 snd_wqe_base_adr_l,
				   __u32 snd_db_record_index,
				   __u32 rcv_wqe_base_adr_l,
				   __u32 rcv_db_record_index)
{
	void *tmp;
	int shift;
	struct qp_ee_state_tarnisition_st *prm = buf;
	memset(buf, 0, sizeof *prm);
	tprintf("snd_cqn=0x%lx", snd_cqn);
	tprintf("rcv_cqn=0x%lx", rcv_cqn);
	tprintf("qkey=0x%lx", qkey);
	tprintf("log_rq_size=0x%lx", log_rq_size);
	tprintf("log_rq_stride=0x%lx", log_rq_stride);
	tprintf("log_sq_size=0x%lx", log_sq_size);
	tprintf("log_sq_stride=0x%lx", log_sq_stride);
	tprintf("snd_wqe_base_adr_l=0x%lx", snd_wqe_base_adr_l);
	tprintf("snd_db_record_index=0x%lx", snd_db_record_index);
	tprintf("rcv_wqe_base_adr_l=0x%lx", rcv_wqe_base_adr_l);
	tprintf("rcv_db_record_index=0x%lx", rcv_db_record_index);
	tmp = &prm->ctx;
	INS_FLD(TS_UD, tmp, arbelprm_queue_pair_ee_context_entry_st, st);
	INS_FLD(PM_STATE_MIGRATED, tmp, arbelprm_queue_pair_ee_context_entry_st,
		pm_state);
	INS_FLD(1, tmp, arbelprm_queue_pair_ee_context_entry_st, de);
	INS_FLD(MTU_2048, tmp, arbelprm_queue_pair_ee_context_entry_st, mtu);
	INS_FLD(11, tmp, arbelprm_queue_pair_ee_context_entry_st, msg_max);
	INS_FLD(log_rq_size, tmp, arbelprm_queue_pair_ee_context_entry_st,
		log_rq_size);
	INS_FLD(log_rq_stride, tmp, arbelprm_queue_pair_ee_context_entry_st,
		log_rq_stride);
	INS_FLD(log_sq_size, tmp, arbelprm_queue_pair_ee_context_entry_st,
		log_sq_size);
	INS_FLD(log_sq_stride, tmp, arbelprm_queue_pair_ee_context_entry_st,
		log_sq_stride);
	INS_FLD(dev_ib_data.uar_idx, tmp,
		arbelprm_queue_pair_ee_context_entry_st, usr_page);
	INS_FLD(GLOBAL_PD, tmp, arbelprm_queue_pair_ee_context_entry_st, pd);
	INS_FLD(dev_ib_data.mkey, tmp, arbelprm_queue_pair_ee_context_entry_st,
		wqe_lkey);
	/* ssc/rsc: generate CQEs for all send/receive completions */
	INS_FLD(1, tmp, arbelprm_queue_pair_ee_context_entry_st, ssc);
	INS_FLD(snd_cqn, tmp, arbelprm_queue_pair_ee_context_entry_st, cqn_snd);
	/* the WQE base address field is narrower than 32 bits, so drop the
	   (alignment-guaranteed) low bits */
	shift =
	    32 - MT_BIT_SIZE(arbelprm_queue_pair_ee_context_entry_st,
			     snd_wqe_base_adr_l);
	INS_FLD(snd_wqe_base_adr_l >> shift, tmp,
		arbelprm_queue_pair_ee_context_entry_st, snd_wqe_base_adr_l);
	INS_FLD(snd_db_record_index, tmp,
		arbelprm_queue_pair_ee_context_entry_st, snd_db_record_index);
	INS_FLD(1, tmp, arbelprm_queue_pair_ee_context_entry_st, rsc);
	INS_FLD(rcv_cqn, tmp, arbelprm_queue_pair_ee_context_entry_st, cqn_rcv);
	shift =
	    32 - MT_BIT_SIZE(arbelprm_queue_pair_ee_context_entry_st,
			     rcv_wqe_base_adr_l);
	INS_FLD(rcv_wqe_base_adr_l >> shift, tmp,
		arbelprm_queue_pair_ee_context_entry_st, rcv_wqe_base_adr_l);
	INS_FLD(rcv_db_record_index, tmp,
		arbelprm_queue_pair_ee_context_entry_st, rcv_db_record_index);
	INS_FLD(qkey, tmp, arbelprm_queue_pair_ee_context_entry_st, q_key);
	/* primary address path: only the port number is set here */
	tmp =
	    (__u8 *) (&prm->ctx) +
	    MT_BYTE_OFFSET(arbelprm_queue_pair_ee_context_entry_st,
			   primary_address_path);
	INS_FLD(dev_ib_data.port, tmp, arbelprm_address_path_st, port_number);
}
  396. static void prep_init2rtr_qpee_buf(void *buf)
  397. {
  398. struct qp_ee_state_tarnisition_st *prm;
  399. prm = (struct qp_ee_state_tarnisition_st *)buf;
  400. memset(prm, 0, sizeof *prm);
  401. INS_FLD(MTU_2048, &prm->ctx, arbelprm_queue_pair_ee_context_entry_st,
  402. mtu);
  403. INS_FLD(11, &prm->ctx, arbelprm_queue_pair_ee_context_entry_st,
  404. msg_max);
  405. }
/* Intentionally empty: the address-vector array apparently needs no
   pre-initialization on this device.  NOTE(review): confirm this is
   deliberate and not a stubbed-out TODO. */
static void init_av_array(void)
{
}
  409. /*
  410. * my_log2()
  411. */
  412. static int my_log2(unsigned long arg)
  413. {
  414. int i;
  415. __u32 tmp;
  416. if (arg == 0) {
  417. return INT_MIN; /* log2(0) = -infinity */
  418. }
  419. tmp = 1;
  420. i = 0;
  421. while (tmp < arg) {
  422. tmp = tmp << 1;
  423. ++i;
  424. }
  425. return i;
  426. }
  427. /*
  428. * get_req_icm_pages
  429. */
  430. static unsigned long get_req_icm_pages(unsigned long log2_reserved,
  431. unsigned long app_rsrc,
  432. unsigned long entry_size,
  433. unsigned long *log2_entries_p)
  434. {
  435. unsigned long size;
  436. unsigned long log2_entries;
  437. log2_entries = my_log2((1 << log2_reserved) + app_rsrc);
  438. *log2_entries_p = log2_entries;
  439. size = (1 << log2_entries) * entry_size;
  440. return (size + 4095) >> 12;
  441. }
/*
 * Lay out the doorbell records in the driver's UAR context page: one
 * 8-byte record per slot at a fixed *_IDX, each tagged with its resource
 * type (CQ arm, CQ set-ci, SQ/RQ doorbell, group separator) and the
 * owning CQ or QP number.  The indices here must match the ones passed
 * to prep_sw2hw_cq_buf()/prep_rst2init_qpee_buf().
 */
static void init_uar_context(void *uar_context_va)
{
	void *ptr;
	/* clear all uar context */
	memset(uar_context_va, 0, 4096);
	/* CQ arm records for all four CQs */
	ptr = uar_context_va + MADS_RCV_CQ_ARM_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_ARM, ptr, arbelprm_cq_arm_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.mads_qp.rcv_cq.cqn, ptr,
		      arbelprm_cq_arm_db_record_st, cq_number);
	ptr = uar_context_va + MADS_SND_CQ_ARM_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_ARM, ptr, arbelprm_cq_arm_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.mads_qp.snd_cq.cqn, ptr,
		      arbelprm_cq_arm_db_record_st, cq_number);
	ptr = uar_context_va + IPOIB_RCV_CQ_ARM_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_ARM, ptr, arbelprm_cq_arm_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.ipoib_qp.rcv_cq.cqn, ptr,
		      arbelprm_cq_arm_db_record_st, cq_number);
	ptr = uar_context_va + IPOIB_SND_CQ_ARM_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_ARM, ptr, arbelprm_cq_arm_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.ipoib_qp.snd_cq.cqn, ptr,
		      arbelprm_cq_arm_db_record_st, cq_number);
	/* send-queue doorbell records for both QPs */
	ptr = uar_context_va + MADS_SND_QP_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_SQ_DBELL, ptr, arbelprm_qp_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.mads_qp.qpn, ptr, arbelprm_qp_db_record_st,
		      qp_number);
	ptr = uar_context_va + IPOIB_SND_QP_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_SQ_DBELL, ptr, arbelprm_qp_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.ipoib_qp.qpn, ptr, arbelprm_qp_db_record_st,
		      qp_number);
	/* separator between the send and receive record groups */
	ptr = uar_context_va + GROUP_SEP_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_GROUP_SEP, ptr, arbelprm_cq_arm_db_record_st,
		      res);
	/* receive-queue doorbell records for both QPs */
	ptr = uar_context_va + MADS_RCV_QP_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_RQ_DBELL, ptr, arbelprm_qp_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.mads_qp.qpn, ptr, arbelprm_qp_db_record_st,
		      qp_number);
	ptr = uar_context_va + IPOIB_RCV_QP_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_RQ_DBELL, ptr, arbelprm_qp_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.ipoib_qp.qpn, ptr, arbelprm_qp_db_record_st,
		      qp_number);
	/* CQ consumer-index (set-ci) records for all four CQs */
	ptr = uar_context_va + MADS_RCV_CQ_CI_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_SET_CI, ptr, arbelprm_cq_ci_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.mads_qp.rcv_cq.cqn, ptr,
		      arbelprm_cq_ci_db_record_st, cq_number);
	ptr = uar_context_va + MADS_SND_CQ_CI_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_SET_CI, ptr, arbelprm_cq_ci_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.mads_qp.snd_cq.cqn, ptr,
		      arbelprm_cq_ci_db_record_st, cq_number);
	ptr = uar_context_va + IPOIB_RCV_CQ_CI_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_SET_CI, ptr, arbelprm_cq_ci_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.ipoib_qp.rcv_cq.cqn, ptr,
		      arbelprm_cq_ci_db_record_st, cq_number);
	ptr = uar_context_va + IPOIB_SND_CQ_CI_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_SET_CI, ptr, arbelprm_cq_ci_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.ipoib_qp.snd_cq.cqn, ptr,
		      arbelprm_cq_ci_db_record_st, cq_number);
}
  499. static int setup_hca(__u8 port, void **eq_p)
  500. {
  501. int ret;
  502. int rc;
  503. struct query_fw_st qfw;
  504. struct map_icm_st map_obj;
  505. struct dev_lim_st dev_lim;
  506. struct init_hca_st init_hca;
  507. __u8 log2_pages;
  508. unsigned long icm_start, icm_size, tmp;
  509. unsigned long log2_entries;
  510. __u32 aux_pages;
  511. __u32 mem_key, key, tmp_key;
  512. __u8 eqn;
  513. __u32 event_mask;
  514. struct eqe_t *eq_buf;
  515. void *inprm;
  516. unsigned long bus_addr;
  517. struct query_adapter_st qa;
  518. __u8 log_max_uars = 1;
  519. void *uar_context_va;
  520. __u32 uar_context_pa;
  521. tprintf("called");
  522. init_dev_data();
  523. inprm = get_inprm_buf();
  524. rc = reset_hca();
  525. if (rc) {
  526. eprintf("");
  527. return rc;
  528. } else {
  529. tprintf("reset_hca() success");
  530. }
  531. mdelay(1000); /* wait for 1 sec */
  532. rc = restore_config();
  533. if (rc) {
  534. eprintf("");
  535. return rc;
  536. } else {
  537. tprintf("restore_config() success");
  538. }
  539. dev_ib_data.pd = GLOBAL_PD;
  540. dev_ib_data.port = port;
  541. dev_ib_data.qkey = GLOBAL_QKEY;
  542. rc = cmd_query_fw(&qfw);
  543. if (rc) {
  544. eprintf("");
  545. return rc;
  546. }
  547. else {
  548. tprintf("cmd_query_fw() success");
  549. if (print_info) {
  550. printf("FW ver = %d.%d.%d\n",
  551. qfw.fw_rev_major,
  552. qfw.fw_rev_minor,
  553. qfw.fw_rev_subminor);
  554. }
  555. tprintf("fw_rev_major=%d", qfw.fw_rev_major);
  556. tprintf("fw_rev_minor=%d", qfw.fw_rev_minor);
  557. tprintf("fw_rev_subminor=%d", qfw.fw_rev_subminor);
  558. tprintf("error_buf_start_h=0x%x", qfw.error_buf_start_h);
  559. tprintf("error_buf_start_l=0x%x", qfw.error_buf_start_l);
  560. tprintf("error_buf_size=%d", qfw.error_buf_size);
  561. }
  562. bus_addr =
  563. ((unsigned long)((u64) qfw.error_buf_start_h << 32) | qfw.
  564. error_buf_start_l);
  565. dev_ib_data.error_buf_addr= ioremap(bus_addr,
  566. qfw.error_buf_size*4);
  567. dev_ib_data.error_buf_size= qfw.error_buf_size;
  568. if (!dev_ib_data.error_buf_addr) {
  569. eprintf("");
  570. return -1;
  571. }
  572. bus_addr =
  573. ((unsigned long)((u64) qfw.clear_int_addr.addr_h << 32) | qfw.
  574. clear_int_addr.addr_l);
  575. dev_ib_data.clr_int_addr = bus_to_virt(bus_addr);
  576. rc = cmd_enable_lam();
  577. if (rc == 0x22 /* LAM_NOT_PRE -- need to put a name here */ ) {
  578. // ??????
  579. } else if (rc == 0) {
  580. // ??????
  581. } else {
  582. eprintf("");
  583. return rc;
  584. }
  585. log2_pages = my_log2(qfw.fw_pages);
  586. memset(&map_obj, 0, sizeof map_obj);
  587. map_obj.num_vpm = 1;
  588. map_obj.vpm_arr[0].log2_size = log2_pages;
  589. map_obj.vpm_arr[0].pa_l = phys_mem.base + phys_mem.offset;
  590. rc = cmd_map_fa(&map_obj);
  591. if (rc) {
  592. eprintf("");
  593. return rc;
  594. }
  595. phys_mem.offset += 1 << (log2_pages + 12);
  596. rc = cmd_run_fw();
  597. if (rc) {
  598. ret = -1;
  599. eprintf("");
  600. goto undo_map_fa;
  601. }
  602. rc = cmd_mod_stat_cfg();
  603. if (rc) {
  604. ret = -1;
  605. eprintf("");
  606. goto undo_map_fa;
  607. }
  608. rc = cmd_query_dev_lim(&dev_lim);
  609. if (rc) {
  610. ret = -1;
  611. eprintf("");
  612. goto undo_map_fa;
  613. }
  614. dev_ib_data.uar_idx = dev_lim.num_rsvd_uars;
  615. tprintf("max_icm_size_h=0x%lx", dev_lim.max_icm_size_h);
  616. tprintf("max_icm_size_l=0x%lx", dev_lim.max_icm_size_l);
  617. memset(&init_hca, 0, sizeof init_hca);
  618. icm_start = 0;
  619. icm_size = 0;
  620. icm_start += ((dev_lim.num_rsvd_uars + 1) << 12);
  621. icm_size += ((dev_lim.num_rsvd_uars + 1) << 12);
  622. tmp = get_req_icm_pages(dev_lim.log2_rsvd_qps,
  623. MAX_APP_QPS,
  624. dev_lim.qpc_entry_sz, &log2_entries);
  625. init_hca.qpc_base_addr_l = icm_start;
  626. init_hca.log_num_of_qp = log2_entries;
  627. icm_start += (tmp << 12);
  628. icm_size += (tmp << 12);
  629. init_hca.eqpc_base_addr_l = icm_start;
  630. icm_start += (tmp << 12);
  631. icm_size += (tmp << 12);
  632. tmp = get_req_icm_pages(dev_lim.log2_rsvd_srqs,
  633. 0, dev_lim.srq_entry_sz, &log2_entries);
  634. init_hca.srqc_base_addr_l = icm_start;
  635. init_hca.log_num_of_srq = log2_entries;
  636. icm_start += (tmp << 12);
  637. icm_size += (tmp << 12);
  638. tmp = get_req_icm_pages(dev_lim.log2_rsvd_ees,
  639. 0, dev_lim.eec_entry_sz, &log2_entries);
  640. init_hca.eec_base_addr_l = icm_start;
  641. init_hca.log_num_of_ee = log2_entries;
  642. icm_start += (tmp << 12);
  643. icm_size += (tmp << 12);
  644. init_hca.eeec_base_addr_l = icm_start;
  645. icm_start += (tmp << 12);
  646. icm_size += (tmp << 12);
  647. tmp = get_req_icm_pages(dev_lim.log2_rsvd_cqs,
  648. MAX_APP_CQS,
  649. dev_lim.cqc_entry_sz, &log2_entries);
  650. init_hca.cqc_base_addr_l = icm_start;
  651. init_hca.log_num_of_cq = log2_entries;
  652. icm_start += (tmp << 12);
  653. icm_size += (tmp << 12);
  654. tmp = get_req_icm_pages(dev_lim.log2_rsvd_mtts,
  655. 0, dev_lim.mtt_entry_sz, &log2_entries);
  656. init_hca.mtt_base_addr_l = icm_start;
  657. icm_start += (tmp << 12);
  658. icm_size += (tmp << 12);
  659. tmp = get_req_icm_pages(dev_lim.log2_rsvd_mrws,
  660. 1, dev_lim.mpt_entry_sz, &log2_entries);
  661. init_hca.mpt_base_addr_l = icm_start;
  662. init_hca.log_mpt_sz = log2_entries;
  663. icm_start += (tmp << 12);
  664. icm_size += (tmp << 12);
  665. tmp = get_req_icm_pages(dev_lim.log2_rsvd_rdbs, 1, 32, /* size of rdb entry */
  666. &log2_entries);
  667. init_hca.rdb_base_addr_l = icm_start;
  668. icm_start += (tmp << 12);
  669. icm_size += (tmp << 12);
  670. init_hca.eqc_base_addr_l = icm_start;
  671. init_hca.log_num_of_eq = LOG2_EQS;
  672. tmp = dev_lim.eqc_entry_sz * (1 << LOG2_EQS);
  673. icm_start += tmp;
  674. icm_size += tmp;
  675. init_hca.mc_base_addr_l = icm_start;
  676. init_hca.log_mc_table_entry_sz =
  677. my_log2(MT_STRUCT_SIZE(arbelprm_mgm_entry_st));
  678. init_hca.mc_table_hash_sz = 8;
  679. init_hca.log_mc_table_sz = 3;
  680. icm_size +=
  681. (MT_STRUCT_SIZE(arbelprm_mgm_entry_st) * init_hca.mc_table_hash_sz);
  682. icm_start +=
  683. (MT_STRUCT_SIZE(arbelprm_mgm_entry_st) * init_hca.mc_table_hash_sz);
  684. rc = cmd_set_icm_size(icm_size, &aux_pages);
  685. if (rc) {
  686. ret = -1;
  687. eprintf("");
  688. goto undo_map_fa;
  689. }
  690. memset(&map_obj, 0, sizeof map_obj);
  691. map_obj.num_vpm = 1;
  692. map_obj.vpm_arr[0].pa_l = phys_mem.base + phys_mem.offset;
  693. map_obj.vpm_arr[0].log2_size = my_log2(aux_pages);
  694. rc = cmd_map_icm_aux(&map_obj);
  695. if (rc) {
  696. ret = -1;
  697. eprintf("");
  698. goto undo_map_fa;
  699. }
  700. phys_mem.offset += (1 << (map_obj.vpm_arr[0].log2_size + 12));
  701. uar_context_pa = phys_mem.base + phys_mem.offset +
  702. dev_ib_data.uar_idx * 4096;
  703. uar_context_va = phys_to_virt(uar_context_pa);
  704. tprintf("uar_context: va=0x%lx, pa=0x%lx", uar_context_va,
  705. uar_context_pa);
  706. dev_ib_data.uar_context_base = uar_context_va;
  707. memset(&map_obj, 0, sizeof map_obj);
  708. map_obj.num_vpm = 1;
  709. map_obj.vpm_arr[0].pa_l = phys_mem.base + phys_mem.offset;
  710. map_obj.vpm_arr[0].log2_size = my_log2((icm_size + 4095) >> 12);
  711. rc = cmd_map_icm(&map_obj);
  712. if (rc) {
  713. ret = -1;
  714. eprintf("");
  715. goto undo_map_fa;
  716. }
  717. phys_mem.offset += (1 << (map_obj.vpm_arr[0].log2_size + 12));
  718. init_hca.log_max_uars = log_max_uars;
  719. tprintf("inprm: va=0x%lx, pa=0x%lx", inprm, virt_to_bus(inprm));
  720. prep_init_hca_buf(&init_hca, inprm);
  721. rc = cmd_init_hca(inprm, MT_STRUCT_SIZE(arbelprm_init_hca_st));
  722. if (rc) {
  723. ret = -1;
  724. eprintf("");
  725. goto undo_map_fa;
  726. }
  727. rc = cmd_query_adapter(&qa);
  728. if (rc) {
  729. eprintf("");
  730. return rc;
  731. }
  732. dev_ib_data.clr_int_data = 1 << qa.intapin;
  733. tmp_key = 1 << dev_lim.log2_rsvd_mrws | MKEY_PREFIX;
  734. mem_key = 1 << (dev_lim.log2_rsvd_mrws + 8) | (MKEY_PREFIX >> 24);
  735. prep_sw2hw_mpt_buf(inprm, tmp_key);
  736. rc = cmd_sw2hw_mpt(&key, 1 << dev_lim.log2_rsvd_mrws, inprm,
  737. SW2HW_MPT_IBUF_SZ);
  738. if (rc) {
  739. ret = -1;
  740. eprintf("");
  741. goto undo_map_fa;
  742. } else {
  743. tprintf("cmd_sw2hw_mpt() success, key=0x%lx", mem_key);
  744. }
  745. dev_ib_data.mkey = mem_key;
  746. eqn = EQN;
  747. /* allocate a single EQ which will receive
  748. all the events */
  749. eq_buf = dev_buffers_p->eq_buf;
  750. init_eq_buf(eq_buf); /* put in HW ownership */
  751. prep_sw2hw_eq_buf(inprm, eq_buf);
  752. rc = cmd_sw2hw_eq(SW2HW_EQ_IBUF_SZ);
  753. if (rc) {
  754. ret = -1;
  755. eprintf("");
  756. goto undo_sw2hw_mpt;
  757. } else
  758. tprintf("cmd_sw2hw_eq() success");
  759. event_mask = (1 << XDEV_EV_TYPE_CQ_COMP) |
  760. (1 << XDEV_EV_TYPE_CQ_ERR) |
  761. (1 << XDEV_EV_TYPE_LOCAL_WQ_CATAS_ERR) |
  762. (1 << XDEV_EV_TYPE_PORT_ERR) |
  763. (1 << XDEV_EV_TYPE_LOCAL_WQ_INVALID_REQ_ERR) |
  764. (1 << XDEV_EV_TYPE_LOCAL_WQ_ACCESS_VIOL_ERR) |
  765. (1 << TAVOR_IF_EV_TYPE_OVERRUN);
  766. rc = cmd_map_eq(eqn, event_mask, 1);
  767. if (rc) {
  768. ret = -1;
  769. eprintf("");
  770. goto undo_sw2hw_eq;
  771. } else
  772. tprintf("cmd_map_eq() success");
  773. dev_ib_data.eq.eqn = eqn;
  774. dev_ib_data.eq.eq_buf = eq_buf;
  775. dev_ib_data.eq.cons_counter = 0;
  776. dev_ib_data.eq.eq_size = 1 << LOG2_EQ_SZ;
  777. bus_addr =
  778. ((unsigned long)((u64) qfw.eq_ci_table.addr_h << 32) | qfw.
  779. eq_ci_table.addr_l)
  780. + eqn * 8;
  781. dev_ib_data.eq.ci_base_base_addr = bus_to_virt(bus_addr);
  782. *eq_p = &dev_ib_data.eq;
  783. prep_init_ib_buf(inprm);
  784. rc = cmd_init_ib(port, inprm, INIT_IB_IBUF_SZ);
  785. if (rc) {
  786. ret = -1;
  787. eprintf("");
  788. goto undo_sw2hw_eq;
  789. } else
  790. tprintf("cmd_init_ib() success");
  791. init_av_array();
  792. tprintf("init_av_array() done");
  793. /* set the qp and cq numbers according
  794. to the results of query_dev_lim */
  795. dev_ib_data.mads_qp.qpn = (1 << dev_lim.log2_rsvd_qps) +
  796. +QPN_BASE + MADS_QPN_SN;
  797. dev_ib_data.ipoib_qp.qpn = (1 << dev_lim.log2_rsvd_qps) +
  798. +QPN_BASE + IPOIB_QPN_SN;
  799. dev_ib_data.mads_qp.snd_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
  800. MADS_SND_CQN_SN;
  801. dev_ib_data.mads_qp.rcv_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
  802. MADS_RCV_CQN_SN;
  803. dev_ib_data.ipoib_qp.snd_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
  804. IPOIB_SND_CQN_SN;
  805. dev_ib_data.ipoib_qp.rcv_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
  806. IPOIB_RCV_CQN_SN;
  807. init_uar_context(uar_context_va);
  808. ret = 0;
  809. goto exit;
  810. undo_sw2hw_eq:
  811. rc = cmd_hw2sw_eq(eqn);
  812. if (rc)
  813. eprintf("");
  814. else
  815. tprintf("cmd_hw2sw_eq() success");
  816. undo_sw2hw_mpt:
  817. rc = cmd_hw2sw_mpt(tmp_key);
  818. if (rc)
  819. eprintf("");
  820. undo_map_fa:
  821. rc = cmd_unmap_fa();
  822. if (rc)
  823. eprintf("");
  824. exit:
  825. return ret;
  826. }
  827. static void *get_inprm_buf(void)
  828. {
  829. return dev_buffers_p->inprm_buf;
  830. }
  831. static void *get_outprm_buf(void)
  832. {
  833. return dev_buffers_p->outprm_buf;
  834. }
  835. static void *get_send_wqe_buf(void *wqe, __u8 index)
  836. {
  837. struct ud_send_wqe_st *snd_wqe = wqe;
  838. return bus_to_virt(be32_to_cpu(snd_wqe->mpointer[index].local_addr_l));
  839. }
  840. static void *get_rcv_wqe_buf(void *wqe, __u8 index)
  841. {
  842. struct recv_wqe_st *rcv_wqe = wqe;
  843. return bus_to_virt(be32_to_cpu(rcv_wqe->mpointer[index].local_addr_l));
  844. }
/*
 * Fill in a UD address vector with the routing parameters of a remote
 * endpoint, plus the destination QP number and Q_Key.
 *
 * av   - address-vector container; av->av is the hardware-layout struct
 * dlid - destination LID
 * g    - GRH flag; non-zero means route with the GID below
 * sl   - service level
 * rate - static rate selector: >= 3 selects encoding 0 (4x),
 *        otherwise encoding 1 (1x)
 * gid  - destination GID, consulted only when g is set (may be NULL)
 * qpn  - destination QP number
 */
static void modify_av_params(struct ud_av_st *av,
			     __u16 dlid,
			     __u8 g,
			     __u8 sl, __u8 rate, union ib_gid_u *gid, __u32 qpn)
{
	memset(&av->av, 0, sizeof av->av);
	/* local routing context: port and protection domain */
	INS_FLD_TO_BE(dev_ib_data.port, &av->av, arbelprm_ud_address_vector_st,
		      port_number);
	INS_FLD_TO_BE(dev_ib_data.pd, &av->av, arbelprm_ud_address_vector_st,
		      pd);
	INS_FLD_TO_BE(dlid, &av->av, arbelprm_ud_address_vector_st, rlid);
	INS_FLD_TO_BE(g, &av->av, arbelprm_ud_address_vector_st, g);
	INS_FLD_TO_BE(sl, &av->av, arbelprm_ud_address_vector_st, sl);
	/* msg = 3: max-message-size field -- NOTE(review): constant taken as-is,
	   confirm encoding against the Arbel PRM */
	INS_FLD_TO_BE(3, &av->av, arbelprm_ud_address_vector_st, msg);
	if (rate >= 3)
		INS_FLD_TO_BE(0, &av->av, arbelprm_ud_address_vector_st, max_stat_rate);	/* 4x */
	else
		INS_FLD_TO_BE(1, &av->av, arbelprm_ud_address_vector_st, max_stat_rate);	/* 1x */
	if (g) {
		if (gid) {
			/* copy the 128-bit remote GID in 32-bit chunks,
			   high dword first */
			INS_FLD(*((__u32 *) (&gid->raw[0])), &av->av,
				arbelprm_ud_address_vector_st, rgid_127_96);
			INS_FLD(*((__u32 *) (&gid->raw[4])), &av->av,
				arbelprm_ud_address_vector_st, rgid_95_64);
			INS_FLD(*((__u32 *) (&gid->raw[8])), &av->av,
				arbelprm_ud_address_vector_st, rgid_63_32);
			INS_FLD(*((__u32 *) (&gid->raw[12])), &av->av,
				arbelprm_ud_address_vector_st, rgid_31_0);
		} else {
			/* GRH requested but no GID supplied: zero GID */
			INS_FLD(0, &av->av, arbelprm_ud_address_vector_st,
				rgid_127_96);
			INS_FLD(0, &av->av, arbelprm_ud_address_vector_st,
				rgid_95_64);
			INS_FLD(0, &av->av, arbelprm_ud_address_vector_st,
				rgid_63_32);
			INS_FLD(0, &av->av, arbelprm_ud_address_vector_st,
				rgid_31_0);
		}
	} else {
		/* no GRH: GID field holds 0:0:0:2 -- NOTE(review): the '2'
		   appears deliberate (value required when g == 0); verify
		   against the PRM */
		INS_FLD(0, &av->av, arbelprm_ud_address_vector_st, rgid_127_96);
		INS_FLD(0, &av->av, arbelprm_ud_address_vector_st, rgid_95_64);
		INS_FLD(0, &av->av, arbelprm_ud_address_vector_st, rgid_63_32);
		INS_FLD(2, &av->av, arbelprm_ud_address_vector_st, rgid_31_0);
	}
	/* destination QP and Q_Key are kept in the container, not the HW AV */
	av->dest_qp = qpn;
	av->qkey = dev_ib_data.qkey;
}
  892. static void init_cq_buf(union cqe_st *cq_buf, __u8 num_cqes)
  893. {
  894. int i;
  895. memset(cq_buf, 0, sizeof(union cqe_st) * num_cqes);
  896. for (i = 0; i < num_cqes; ++i) {
  897. WRITE_BYTE_VOL(&cq_buf[i], CQE_OWNER_OFFSET, CQE_OWNER_VAL_HW);
  898. }
  899. }
  900. static int post_rcv_buf(struct udqp_st *qp, struct recv_wqe_st *rcv_wqe)
  901. {
  902. int i;
  903. /* put a valid lkey */
  904. for (i = 0; i < MAX_SCATTER; ++i) {
  905. rcv_wqe->mpointer[i].lkey = cpu_to_be32(dev_ib_data.mkey);
  906. }
  907. qp->post_rcv_counter++;
  908. WRITE_WORD_VOL(qp->rcv_uar_context, 2, htons(qp->post_rcv_counter));
  909. return 0;
  910. }
/*
 * Post a previously prepared UD send WQE by ringing the send doorbell.
 *
 * qph        - the UD QP (struct udqp_st *)
 * wqeh       - the fully built send WQE (struct ud_send_wqe_st *)
 * num_gather - number of gather pointers actually used by the WQE
 *
 * Returns the status of cmd_post_doorbell(); on success the WQE is
 * remembered as the last posted one so the next post can chain to it.
 */
static int post_send_req(void *qph, void *wqeh, __u8 num_gather)
{
	int rc;
	struct udqp_st *qp = qph;
	struct ud_send_wqe_st *snd_wqe = wqeh;
	struct send_doorbell_st dbell;
	__u32 nds;
	/* publish the new producer counter in the send doorbell record
	   (big-endian word at offset 2 of the UAR context) */
	qp->post_send_counter++;
	WRITE_WORD_VOL(qp->send_uar_context, 2, htons(qp->post_send_counter));
	/* build the doorbell register image */
	memset(&dbell, 0, sizeof dbell);
	INS_FLD(XDEV_NOPCODE_SEND, &dbell, arbelprm_send_doorbell_st, nopcode);
	INS_FLD(1, &dbell, arbelprm_send_doorbell_st, f);
	/* doorbell carries the index of the WQE being posted */
	INS_FLD(qp->post_send_counter - 1, &dbell, arbelprm_send_doorbell_st,
		wqe_counter);
	INS_FLD(1, &dbell, arbelprm_send_doorbell_st, wqe_cnt);
	/* nds = WQE size in 16-byte units: next + UD segments + the gather
	   pointers actually used */
	nds = (sizeof(snd_wqe->next) +
	       sizeof(snd_wqe->udseg) +
	       sizeof(snd_wqe->mpointer[0]) * num_gather) >> 4;
	INS_FLD(nds, &dbell, arbelprm_send_doorbell_st, nds);
	INS_FLD(qp->qpn, &dbell, arbelprm_send_doorbell_st, qpn);
	/* link the previously posted WQE to this one so the HW can keep
	   executing the chain without a doorbell per WQE */
	if (qp->last_posted_snd_wqe) {
		INS_FLD_TO_BE(nds,
			      &qp->last_posted_snd_wqe->next.next,
			      arbelprm_wqe_segment_next_st, nds);
		INS_FLD_TO_BE(1,
			      &qp->last_posted_snd_wqe->next.next,
			      arbelprm_wqe_segment_next_st, f);
		INS_FLD_TO_BE(XDEV_NOPCODE_SEND,
			      &qp->last_posted_snd_wqe->next.next,
			      arbelprm_wqe_segment_next_st, nopcode);
	}
	rc = cmd_post_doorbell(&dbell, POST_SND_OFFSET);
	if (!rc) {
		/* remember this WQE only if the doorbell was accepted */
		qp->last_posted_snd_wqe = snd_wqe;
	}
	return rc;
}
  948. static int create_mads_qp(void **qp_pp, void **snd_cq_pp, void **rcv_cq_pp)
  949. {
  950. __u8 i, next_i, j, k;
  951. int rc;
  952. struct udqp_st *qp;
  953. __u32 bus_addr;
  954. __u8 nds;
  955. void *ptr;
  956. qp = &dev_ib_data.mads_qp;
  957. /* set the pointer to the receive WQEs buffer */
  958. qp->rcv_wq = dev_buffers_p->mads_qp_rcv_queue;
  959. qp->send_buf_sz = MAD_BUF_SZ;
  960. qp->rcv_buf_sz = MAD_BUF_SZ;
  961. qp->max_recv_wqes = NUM_MADS_RCV_WQES; /* max wqes in this work queue */
  962. qp->recv_wqe_cur_free = NUM_MADS_RCV_WQES; /* current free wqes */
  963. qp->recv_wqe_alloc_idx = 0; /* index from wqes can be allocated if there are free wqes */
  964. qp->rcv_uar_context =
  965. dev_ib_data.uar_context_base + 8 * MADS_RCV_QP_DB_IDX;
  966. qp->send_uar_context =
  967. dev_ib_data.uar_context_base + 8 * MADS_SND_QP_DB_IDX;
  968. memset(&qp->rcv_wq[0], 0, NUM_MADS_RCV_WQES * sizeof(qp->rcv_wq[0]));
  969. nds = sizeof(qp->rcv_wq[0].wqe) >> 4;
  970. /* iterrate through the list */
  971. for (j = 0, i = 0, next_i = 1;
  972. j < NUM_MADS_RCV_WQES;
  973. MOD_INC(i, NUM_MADS_RCV_WQES), MOD_INC(next_i, NUM_MADS_RCV_WQES),
  974. ++j) {
  975. qp->rcv_bufs[i] = ib_buffers.rcv_mad_buf[i];
  976. /* link the WQE to the next one */
  977. bus_addr = virt_to_bus(&qp->rcv_wq[next_i].wqe);
  978. ptr = qp->rcv_wq[i].wqe.control +
  979. MT_BYTE_OFFSET(arbelprm_wqe_segment_ctrl_recv_st,
  980. wqe_segment_next);
  981. INS_FLD(bus_addr >> 6, ptr, arbelprm_recv_wqe_segment_next_st,
  982. nda_31_6);
  983. INS_FLD(nds, ptr, arbelprm_recv_wqe_segment_next_st, nds);
  984. /* set the allocated buffers */
  985. qp->rcv_bufs[i] = ib_buffers.rcv_mad_buf[i];
  986. bus_addr = virt_to_bus(qp->rcv_bufs[i]);
  987. qp->rcv_wq[i].wqe.mpointer[0].local_addr_l = bus_addr;
  988. qp->rcv_wq[i].wqe.mpointer[0].byte_count = GRH_SIZE;
  989. bus_addr = virt_to_bus(qp->rcv_bufs[i] + GRH_SIZE);
  990. qp->rcv_wq[i].wqe.mpointer[1].local_addr_l = bus_addr;
  991. qp->rcv_wq[i].wqe.mpointer[1].byte_count = MAD_BUF_SZ;
  992. for (k = 0; k < (((sizeof(qp->rcv_wq[i])) >> 4) - 1); ++k) {
  993. qp->rcv_wq[i].wqe.mpointer[k].lkey = INVALID_WQE_LKEY;
  994. }
  995. }
  996. cpu_to_be_buf(&qp->rcv_wq[0],
  997. NUM_MADS_RCV_WQES * sizeof(qp->rcv_wq[0]));
  998. for (i = 0; i < qp->max_recv_wqes; ++i) {
  999. qp->rcv_wq[i].wqe_cont.qp = qp;
  1000. }
  1001. /* set the pointer to the send WQEs buffer */
  1002. qp->snd_wq = dev_buffers_p->mads_qp_snd_queue;
  1003. qp->snd_wqe_alloc_idx = 0;
  1004. qp->max_snd_wqes = NUM_MADS_SND_WQES;
  1005. qp->snd_wqe_cur_free = NUM_MADS_SND_WQES;
  1006. memset(&qp->snd_wq[0], 0, NUM_MADS_SND_WQES * sizeof(qp->snd_wq[i]));
  1007. /* iterrate through the list */
  1008. for (j = 0, i = 0, next_i = 1;
  1009. j < NUM_MADS_RCV_WQES;
  1010. MOD_INC(i, NUM_MADS_SND_WQES), MOD_INC(next_i, NUM_MADS_SND_WQES),
  1011. ++j) {
  1012. /* link the WQE to the next one */
  1013. bus_addr = virt_to_bus(&qp->snd_wq[next_i].wqe_cont.wqe);
  1014. INS_FLD(bus_addr >> 6, &qp->snd_wq[i].wqe_cont.wqe.next.next,
  1015. arbelprm_wqe_segment_next_st, nda_31_6);
  1016. /* set the allocated buffers */
  1017. qp->snd_bufs[i] = ib_buffers.send_mad_buf[i];
  1018. bus_addr = virt_to_bus(qp->snd_bufs[i]);
  1019. qp->snd_wq[i].wqe_cont.wqe.mpointer[0].local_addr_l = bus_addr;
  1020. qp->snd_wq[i].wqe_cont.wqe.mpointer[0].lkey = dev_ib_data.mkey;
  1021. qp->snd_wq[i].wqe_cont.wqe.mpointer[0].byte_count =
  1022. qp->send_buf_sz;
  1023. }
  1024. cpu_to_be_buf(&qp->snd_wq[0],
  1025. NUM_MADS_SND_WQES * sizeof(qp->snd_wq[i]));
  1026. for (i = 0; i < qp->max_snd_wqes; ++i) {
  1027. qp->snd_wq[i].wqe_cont.qp = qp;
  1028. }
  1029. /* qp number and cq numbers are already set up */
  1030. qp->snd_cq.cq_buf = dev_buffers_p->mads_snd_cq_buf;
  1031. qp->rcv_cq.cq_buf = dev_buffers_p->mads_rcv_cq_buf;
  1032. qp->snd_cq.num_cqes = NUM_MADS_SND_CQES;
  1033. qp->rcv_cq.num_cqes = NUM_MADS_RCV_CQES;
  1034. qp->snd_cq.arm_db_ctx_idx = MADS_SND_CQ_ARM_DB_IDX;
  1035. qp->snd_cq.ci_db_ctx_idx = MADS_SND_CQ_CI_DB_IDX;
  1036. qp->rcv_cq.arm_db_ctx_idx = MADS_RCV_CQ_ARM_DB_IDX;
  1037. qp->rcv_cq.ci_db_ctx_idx = MADS_RCV_CQ_CI_DB_IDX;
  1038. qp->rcv_db_record_index = MADS_RCV_QP_DB_IDX;
  1039. qp->snd_db_record_index = MADS_SND_QP_DB_IDX;
  1040. qp->qkey = GLOBAL_QKEY;
  1041. rc = create_udqp(qp);
  1042. if (!rc) {
  1043. *qp_pp = qp;
  1044. *snd_cq_pp = &qp->snd_cq;
  1045. *rcv_cq_pp = &qp->rcv_cq;
  1046. }
  1047. return rc;
  1048. }
  1049. static int create_ipoib_qp(void **qp_pp,
  1050. void **snd_cq_pp, void **rcv_cq_pp, __u32 qkey)
  1051. {
  1052. __u8 i, next_i, j, k;
  1053. int rc;
  1054. struct udqp_st *qp;
  1055. __u32 bus_addr;
  1056. __u8 nds;
  1057. void *ptr;
  1058. qp = &dev_ib_data.ipoib_qp;
  1059. /* set the pointer to the receive WQEs buffer */
  1060. qp->rcv_wq = dev_buffers_p->ipoib_qp_rcv_queue;
  1061. qp->send_buf_sz = IPOIB_SND_BUF_SZ;
  1062. qp->rcv_buf_sz = IPOIB_RCV_BUF_SZ;
  1063. qp->max_recv_wqes = NUM_IPOIB_RCV_WQES;
  1064. qp->recv_wqe_cur_free = NUM_IPOIB_RCV_WQES;
  1065. qp->rcv_uar_context =
  1066. dev_ib_data.uar_context_base + 8 * IPOIB_RCV_QP_DB_IDX;
  1067. qp->send_uar_context =
  1068. dev_ib_data.uar_context_base + 8 * IPOIB_SND_QP_DB_IDX;
  1069. memset(&qp->rcv_wq[0], 0, NUM_IPOIB_RCV_WQES * sizeof(qp->rcv_wq[0]));
  1070. nds = sizeof(qp->rcv_wq[0].wqe) >> 4;
  1071. /* iterrate through the list */
  1072. for (j = 0, i = 0, next_i = 1;
  1073. j < NUM_IPOIB_RCV_WQES;
  1074. MOD_INC(i, NUM_IPOIB_RCV_WQES), MOD_INC(next_i,
  1075. NUM_IPOIB_RCV_WQES), ++j) {
  1076. /* link the WQE to the next one */
  1077. bus_addr = virt_to_bus(&qp->rcv_wq[next_i].wqe);
  1078. ptr = qp->rcv_wq[i].wqe.control +
  1079. MT_BYTE_OFFSET(arbelprm_wqe_segment_ctrl_recv_st,
  1080. wqe_segment_next);
  1081. INS_FLD(bus_addr >> 6, ptr, arbelprm_recv_wqe_segment_next_st,
  1082. nda_31_6);
  1083. INS_FLD(nds, ptr, arbelprm_recv_wqe_segment_next_st, nds);
  1084. /* set the allocated buffers */
  1085. qp->rcv_bufs[i] = ib_buffers.ipoib_rcv_buf[i];
  1086. bus_addr = virt_to_bus(qp->rcv_bufs[i]);
  1087. qp->rcv_wq[i].wqe.mpointer[0].local_addr_l = bus_addr;
  1088. qp->rcv_wq[i].wqe.mpointer[0].byte_count = GRH_SIZE;
  1089. bus_addr = virt_to_bus(qp->rcv_bufs[i] + GRH_SIZE);
  1090. qp->rcv_wq[i].wqe.mpointer[1].local_addr_l = bus_addr;
  1091. qp->rcv_wq[i].wqe.mpointer[1].byte_count = IPOIB_RCV_BUF_SZ;
  1092. for (k = 0; k < (((sizeof(qp->rcv_wq[i].wqe)) >> 4) - 1); ++k) {
  1093. qp->rcv_wq[i].wqe.mpointer[k].lkey = INVALID_WQE_LKEY;
  1094. }
  1095. }
  1096. cpu_to_be_buf(&qp->rcv_wq[0],
  1097. NUM_IPOIB_RCV_WQES * sizeof(qp->rcv_wq[0]));
  1098. for (i = 0; i < qp->max_recv_wqes; ++i) {
  1099. qp->rcv_wq[i].wqe_cont.qp = qp;
  1100. }
  1101. /* set the pointer to the send WQEs buffer */
  1102. qp->snd_wq = dev_buffers_p->ipoib_qp_snd_queue;
  1103. qp->snd_wqe_alloc_idx = 0;
  1104. qp->max_snd_wqes = NUM_IPOIB_SND_WQES;
  1105. qp->snd_wqe_cur_free = NUM_IPOIB_SND_WQES;
  1106. memset(&qp->snd_wq[0], 0, NUM_IPOIB_SND_WQES * sizeof(qp->snd_wq[i]));
  1107. /* iterrate through the list */
  1108. for (j = 0, i = 0, next_i = 1;
  1109. j < NUM_IPOIB_RCV_WQES;
  1110. MOD_INC(i, NUM_IPOIB_SND_WQES), MOD_INC(next_i,
  1111. NUM_IPOIB_SND_WQES), ++j) {
  1112. /* link the WQE to the next one */
  1113. bus_addr = virt_to_bus(&qp->snd_wq[next_i].wqe_cont.wqe);
  1114. INS_FLD(bus_addr >> 6, &qp->snd_wq[i].wqe_cont.wqe.next.next,
  1115. arbelprm_wqe_segment_next_st, nda_31_6);
  1116. /* set the allocated buffers */
  1117. qp->snd_bufs[i] = ib_buffers.send_ipoib_buf[i];
  1118. bus_addr = virt_to_bus(qp->snd_bufs[i]);
  1119. qp->snd_wq[i].wqe_cont.wqe.mpointer[0].local_addr_l = bus_addr;
  1120. qp->snd_wq[i].wqe_cont.wqe.mpointer[0].lkey = dev_ib_data.mkey;
  1121. }
  1122. cpu_to_be_buf(&qp->snd_wq[0],
  1123. NUM_IPOIB_SND_WQES * sizeof(qp->snd_wq[i]));
  1124. for (i = 0; i < qp->max_snd_wqes; ++i) {
  1125. qp->snd_wq[i].wqe_cont.qp = qp;
  1126. }
  1127. /* qp number and cq numbers are already set up */
  1128. qp->snd_cq.cq_buf = dev_buffers_p->ipoib_snd_cq_buf;
  1129. qp->rcv_cq.cq_buf = dev_buffers_p->ipoib_rcv_cq_buf;
  1130. qp->snd_cq.num_cqes = NUM_IPOIB_SND_CQES;
  1131. qp->rcv_cq.num_cqes = NUM_IPOIB_RCV_CQES;
  1132. qp->snd_cq.arm_db_ctx_idx = IPOIB_SND_CQ_ARM_DB_IDX;
  1133. qp->snd_cq.ci_db_ctx_idx = IPOIB_SND_CQ_CI_DB_IDX;
  1134. qp->rcv_cq.arm_db_ctx_idx = IPOIB_RCV_CQ_ARM_DB_IDX;
  1135. qp->rcv_cq.ci_db_ctx_idx = IPOIB_RCV_CQ_CI_DB_IDX;
  1136. qp->rcv_db_record_index = IPOIB_RCV_QP_DB_IDX;
  1137. qp->snd_db_record_index = IPOIB_SND_QP_DB_IDX;
  1138. qp->qkey = qkey;
  1139. rc = create_udqp(qp);
  1140. if (!rc) {
  1141. *qp_pp = qp;
  1142. *snd_cq_pp = &qp->snd_cq;
  1143. *rcv_cq_pp = &qp->rcv_cq;
  1144. }
  1145. return rc;
  1146. }
/*
 * Bring a pre-filled UD QP online:
 *   1. create its send and receive CQs (SW2HW_CQ),
 *   2. transition the QP RST -> INIT -> RTR -> RTS,
 *   3. post every free receive WQE.
 *
 * The caller must have already filled in the ring buffers, CQ buffers,
 * counts, doorbell indices, qpn/cqns and qkey (see create_mads_qp() /
 * create_ipoib_qp()).  Returns 0 on success, -1 on failure; on failure
 * the CQs created so far are torn down via the goto cleanup chain.
 */
static int create_udqp(struct udqp_st *qp)
{
	int rc, ret = 0;
	void *inprm;
	struct recv_wqe_st *rcv_wqe;
	inprm = dev_buffers_p->inprm_buf;
	/* resolve the doorbell-record pointers from their UAR indices
	   (8 bytes per doorbell record) */
	qp->rcv_cq.arm_db_ctx_pointer =
	    dev_ib_data.uar_context_base + 8 * qp->rcv_cq.arm_db_ctx_idx;
	qp->rcv_cq.ci_db_ctx_pointer =
	    dev_ib_data.uar_context_base + 8 * qp->rcv_cq.ci_db_ctx_idx;
	qp->snd_cq.arm_db_ctx_pointer =
	    dev_ib_data.uar_context_base + 8 * qp->snd_cq.arm_db_ctx_idx;
	qp->snd_cq.ci_db_ctx_pointer =
	    dev_ib_data.uar_context_base + 8 * qp->snd_cq.ci_db_ctx_idx;
	/* create send CQ */
	init_cq_buf(qp->snd_cq.cq_buf, qp->snd_cq.num_cqes);
	qp->snd_cq.cons_counter = 0;
	prep_sw2hw_cq_buf(inprm,
			  dev_ib_data.eq.eqn,
			  qp->snd_cq.cqn,
			  qp->snd_cq.cq_buf,
			  qp->snd_cq.ci_db_ctx_idx, qp->snd_cq.arm_db_ctx_idx);
	rc = cmd_sw2hw_cq(qp->snd_cq.cqn, inprm, SW2HW_CQ_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto exit;
	}
	/* create receive CQ */
	init_cq_buf(qp->rcv_cq.cq_buf, qp->rcv_cq.num_cqes);
	qp->rcv_cq.cons_counter = 0;
	/* the input mailbox still holds the send-CQ image; clear it first */
	memset(inprm, 0, SW2HW_CQ_IBUF_SZ);
	prep_sw2hw_cq_buf(inprm,
			  dev_ib_data.eq.eqn,
			  qp->rcv_cq.cqn,
			  qp->rcv_cq.cq_buf,
			  qp->rcv_cq.ci_db_ctx_idx, qp->rcv_cq.arm_db_ctx_idx);
	rc = cmd_sw2hw_cq(qp->rcv_cq.cqn, inprm, SW2HW_CQ_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_snd_cq;
	}
	/* RST -> INIT: program the full QP context (CQs, qkey, ring sizes
	   in log2, ring base addresses, doorbell record indices) */
	prep_rst2init_qpee_buf(inprm,
			       qp->snd_cq.cqn,
			       qp->rcv_cq.cqn,
			       qp->qkey,
			       my_log2(qp->max_recv_wqes),
			       my_log2(sizeof(qp->rcv_wq[0])) - 4,
			       my_log2(qp->max_snd_wqes),
			       my_log2(sizeof(qp->snd_wq[0])) - 4,
			       virt_to_bus(qp->snd_wq),
			       qp->snd_db_record_index,
			       virt_to_bus(qp->rcv_wq),
			       qp->rcv_db_record_index);
	rc = cmd_rst2init_qpee(qp->qpn, inprm, QPCTX_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_rcv_cq;
	}
	qp->last_posted_rcv_wqe = NULL;
	qp->last_posted_snd_wqe = NULL;
	/* post all the buffers to the receive queue */
	while (1) {
		/* allocate wqe */
		rcv_wqe = alloc_rcv_wqe(qp);
		if (!rcv_wqe)
			break;	/* ring exhausted -- all WQEs posted */
		/* post the buffer */
		rc = post_rcv_buf(qp, rcv_wqe);
		if (rc) {
			ret = -1;
			eprintf("");
			goto undo_rcv_cq;
		}
	}
	/* INIT -> RTR */
	prep_init2rtr_qpee_buf(inprm);
	rc = cmd_init2rtr_qpee(qp->qpn, inprm, QPCTX_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_rcv_cq;
	}
	/* RTR -> RTS: no optional fields are modified, so pass a zeroed
	   context */
	memset(inprm, 0, QPCTX_IBUF_SZ);
	rc = cmd_rtr2rts_qpee(qp->qpn, inprm, QPCTX_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_rcv_cq;
	}
	goto exit;
      undo_rcv_cq:
	rc = cmd_hw2sw_cq(qp->rcv_cq.cqn);
	if (rc)
		eprintf("");
      undo_snd_cq:
	rc = cmd_hw2sw_cq(qp->snd_cq.cqn);
	if (rc)
		eprintf("");
      exit:
	return ret;
}
  1250. static int destroy_udqp(struct udqp_st *qp)
  1251. {
  1252. int rc;
  1253. rc = cmd_2err_qpee(qp->qpn);
  1254. if (rc) {
  1255. eprintf("");
  1256. return rc;
  1257. }
  1258. tprintf("cmd_2err_qpee(0x%lx) success", qp->qpn);
  1259. rc = cmd_2rst_qpee(qp->qpn);
  1260. if (rc) {
  1261. eprintf("");
  1262. return rc;
  1263. }
  1264. tprintf("cmd_2rst_qpee(0x%lx) success", qp->qpn);
  1265. rc = cmd_hw2sw_cq(qp->rcv_cq.cqn);
  1266. if (rc) {
  1267. eprintf("");
  1268. return rc;
  1269. }
  1270. tprintf("cmd_hw2sw_cq(0x%lx) success", qp->snd_cq.cqn);
  1271. rc = cmd_hw2sw_cq(qp->snd_cq.cqn);
  1272. if (rc) {
  1273. eprintf("");
  1274. return rc;
  1275. }
  1276. tprintf("cmd_hw2sw_cq(0x%lx) success", qp->rcv_cq.cqn);
  1277. return rc;
  1278. }
  1279. static void prep_send_wqe_buf(void *qph,
  1280. void *avh,
  1281. void *wqeh,
  1282. const void *buf,
  1283. unsigned int offset, __u16 len, __u8 e)
  1284. {
  1285. struct ud_send_wqe_st *snd_wqe = wqeh;
  1286. struct ud_av_st *av = avh;
  1287. if (qph) {
  1288. }
  1289. /* suppress warnings */
  1290. INS_FLD_TO_BE(e, &snd_wqe->next.control,
  1291. arbelprm_wqe_segment_ctrl_send_st, e);
  1292. INS_FLD_TO_BE(1, &snd_wqe->next.control,
  1293. arbelprm_wqe_segment_ctrl_send_st, always1);
  1294. INS_FLD_TO_BE(1, &snd_wqe->next.next, arbelprm_wqe_segment_next_st,
  1295. always1);
  1296. memcpy(&snd_wqe->udseg, &av->av, sizeof av->av);
  1297. INS_FLD_TO_BE(av->dest_qp, snd_wqe->udseg.av,
  1298. arbelprm_wqe_segment_ud_st, destination_qp);
  1299. INS_FLD_TO_BE(av->qkey, snd_wqe->udseg.av, arbelprm_wqe_segment_ud_st,
  1300. q_key);
  1301. if (buf) {
  1302. memcpy(bus_to_virt
  1303. (be32_to_cpu(snd_wqe->mpointer[0].local_addr_l)) +
  1304. offset, buf, len);
  1305. len += offset;
  1306. }
  1307. snd_wqe->mpointer[0].byte_count = cpu_to_be32(len);
  1308. }
  1309. static void *alloc_ud_av(void)
  1310. {
  1311. u8 next_free;
  1312. if (dev_ib_data.udav.udav_next_free == FL_EOL) {
  1313. return NULL;
  1314. }
  1315. next_free = dev_ib_data.udav.udav_next_free;
  1316. dev_ib_data.udav.udav_next_free =
  1317. dev_buffers_p->av_array[next_free].ud_av.next_free;
  1318. tprintf("allocated udav %d", next_free);
  1319. return &dev_buffers_p->av_array[next_free].ud_av;
  1320. }
  1321. static void free_ud_av(void *avh)
  1322. {
  1323. union ud_av_u *avu;
  1324. __u8 idx, old_idx;
  1325. struct ud_av_st *av = avh;
  1326. avu = (union ud_av_u *)av;
  1327. idx = avu - dev_buffers_p->av_array;
  1328. tprintf("freeing udav idx=%d", idx);
  1329. old_idx = dev_ib_data.udav.udav_next_free;
  1330. dev_ib_data.udav.udav_next_free = idx;
  1331. avu->ud_av.next_free = old_idx;
  1332. }
  1333. static int update_cq_cons_idx(struct cq_st *cq)
  1334. {
  1335. /* write doorbell record */
  1336. WRITE_DWORD_VOL(cq->ci_db_ctx_pointer, 0, htonl(cq->cons_counter));
  1337. /*
  1338. INS_FLD_TO_BE(cq->cons_counter,
  1339. cq->ci_db_ctx_pointer,
  1340. arbelprm_cq_arm_db_record_st,
  1341. counter);
  1342. INS_FLD_TO_BE(cq->cqn,
  1343. cq->ci_db_ctx_pointer,
  1344. arbelprm_cq_arm_db_record_st,
  1345. cq_number);
  1346. INS_FLD_TO_BE(1,
  1347. cq->ci_db_ctx_pointer,
  1348. arbelprm_cq_arm_db_record_st,
  1349. res); */
  1350. return 0;
  1351. }
/*
 * Poll a completion queue for one CQE.
 *
 * cqh      - the CQ (struct cq_st *)
 * cqe_p    - out: the CQE, byte-swapped to CPU order, when one was found
 * num_cqes - out: 1 if a CQE was consumed, 0 otherwise
 *
 * Returns 0 on success or the error from update_cq_cons_idx().
 */
static int poll_cq(void *cqh, union cqe_st *cqe_p, u8 * num_cqes)
{
	union cqe_st cqe;
	int rc;
	u32 *ptr;
	struct cq_st *cq = cqh;
	/* ring is a power of two; mask the counter to get the slot */
	__u32 cons_idx = cq->cons_counter & (cq->num_cqes - 1);
	ptr = (u32 *) (&(cq->cq_buf[cons_idx]));
	barrier();
	/* bit 31 of dword 7 is the owner bit: clear means the CQE belongs
	   to software (cf. init_cq_buf(), which gives all CQEs to HW) */
	if ((ptr[7] & 0x80000000) == 0) {
		cqe = cq->cq_buf[cons_idx];
		be_to_cpu_buf(&cqe, sizeof(cqe));
		*cqe_p = cqe;
		/* hand the slot back to the hardware before advancing */
		ptr[7] = 0x80000000;
		barrier();
		cq->cons_counter++;
		rc = update_cq_cons_idx(cq);
		if (rc) {
			return rc;
		}
		*num_cqes = 1;
	} else
		*num_cqes = 0;
	return 0;
}
  1377. static void dev2ib_cqe(struct ib_cqe_st *ib_cqe_p, union cqe_st *cqe_p)
  1378. {
  1379. __u8 opcode;
  1380. __u32 wqe_addr_ba;
  1381. opcode =
  1382. EX_FLD(cqe_p->good_cqe, arbelprm_completion_queue_entry_st, opcode);
  1383. if (opcode >= CQE_ERROR_OPCODE)
  1384. ib_cqe_p->is_error = 1;
  1385. else
  1386. ib_cqe_p->is_error = 0;
  1387. ib_cqe_p->is_send =
  1388. EX_FLD(cqe_p->good_cqe, arbelprm_completion_queue_entry_st, s);
  1389. wqe_addr_ba =
  1390. EX_FLD(cqe_p->good_cqe, arbelprm_completion_queue_entry_st,
  1391. wqe_adr) << 6;
  1392. ib_cqe_p->wqe = bus_to_virt(wqe_addr_ba);
  1393. ib_cqe_p->count =
  1394. EX_FLD(cqe_p->good_cqe, arbelprm_completion_queue_entry_st,
  1395. byte_cnt);
  1396. }
/*
 * Poll a CQ and translate the result to the generic ib_cqe_st form.
 *
 * cqh      - the CQ (struct cq_st *)
 * ib_cqe_p - out: translated completion (valid only when *num_cqes == 1)
 * num_cqes - out: number of completions consumed (0 or 1)
 *
 * On an error completion, dumps the syndrome, vendor code, QPN and the
 * raw offending WQE for diagnosis; the error is still returned to the
 * caller through ib_cqe_p->is_error, not the return code.
 */
static int ib_poll_cq(void *cqh, struct ib_cqe_st *ib_cqe_p, u8 * num_cqes)
{
	int rc;
	union cqe_st cqe;
	struct cq_st *cq = cqh;
	__u8 opcode;
	rc = poll_cq(cq, &cqe, num_cqes);
	if (rc || ((*num_cqes) == 0)) {
		return rc;
	}
	dev2ib_cqe(ib_cqe_p, &cqe);
	opcode =
	    EX_FLD(cqe.good_cqe, arbelprm_completion_queue_entry_st, opcode);
	/* opcodes at or above CQE_ERROR_OPCODE use the error-CQE layout */
	if (opcode >= CQE_ERROR_OPCODE) {
		struct ud_send_wqe_st *wqe_p, wqe;
		__u32 *ptr;
		unsigned int i;
		/* wqe_addr is stored in 64-byte units */
		wqe_p =
		    bus_to_virt(EX_FLD
				(cqe.error_cqe,
				 arbelprm_completion_with_error_st,
				 wqe_addr) << 6);
		eprintf("syndrome=0x%lx",
			EX_FLD(cqe.error_cqe, arbelprm_completion_with_error_st,
			       syndrome));
		eprintf("vendor_syndrome=0x%lx",
			EX_FLD(cqe.error_cqe, arbelprm_completion_with_error_st,
			       vendor_code));
		eprintf("wqe_addr=0x%lx", wqe_p);
		eprintf("myqpn=0x%lx",
			EX_FLD(cqe.error_cqe, arbelprm_completion_with_error_st,
			       myqpn));
		/* copy the WQE aside and byte-swap it for a readable dump */
		memcpy(&wqe, wqe_p, sizeof wqe);
		be_to_cpu_buf(&wqe, sizeof wqe);
		eprintf("dumping wqe...");
		ptr = (__u32 *) (&wqe);
		for (i = 0; i < sizeof wqe; i += 4) {
			printf("%lx : ", ptr[i >> 2]);
		}
	}
	return rc;
}
/*
 * Attach (add != 0) or detach (add == 0) the IPoIB QP to/from the
 * multicast group identified by mcast_gid.  Always operates on
 * dev_ib_data.ipoib_qp.
 *
 * Flow: hash the (byte-swapped) GID via the MGID_HASH command, build an
 * MGM entry holding the GID and one MGMQP slot with the IPoIB QPN, then
 * write it at the hashed index with WRITE_MGM.
 *
 * Returns the status of the last firmware command issued.
 */
static int add_qp_to_mcast_group(union ib_gid_u mcast_gid, __u8 add)
{
	void *mg;
	__u8 *tmp;
	int rc;
	__u16 mgid_hash;
	void *mgmqp_p;
	/* stage the GID in the input mailbox, swapped for the hash command */
	tmp = dev_buffers_p->inprm_buf;
	memcpy(tmp, mcast_gid.raw, 16);
	be_to_cpu_buf(tmp, 16);
	rc = cmd_mgid_hash(tmp, &mgid_hash);
	if (!rc) {
		/* reuse the mailbox to build the MGM entry */
		mg = (void *)dev_buffers_p->inprm_buf;
		memset(mg, 0, MT_STRUCT_SIZE(arbelprm_mgm_entry_st));
		INS_FLD(mcast_gid.as_u32.dw[0], mg, arbelprm_mgm_entry_st,
			mgid_128_96);
		INS_FLD(mcast_gid.as_u32.dw[1], mg, arbelprm_mgm_entry_st,
			mgid_95_64);
		INS_FLD(mcast_gid.as_u32.dw[2], mg, arbelprm_mgm_entry_st,
			mgid_63_32);
		INS_FLD(mcast_gid.as_u32.dw[3], mg, arbelprm_mgm_entry_st,
			mgid_31_0);
		/* swap only the 16-byte GID region inside the entry --
		   NOTE(review): be_to_cpu_buf here presumably mirrors the
		   swap convention used by cmd_write_mgm; confirm */
		be_to_cpu_buf(mg +
			      MT_BYTE_OFFSET(arbelprm_mgm_entry_st,
					     mgid_128_96), 16);
		/* fill the first MGMQP slot: QPN plus the qi (valid) bit */
		mgmqp_p = mg + MT_BYTE_OFFSET(arbelprm_mgm_entry_st, mgmqp_0);
		INS_FLD(dev_ib_data.ipoib_qp.qpn, mgmqp_p, arbelprm_mgmqp_st,
			qpn_i);
		INS_FLD(add, mgmqp_p, arbelprm_mgmqp_st, qi);
		rc = cmd_write_mgm(mg, mgid_hash);
	}
	return rc;
}
/*
 * Acknowledge the device interrupt by writing the clear-interrupt mask
 * (1 << intapin, set at init from QUERY_ADAPTER) to the device's
 * clear-interrupt register.  Always returns 0.
 */
static int clear_interrupt(void)
{
	writel(dev_ib_data.clr_int_data, dev_ib_data.clr_int_addr);
	return 0;
}
  1478. static struct ud_send_wqe_st *alloc_send_wqe(udqp_t qph)
  1479. {
  1480. struct udqp_st *qp = qph;
  1481. __u32 idx;
  1482. if (qp->snd_wqe_cur_free) {
  1483. qp->snd_wqe_cur_free--;
  1484. idx = qp->snd_wqe_alloc_idx;
  1485. qp->snd_wqe_alloc_idx =
  1486. (qp->snd_wqe_alloc_idx + 1) & (qp->max_snd_wqes - 1);
  1487. return &qp->snd_wq[idx].wqe_cont.wqe;
  1488. }
  1489. return NULL;
  1490. }
  1491. static struct recv_wqe_st *alloc_rcv_wqe(struct udqp_st *qp)
  1492. {
  1493. __u32 idx;
  1494. if (qp->recv_wqe_cur_free) {
  1495. qp->recv_wqe_cur_free--;
  1496. idx = qp->recv_wqe_alloc_idx;
  1497. qp->recv_wqe_alloc_idx =
  1498. (qp->recv_wqe_alloc_idx + 1) & (qp->max_recv_wqes - 1);
  1499. return &qp->rcv_wq[idx].wqe_cont.wqe;
  1500. }
  1501. return NULL;
  1502. }
  1503. static int free_send_wqe(struct ud_send_wqe_st *wqe)
  1504. {
  1505. struct udqp_st *qp = ((struct ude_send_wqe_cont_st *)wqe)->qp;
  1506. qp->snd_wqe_cur_free++;
  1507. return 0;
  1508. }
  1509. static int free_rcv_wqe(struct recv_wqe_st *wqe)
  1510. {
  1511. struct udqp_st *qp = ((struct recv_wqe_cont_st *)wqe)->qp;
  1512. qp->recv_wqe_cur_free++;
  1513. return 0;
  1514. }
  1515. static int free_wqe(void *wqe)
  1516. {
  1517. int rc = 0;
  1518. struct recv_wqe_st *rcv_wqe;
  1519. // tprintf("free wqe= 0x%x", wqe);
  1520. if ((wqe >= (void *)(dev_ib_data.ipoib_qp.rcv_wq)) &&
  1521. (wqe <
  1522. (void *)(&dev_ib_data.ipoib_qp.rcv_wq[NUM_IPOIB_RCV_WQES]))) {
  1523. /* ipoib receive wqe */
  1524. free_rcv_wqe(wqe);
  1525. rcv_wqe = alloc_rcv_wqe(&dev_ib_data.ipoib_qp);
  1526. if (rcv_wqe) {
  1527. rc = post_rcv_buf(&dev_ib_data.ipoib_qp, rcv_wqe);
  1528. if (rc) {
  1529. eprintf("");
  1530. }
  1531. }
  1532. } else if (wqe >= (void *)(dev_ib_data.ipoib_qp.snd_wq) &&
  1533. wqe <
  1534. (void *)(&dev_ib_data.ipoib_qp.snd_wq[NUM_IPOIB_SND_WQES])) {
  1535. /* ipoib send wqe */
  1536. free_send_wqe(wqe);
  1537. } else if (wqe >= (void *)(dev_ib_data.mads_qp.rcv_wq) &&
  1538. wqe <
  1539. (void *)(&dev_ib_data.mads_qp.rcv_wq[NUM_MADS_RCV_WQES])) {
  1540. /* mads receive wqe */
  1541. free_rcv_wqe(wqe);
  1542. rcv_wqe = alloc_rcv_wqe(&dev_ib_data.mads_qp);
  1543. if (rcv_wqe) {
  1544. rc = post_rcv_buf(&dev_ib_data.mads_qp, rcv_wqe);
  1545. if (rc) {
  1546. eprintf("");
  1547. }
  1548. }
  1549. } else if (wqe >= (void *)(dev_ib_data.mads_qp.snd_wq) &&
  1550. wqe <
  1551. (void *)(&dev_ib_data.mads_qp.snd_wq[NUM_MADS_SND_WQES])) {
  1552. /* mads send wqe */
  1553. free_send_wqe(wqe);
  1554. } else {
  1555. rc = -1;
  1556. eprintf("");
  1557. }
  1558. return rc;
  1559. }
/*
 * Publish the EQ consumer counter to the hardware doorbell register so
 * the HCA knows which event queue entries have been consumed.
 * Always returns 0.
 */
static int update_eq_cons_idx(struct eq_st *eq)
{
	writel(eq->cons_counter, eq->ci_base_base_addr);
	return 0;
}
  1565. static void dev2ib_eqe(struct ib_eqe_st *ib_eqe_p, struct eqe_t *eqe_p)
  1566. {
  1567. void *tmp;
  1568. ib_eqe_p->event_type =
  1569. EX_FLD(eqe_p, arbelprm_event_queue_entry_st, event_type);
  1570. tmp = eqe_p + MT_BYTE_OFFSET(arbelprm_event_queue_entry_st, event_data);
  1571. ib_eqe_p->cqn = EX_FLD(tmp, arbelprm_completion_event_st, cqn);
  1572. }
/*
 * Poll the event queue for one entry.
 *
 * ib_eqe_p - output: the decoded event when one was consumed.
 * num_eqes - output: 1 if an entry was consumed, 0 if the queue was
 *            empty.
 *
 * Returns 0 on success, -1 if updating the consumer index failed.
 */
static int poll_eq(struct ib_eqe_st *ib_eqe_p, __u8 * num_eqes)
{
	struct eqe_t eqe;
	u8 owner;
	int rc;
	u32 *ptr;
	struct eq_st *eq = &dev_ib_data.eq;
	__u32 cons_idx = eq->cons_counter & (eq->eq_size - 1);
	/* The ownership bit is the top bit of the last dword of the
	 * entry (as read by the CPU); set means hardware-owned, i.e.
	 * the entry is not ready for software yet. */
	ptr = (u32 *) (&(eq->eq_buf[cons_idx]));
	owner = (ptr[7] & 0x80000000) ? OWNER_HW : OWNER_SW;
	if (owner == OWNER_SW) {
		/* Decode from a local copy so byte-swapping does not
		 * disturb the ring entry itself. */
		eqe = eq->eq_buf[cons_idx];
		be_to_cpu_buf(&eqe, sizeof(eqe));
		dev2ib_eqe(ib_eqe_p, &eqe);
		/* Return ownership of the entry to the hardware. */
		ptr[7] |= 0x80000000;
		/* NOTE(review): this write-back copies the byte-swapped
		 * local copy over the ring entry, overwriting the owner
		 * bit set on the previous line — confirm this ordering
		 * is intentional. */
		eq->eq_buf[cons_idx] = eqe;
		/* Advance and publish the consumer index. */
		eq->cons_counter++;
		rc = update_eq_cons_idx(eq);
		if (rc) {
			return -1;
		}
		*num_eqes = 1;
	} else {
		*num_eqes = 0;
	}
	return 0;
}
/*
 * Tear down the device mappings created at open time: unmap the UAR
 * and CR-space BARs.  Always returns 0.
 */
static int ib_device_close(void)
{
	iounmap(memfree_pci_dev.uar);
	iounmap(memfree_pci_dev.cr_space);
	return 0;
}
  1606. static __u32 dev_get_qpn(void *qph)
  1607. {
  1608. struct udqp_st *qp = qph;
  1609. return qp->qpn;
  1610. }
/*
 * Ring a doorbell: write the two dwords of the doorbell record to the
 * UAR page at the given byte offset, converting each to big-endian.
 *
 * dbell  - pointer to the two-dword doorbell record (CPU byte order).
 * offset - byte offset of the target register within the UAR page.
 *
 * The barrier between the two writes keeps the device from observing
 * the second dword before the first.
 */
static void dev_post_dbell(void *dbell, __u32 offset)
{
	__u32 *ptr;
	unsigned long address;
	ptr = dbell;
	/* Sanity check on byte 3 of the first dword; expected to be 1.
	 * NOTE(review): the meaning of this field is not visible here —
	 * presumably an opcode/format marker; confirm against the PRM. */
	if (((ptr[0] >> 24) & 0xff) != 1) {
		eprintf("");
	}
	tprintf("ptr[0]= 0x%lx", ptr[0]);
	tprintf("ptr[1]= 0x%lx", ptr[1]);
	address = (unsigned long)(memfree_pci_dev.uar) + offset;
	tprintf("va=0x%lx pa=0x%lx", address,
		virt_to_bus((const void *)address));
	writel(htonl(ptr[0]), memfree_pci_dev.uar + offset);
	barrier();
	address += 4;
	tprintf("va=0x%lx pa=0x%lx", address,
		virt_to_bus((const void *)address));
	writel(htonl(ptr[1]), address /*memfree_pci_dev.uar + offset + 4 */ );
}