Você não pode selecionar mais de 25 tópicos Os tópicos devem começar com uma letra ou um número, podem incluir traços ('-') e podem ter até 35 caracteres.

ib_mt23108.c 43KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701
  1. /*
  2. This software is available to you under a choice of one of two
  3. licenses. You may choose to be licensed under the terms of the GNU
  4. General Public License (GPL) Version 2, available at
  5. <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD
  6. license, available in the LICENSE.TXT file accompanying this
  7. software. These details are also available at
  8. <http://openib.org/license.html>.
  9. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  10. EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  11. MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  12. NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  13. BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  14. ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  15. CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  16. SOFTWARE.
  17. Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
  18. */
  19. #include "mt23108.h"
  20. #include "ib_driver.h"
  21. #include <gpxe/pci.h>
/*
 * All DMA-visible buffers used by the driver, gathered into a single
 * struct so that one memory region (see prep_sw2hw_mpt_buf) can cover
 * them all.  Each member carries the alignment the hardware requires
 * for that object type.
 */
struct device_buffers_st {
	/* receive rings for the MADs QP and the IPoIB QP */
	union recv_wqe_u mads_qp_rcv_queue[NUM_MADS_RCV_WQES]
	    __attribute__ ((aligned(RECV_WQE_U_ALIGN)));
	union recv_wqe_u ipoib_qp_rcv_queue[NUM_IPOIB_RCV_WQES]
	    __attribute__ ((aligned(RECV_WQE_U_ALIGN)));
	/* send rings for the MADs QP and the IPoIB QP */
	union ud_send_wqe_u mads_qp_snd_queue[NUM_MADS_SND_WQES]
	    __attribute__ ((aligned(UD_SEND_WQE_U_ALIGN)));
	union ud_send_wqe_u ipoib_qp_snd_queue[NUM_IPOIB_SND_WQES]
	    __attribute__ ((aligned(UD_SEND_WQE_U_ALIGN)));
	/* shared input/output mailboxes for firmware commands */
	u8 inprm_buf[INPRM_BUF_SZ] __attribute__ ((aligned(INPRM_BUF_ALIGN)));
	u8 outprm_buf[OUTPRM_BUF_SZ]
	    __attribute__ ((aligned(OUTPRM_BUF_ALIGN)));
	/* the single event queue used for all events */
	struct eqe_t eq_buf[1 << LOG2_EQ_SZ]
	    __attribute__ ((aligned(sizeof(struct eqe_t))));
	/* one completion queue per QP direction */
	union cqe_st mads_snd_cq_buf[NUM_MADS_SND_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	union cqe_st ipoib_snd_cq_buf[NUM_IPOIB_SND_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	union cqe_st mads_rcv_cq_buf[NUM_MADS_RCV_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	union cqe_st ipoib_rcv_cq_buf[NUM_IPOIB_RCV_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	/* pool of UD address vectors, managed as a free list (init_av_array) */
	union ud_av_u av_array[NUM_AVS]
	    __attribute__ ((aligned(ADDRESS_VECTOR_ST_ALIGN)));
} __attribute__ ((packed));
/* Page-align the device buffer area; SRC_BUF_SZ leaves slack so the
 * struct can be aligned up inside src_buf (see init_dev_data/lalign). */
#define STRUCT_ALIGN_SZ 4096
#define SRC_BUF_SZ (sizeof(struct device_buffers_st) + STRUCT_ALIGN_SZ - 1)

/* the following must be kept in this order
   for the memory region to cover the buffers */
static u8 src_buf[SRC_BUF_SZ];
static struct ib_buffers_st ib_buffers;
static __u32 memreg_size;	/* distance from dev_buffers_p to &memreg_size */
/* end of order constraint */

static struct dev_pci_struct tavor_pci_dev;	/* HCA (and bridge) PCI state */
static struct device_buffers_st *dev_buffers_p;	/* aligned view into src_buf */
static struct device_ib_data_st dev_ib_data;	/* all driver IB state */
  58. static int gw_write_cr(__u32 addr, __u32 data)
  59. {
  60. writel(htonl(data), tavor_pci_dev.cr_space + addr);
  61. return 0;
  62. }
  63. static int gw_read_cr(__u32 addr, __u32 * result)
  64. {
  65. *result = ntohl(readl(tavor_pci_dev.cr_space + addr));
  66. return 0;
  67. }
  68. static int reset_hca(void)
  69. {
  70. return gw_write_cr(TAVOR_RESET_OFFSET, 1);
  71. }
  72. static int find_mlx_bridge(__u8 hca_bus, __u8 * br_bus_p, __u8 * br_devfn_p)
  73. {
  74. int bus;
  75. int dev;
  76. int devfn;
  77. int rc;
  78. __u16 vendor, dev_id;
  79. __u8 sec_bus;
  80. for (bus = 0; bus < 256; ++bus) {
  81. for (dev = 0; dev < 32; ++dev) {
  82. devfn = (dev << 3);
  83. rc = pcibios_read_config_word(bus, devfn, PCI_VENDOR_ID,
  84. &vendor);
  85. if (rc)
  86. return rc;
  87. if (vendor != MELLANOX_VENDOR_ID)
  88. continue;
  89. rc = pcibios_read_config_word(bus, devfn, PCI_DEVICE_ID,
  90. &dev_id);
  91. if (rc)
  92. return rc;
  93. if (dev_id != TAVOR_BRIDGE_DEVICE_ID)
  94. continue;
  95. rc = pcibios_read_config_byte(bus, devfn,
  96. PCI_SECONDARY_BUS,
  97. &sec_bus);
  98. if (rc)
  99. return rc;
  100. if (sec_bus == hca_bus) {
  101. *br_bus_p = bus;
  102. *br_devfn_p = devfn;
  103. return 0;
  104. }
  105. }
  106. }
  107. return -1;
  108. }
/*
 * Per-device initialization: snapshot the BARs and the full PCI config
 * space of the HCA (and, for the Tavor device id, of its companion
 * bridge), then map CR-space and one UAR page.  The saved config space
 * is replayed after reset by restore_config().  Returns 0 on success,
 * non-zero / -1 on failure.
 */
static int ib_device_init(struct pci_device *dev)
{
	int i;
	int rc;
	__u8 br_bus, br_devfn;

	tprintf("");
	memset(&dev_ib_data, 0, sizeof dev_ib_data);

	/* save bars */
	tprintf("bus=%d devfn=0x%x", dev->bus, dev->devfn);
	for (i = 0; i < 6; ++i) {
		tavor_pci_dev.dev.bar[i] =
		    pci_bar_start(dev, PCI_BASE_ADDRESS_0 + (i << 2));
		tprintf("bar[%d]= 0x%08lx", i, tavor_pci_dev.dev.bar[i]);
	}

	tprintf("");
	/* save config space (64 dwords = 256 bytes) */
	for (i = 0; i < 64; ++i) {
		rc = pci_read_config_dword(dev, i << 2,
					   &tavor_pci_dev.dev.
					   dev_config_space[i]);
		if (rc) {
			eprintf("");
			return rc;
		}
		tprintf("config[%d]= 0x%08lx", i << 2,
			tavor_pci_dev.dev.dev_config_space[i]);
	}

	tprintf("");
	tavor_pci_dev.dev.dev = dev;

	tprintf("");
	/* Tavor (as opposed to Tavor-in-Arbel mode) sits behind a PCI
	 * bridge whose config space must also be saved/restored. */
	if (dev->dev_id == TAVOR_DEVICE_ID) {
		rc = find_mlx_bridge(dev->bus, &br_bus, &br_devfn);
		if (rc) {
			eprintf("");
			return rc;
		}
		tavor_pci_dev.br.bus = br_bus;
		tavor_pci_dev.br.devfn = br_devfn;
		tprintf("bus=%d devfn=0x%x", br_bus, br_devfn);
		/* save config space */
		for (i = 0; i < 64; ++i) {
			rc = pcibios_read_config_dword(br_bus, br_devfn, i << 2,
						       &tavor_pci_dev.br.
						       dev_config_space[i]);
			if (rc) {
				eprintf("");
				return rc;
			}
			tprintf("config[%d]= 0x%08lx", i << 2,
				tavor_pci_dev.br.dev_config_space[i]);
		}
	}

	tprintf("");
	/* map cr-space (1 MB at BAR0) */
	tavor_pci_dev.cr_space = ioremap(tavor_pci_dev.dev.bar[0], 0x100000);
	if (!tavor_pci_dev.cr_space) {
		eprintf("");
		return -1;
	}

	/* map uar: one 4 KB page at index UAR_IDX within BAR2 */
	tavor_pci_dev.uar =
	    ioremap(tavor_pci_dev.dev.bar[2] + UAR_IDX * 0x1000, 0x1000);
	if (!tavor_pci_dev.uar) {
		eprintf("");
		return -1;
	}
	tprintf("uar_base (pa:va) = 0x%lx 0x%lx",
		tavor_pci_dev.dev.bar[2] + UAR_IDX * 0x1000, tavor_pci_dev.uar);

	tprintf("");
	return 0;
}
  180. static inline unsigned long lalign(unsigned long buf, unsigned long align)
  181. {
  182. return (unsigned long)((buf + align - 1) &
  183. (~(((unsigned long)align) - 1)));
  184. }
/*
 * Align the device buffer area inside src_buf and compute memreg_size,
 * the span from the start of the buffers to &memreg_size.  This relies
 * on the declaration order src_buf, ib_buffers, memreg_size (see the
 * "order constraint" comment at file scope) so that the registered
 * memory region covers all three.
 * NOTE(review): casting pointers through __u32 assumes a 32-bit
 * address space — fine for this boot environment, not portable.
 */
static int init_dev_data(void)
{
	unsigned long tmp;

	tmp = lalign(virt_to_bus(src_buf), STRUCT_ALIGN_SZ);
	dev_buffers_p = bus_to_virt(tmp);
	memreg_size = (__u32) (&memreg_size) - (__u32) dev_buffers_p;
	tprintf("src_buf=0x%lx, dev_buffers_p=0x%lx, memreg_size=0x%x", src_buf,
		dev_buffers_p, memreg_size);
	return 0;
}
/*
 * Replay the PCI config space saved by ib_device_init() after the HCA
 * reset wiped it.  The bridge (Tavor only) is restored first so the
 * device is reachable.  Returns 0 on success or the first write error.
 */
static int restore_config(void)
{
	int i;
	int rc;

	if (tavor_pci_dev.dev.dev->dev_id == TAVOR_DEVICE_ID) {
		for (i = 0; i < 64; ++i) {
			rc = pcibios_write_config_dword(tavor_pci_dev.br.bus,
							tavor_pci_dev.br.devfn,
							i << 2,
							tavor_pci_dev.br.
							dev_config_space[i]);
			if (rc) {
				return rc;
			}
		}
	}
	for (i = 0; i < 64; ++i) {
		/* dwords 22 and 23 (config offsets 0x58/0x5c) are skipped —
		 * presumably device registers that must not be rewritten
		 * after reset; TODO confirm against the Tavor PRM. */
		if (i != 22 && i != 23) {
			rc = pci_write_config_dword(tavor_pci_dev.dev.dev,
						    i << 2,
						    tavor_pci_dev.dev.
						    dev_config_space[i]);
			if (rc) {
				return rc;
			}
		}
	}
	return 0;
}
/*
 * Fill an INIT_HCA command mailbox from the parameters gathered in
 * init_hca_p plus the UAR BARs.  Layout follows the firmware's
 * tavorprm_init_hca_st; each section below writes one parameter group.
 * The *_base_addr_l fields in the context section hold only the top
 * bits of the 32-bit address, hence the >> (32 - field width) shifts.
 */
static void prep_init_hca_buf(const struct init_hca_st *init_hca_p, void *buf)
{
	/*struct init_hca_param_st */ void *p = buf;
	void *tmp;

	memset(buf, 0, MT_STRUCT_SIZE(tavorprm_init_hca_st));

	/* QPC/EEC/CQC/EQC/RDB base-address parameters */
	tmp =
	    p + MT_BYTE_OFFSET(tavorprm_init_hca_st,
			       qpc_eec_cqc_eqc_rdb_parameters);

	INS_FLD(init_hca_p->qpc_base_addr_h, tmp, tavorprm_qpcbaseaddr_st,
		qpc_base_addr_h);
	INS_FLD(init_hca_p->
		qpc_base_addr_l >> (32 -
				    (MT_BIT_SIZE
				     (tavorprm_qpcbaseaddr_st,
				      qpc_base_addr_l))), tmp,
		tavorprm_qpcbaseaddr_st, qpc_base_addr_l);
	INS_FLD(init_hca_p->log_num_of_qp, tmp, tavorprm_qpcbaseaddr_st,
		log_num_of_qp);

	INS_FLD(init_hca_p->cqc_base_addr_h, tmp, tavorprm_qpcbaseaddr_st,
		cqc_base_addr_h);
	INS_FLD(init_hca_p->
		cqc_base_addr_l >> (32 -
				    (MT_BIT_SIZE
				     (tavorprm_qpcbaseaddr_st,
				      cqc_base_addr_l))), tmp,
		tavorprm_qpcbaseaddr_st, cqc_base_addr_l);
	INS_FLD(init_hca_p->log_num_of_cq, tmp, tavorprm_qpcbaseaddr_st,
		log_num_of_cq);

	INS_FLD(init_hca_p->eqc_base_addr_h, tmp, tavorprm_qpcbaseaddr_st,
		eqc_base_addr_h);
	INS_FLD(init_hca_p->
		eqc_base_addr_l >> (32 -
				    (MT_BIT_SIZE
				     (tavorprm_qpcbaseaddr_st,
				      eqc_base_addr_l))), tmp,
		tavorprm_qpcbaseaddr_st, eqc_base_addr_l);
	/* number of EQs is fixed at compile time, not taken from init_hca_p */
	INS_FLD(LOG2_EQS, tmp, tavorprm_qpcbaseaddr_st, log_num_eq);

	INS_FLD(init_hca_p->srqc_base_addr_h, tmp, tavorprm_qpcbaseaddr_st,
		srqc_base_addr_h);
	INS_FLD(init_hca_p->
		srqc_base_addr_l >> (32 -
				     (MT_BIT_SIZE
				      (tavorprm_qpcbaseaddr_st,
				       srqc_base_addr_l))), tmp,
		tavorprm_qpcbaseaddr_st, srqc_base_addr_l);
	INS_FLD(init_hca_p->log_num_of_srq, tmp, tavorprm_qpcbaseaddr_st,
		log_num_of_srq);

	/* extended QP/EE context bases take full 32-bit addresses (no shift) */
	INS_FLD(init_hca_p->eqpc_base_addr_h, tmp, tavorprm_qpcbaseaddr_st,
		eqpc_base_addr_h);
	INS_FLD(init_hca_p->eqpc_base_addr_l, tmp, tavorprm_qpcbaseaddr_st,
		eqpc_base_addr_l);
	INS_FLD(init_hca_p->eeec_base_addr_h, tmp, tavorprm_qpcbaseaddr_st,
		eeec_base_addr_h);
	INS_FLD(init_hca_p->eeec_base_addr_l, tmp, tavorprm_qpcbaseaddr_st,
		eeec_base_addr_l);

	/* multicast parameters */
	tmp = p + MT_BYTE_OFFSET(tavorprm_init_hca_st, multicast_parameters);
	INS_FLD(init_hca_p->mc_base_addr_h, tmp, tavorprm_multicastparam_st,
		mc_base_addr_h);
	INS_FLD(init_hca_p->mc_base_addr_l, tmp, tavorprm_multicastparam_st,
		mc_base_addr_l);
	INS_FLD(init_hca_p->log_mc_table_entry_sz, tmp,
		tavorprm_multicastparam_st, log_mc_table_entry_sz);
	INS_FLD(init_hca_p->log_mc_table_sz, tmp, tavorprm_multicastparam_st,
		log_mc_table_sz);
	INS_FLD(init_hca_p->mc_table_hash_sz, tmp, tavorprm_multicastparam_st,
		mc_table_hash_sz);

	/* TPT (translation & protection table) parameters */
	tmp = p + MT_BYTE_OFFSET(tavorprm_init_hca_st, tpt_parameters);
	INS_FLD(init_hca_p->mpt_base_addr_h, tmp, tavorprm_tptparams_st,
		mpt_base_adr_h);
	INS_FLD(init_hca_p->mpt_base_addr_l, tmp, tavorprm_tptparams_st,
		mpt_base_adr_l);
	INS_FLD(init_hca_p->log_mpt_sz, tmp, tavorprm_tptparams_st, log_mpt_sz);
	INS_FLD(init_hca_p->mtt_base_addr_h, tmp, tavorprm_tptparams_st,
		mtt_base_addr_h);
	INS_FLD(init_hca_p->mtt_base_addr_l, tmp, tavorprm_tptparams_st,
		mtt_base_addr_l);

	/* UAR parameters come straight from the BARs (1 MB aligned low part) */
	tmp = p + MT_BYTE_OFFSET(tavorprm_init_hca_st, uar_parameters);
	INS_FLD(tavor_pci_dev.dev.bar[3], tmp, tavorprm_uar_params_st,
		uar_base_addr_h);
	INS_FLD(tavor_pci_dev.dev.bar[2] & 0xfff00000, tmp,
		tavorprm_uar_params_st, uar_base_addr_l);
}
/*
 * Fill a SW2HW_MPT mailbox describing the driver's single memory
 * region: physical addressing (pa=1), local read/write, starting at
 * dev_buffers_p and spanning memreg_size bytes (computed in
 * init_dev_data).  NOTE(review): buf is not zeroed here — presumably
 * the caller memsets it first (setup_hca does); confirm for new callers.
 */
static void prep_sw2hw_mpt_buf(void *buf, __u32 mkey)
{
	INS_FLD(1, buf, tavorprm_mpt_st, m_io);
	INS_FLD(1, buf, tavorprm_mpt_st, lw);	/* local write */
	INS_FLD(1, buf, tavorprm_mpt_st, lr);	/* local read */
	INS_FLD(1, buf, tavorprm_mpt_st, pa);	/* physical addressing: no MTTs */
	INS_FLD(1, buf, tavorprm_mpt_st, r_w);	/* memory region (not window) */
	INS_FLD(mkey, buf, tavorprm_mpt_st, mem_key);
	INS_FLD(GLOBAL_PD, buf, tavorprm_mpt_st, pd);
	INS_FLD(virt_to_bus(dev_buffers_p), buf, tavorprm_mpt_st,
		start_address_l);
	INS_FLD(memreg_size, buf, tavorprm_mpt_st, reg_wnd_len_l);
}
/*
 * Fill a SW2HW_EQ mailbox for the single event queue: state "fired",
 * ring at eq's bus address, size 2^LOG2_EQ_SZ entries, using the
 * driver's UAR page, PD and memory key.
 */
static void prep_sw2hw_eq_buf(void *buf, struct eqe_t *eq)
{
	memset(buf, 0, MT_STRUCT_SIZE(tavorprm_eqc_st));

	INS_FLD(2, buf, tavorprm_eqc_st, st);	/* fired */
	INS_FLD(virt_to_bus(eq), buf, tavorprm_eqc_st, start_address_l);
	INS_FLD(LOG2_EQ_SZ, buf, tavorprm_eqc_st, log_eq_size);
	INS_FLD(UAR_IDX, buf, tavorprm_eqc_st, usr_page);
	INS_FLD(GLOBAL_PD, buf, tavorprm_eqc_st, pd);
	INS_FLD(dev_ib_data.mkey, buf, tavorprm_eqc_st, lkey);
}
  329. static void init_eq_buf(void *eq_buf)
  330. {
  331. int num_eqes = 1 << LOG2_EQ_SZ;
  332. memset(eq_buf, 0xff, num_eqes * sizeof(struct eqe_t));
  333. }
  334. static void prep_init_ib_buf(void *buf)
  335. {
  336. __u32 *ptr = (__u32 *) buf;
  337. ptr[0] = 0x4310;
  338. ptr[1] = 1;
  339. ptr[2] = 64;
  340. }
/*
 * Fill a SW2HW_CQ mailbox.  Written as raw dwords; the offsets
 * presumably match tavorprm_completion_queue_context_st — confirm
 * against the PRM.  NOTE(review): buf is not zeroed here; setup-time
 * callers are expected to have cleared the mailbox.
 */
static void prep_sw2hw_cq_buf(void *buf, __u8 eqn, __u32 cqn,
			      union cqe_st *cq_buf)
{
	__u32 *ptr = (__u32 *) buf;

	ptr[2] = virt_to_bus(cq_buf);	/* ring start address (low 32 bits) */
	ptr[3] = (LOG2_CQ_SZ << 24) | UAR_IDX;	/* log2 size | UAR page */
	ptr[4] = eqn;	/* EQ for completion events */
	ptr[5] = eqn;	/* EQ for error events (same single EQ) */
	ptr[6] = dev_ib_data.pd;
	ptr[7] = dev_ib_data.mkey;	/* lkey covering the CQ ring */
	ptr[12] = cqn;
}
/*
 * Fill a RST2INIT_QPEE mailbox for a UD QP: service type UD, migrated
 * state, driver UAR/PD/lkey, the given send/receive CQNs and Q_Key,
 * port number in the primary address path, and MTU settings.
 * NOTE(review): buf is not zeroed here — assumes the caller cleared
 * the mailbox; confirm at call sites.
 */
static void prep_rst2init_qpee_buf(void *buf, __u32 snd_cqn, __u32 rcv_cqn,
				   __u32 qkey)
{
	struct qp_ee_state_tarnisition_st *prm;
	void *tmp;

	prm = (struct qp_ee_state_tarnisition_st *)buf;

	INS_FLD(3, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st, st);	/* service type = UD */
	INS_FLD(3, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st, pm_state);	/* required for UD QP */
	INS_FLD(UAR_IDX, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st,
		usr_page);
	INS_FLD(dev_ib_data.pd, &prm->ctx,
		tavorprm_queue_pair_ee_context_entry_st, pd);
	INS_FLD(dev_ib_data.mkey, &prm->ctx,
		tavorprm_queue_pair_ee_context_entry_st, wqe_lkey);
	INS_FLD(1, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st, ssc);	/* generate send CQE */
	INS_FLD(1, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st, rsc);	/* generate receive CQE */
	INS_FLD(snd_cqn, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st,
		cqn_snd);
	INS_FLD(rcv_cqn, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st,
		cqn_rcv);
	INS_FLD(qkey, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st,
		q_key);

	/* primary address path: only the port number matters for UD */
	tmp =
	    (void *)(&prm->ctx) +
	    MT_BYTE_OFFSET(tavorprm_queue_pair_ee_context_entry_st,
			   primary_address_path);
	INS_FLD(dev_ib_data.port, tmp, tavorprm_address_path_st, port_number);

	INS_FLD(4, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st, mtu);	/* MTU encoding 4 = 2048 — TODO confirm */
	INS_FLD(0xb, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st,
		msg_max);	/* log2 max message size = 11 */
}
/*
 * Fill an INIT2RTR_QPEE mailbox: only the MTU and max message size are
 * (re)asserted for the transition, matching the values set in
 * prep_rst2init_qpee_buf().  Assumes the caller cleared buf.
 */
static void prep_init2rtr_qpee_buf(void *buf)
{
	struct qp_ee_state_tarnisition_st *prm;

	prm = (struct qp_ee_state_tarnisition_st *)buf;

	INS_FLD(4, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st, mtu);
	INS_FLD(0xb, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st,
		msg_max);
}
  392. static void init_av_array()
  393. {
  394. int i;
  395. dev_ib_data.udav.av_array = dev_buffers_p->av_array;
  396. dev_ib_data.udav.udav_next_free = FL_EOL;
  397. for (i = 0; i < NUM_AVS; ++i) {
  398. dev_ib_data.udav.av_array[i].ud_av.next_free =
  399. dev_ib_data.udav.udav_next_free;
  400. dev_ib_data.udav.udav_next_free = i;
  401. }
  402. tprintf("dev_ib_data.udav.udav_next_free=%d", i);
  403. }
/*
 * Bring the HCA from reset to an operational state on the given port:
 * reset, restore PCI config, SYS_EN, QUERY_FW (map the error buffer),
 * QUERY_DEV_LIM, disable SRQs, INIT_HCA with a context-table layout
 * carved out of BAR4/5, register one memory region covering all driver
 * buffers, set up and map the single EQ, then INIT_IB and build the
 * address-vector free list.  *eq_p receives the driver EQ state.
 * Returns 0 on success, -1 on failure (with best-effort unwind via the
 * undo_* labels, in reverse order of setup).
 */
static int setup_hca(__u8 port, void **eq_p)
{
	int rc;
	__u32 key, in_key;
	__u32 *inprm;
	struct eqe_t *eq_buf;
	__u32 event_mask;
	void *cfg;
	int ret = 0;
	__u8 eqn;
	struct dev_lim_st dev_lim;
	struct init_hca_st init_hca;
	__u32 offset, base_h, base_l;
	const __u32 delta = 0x400000;	/* 4 MB of device address space per context table */
	struct query_fw_st qfw;

	tprintf("called");
	init_dev_data();
	rc = reset_hca();
	if (rc) {
		ret = -1;
		eprintf("");
		goto exit;
	} else {
		tprintf("reset_hca() success");
	}

	mdelay(1000);	/* wait for 1 sec */

	/* the reset wiped PCI config space; replay the saved copy */
	rc = restore_config();
	if (rc) {
		ret = -1;
		eprintf("");
		goto exit;
	} else {
		tprintf("restore_config() success");
	}

	dev_ib_data.pd = GLOBAL_PD;
	dev_ib_data.port = port;

	/* execute system enable command */
	rc = cmd_sys_en();
	if (rc) {
		ret = -1;
		eprintf("");
		goto exit;
	} else {
		tprintf("cmd_sys_en() success");
	}

	rc= cmd_query_fw(&qfw);
	if (rc) {
		ret = -1;
		eprintf("");
		goto exit;
	} else {
		tprintf("cmd_query_fw() success");

		if (print_info) {
			printf("FW ver = %d.%d.%d\n",
			qfw.fw_rev_major,
			qfw.fw_rev_minor,
			qfw.fw_rev_subminor);
		}

		tprintf("fw_rev_major=%d", qfw.fw_rev_major);
		tprintf("fw_rev_minor=%d", qfw.fw_rev_minor);
		tprintf("fw_rev_subminor=%d", qfw.fw_rev_subminor);
		tprintf("error_buf_start_h=0x%x", qfw.error_buf_start_h);
		tprintf("error_buf_start_l=0x%x", qfw.error_buf_start_l);
		tprintf("error_buf_size=%d", qfw.error_buf_size);
	}

	/* the firmware error buffer must be mappable with a 32-bit address */
	if (qfw.error_buf_start_h) {
		eprintf("too high physical address");
		ret = -1;
		goto exit;
	}

	/* error_buf_size is in dwords, hence *4 for bytes */
	dev_ib_data.error_buf_addr= ioremap(qfw.error_buf_start_l,
										qfw.error_buf_size*4);
	dev_ib_data.error_buf_size= qfw.error_buf_size;
	if (!dev_ib_data.error_buf_addr) {
		eprintf("");
		ret = -1;
		goto exit;
	}

	rc = cmd_query_dev_lim(&dev_lim);
	if (rc) {
		ret = -1;
		eprintf("");
		goto exit;
	} else {
		tprintf("cmd_query_dev_lim() success");
		tprintf("log2_rsvd_qps=%x", dev_lim.log2_rsvd_qps);
		tprintf("qpc_entry_sz=%x", dev_lim.qpc_entry_sz);
		tprintf("log2_rsvd_srqs=%x", dev_lim.log2_rsvd_srqs);
		tprintf("srq_entry_sz=%x", dev_lim.srq_entry_sz);
		tprintf("log2_rsvd_ees=%x", dev_lim.log2_rsvd_ees);
		tprintf("eec_entry_sz=%x", dev_lim.eec_entry_sz);
		tprintf("log2_rsvd_cqs=%x", dev_lim.log2_rsvd_cqs);
		tprintf("cqc_entry_sz=%x", dev_lim.cqc_entry_sz);
		tprintf("log2_rsvd_mtts=%x", dev_lim.log2_rsvd_mtts);
		tprintf("mtt_entry_sz=%x", dev_lim.mtt_entry_sz);
		tprintf("log2_rsvd_mrws=%x", dev_lim.log2_rsvd_mrws);
		tprintf("mpt_entry_sz=%x", dev_lim.mpt_entry_sz);
		tprintf("eqc_entry_sz=%x", dev_lim.eqc_entry_sz);
	}

	/* set the qp and cq numbers according
	   to the results of query_dev_lim: our numbers start just past the
	   firmware-reserved ranges.  (The stray unary '+' before QPN_BASE
	   is a line-wrap artifact and has no effect.) */
	dev_ib_data.mads_qp.qpn = (1 << dev_lim.log2_rsvd_qps) +
	    +QPN_BASE + MADS_QPN_SN;
	dev_ib_data.ipoib_qp.qpn = (1 << dev_lim.log2_rsvd_qps) +
	    +QPN_BASE + IPOIB_QPN_SN;

	dev_ib_data.mads_qp.snd_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
	    MADS_SND_CQN_SN;
	dev_ib_data.mads_qp.rcv_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
	    MADS_RCV_CQN_SN;

	dev_ib_data.ipoib_qp.snd_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
	    IPOIB_SND_CQN_SN;
	dev_ib_data.ipoib_qp.rcv_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
	    IPOIB_RCV_CQN_SN;

	/* disable SRQ */
	cfg = (void *)dev_buffers_p->inprm_buf;
	memset(cfg, 0, MT_STRUCT_SIZE(tavorprm_mod_stat_cfg_st));
	INS_FLD(1, cfg, tavorprm_mod_stat_cfg_st, srq_m);	//cfg->srq_m = 1;
	rc = cmd_mod_stat_cfg(cfg);
	if (rc) {
		ret = -1;
		eprintf("");
		goto exit;
	} else {
		tprintf("cmd_mod_stat_cfg() success");
	}

	/* prepare the init_hca params to pass
	   to prep_init_hca_buf: lay the firmware context tables out as
	   consecutive 'delta'-sized slices of the BAR4/5 aperture */
	memset(&init_hca, 0, sizeof init_hca);
	offset = 0;
	base_h = tavor_pci_dev.dev.bar[5] & 0xfffffff0;
	base_l = tavor_pci_dev.dev.bar[4] & 0xfffffff0;
	tprintf("base_h=0x%lx, base_l=0x%lx", base_h, base_l);
	init_hca.qpc_base_addr_h = base_h;
	init_hca.qpc_base_addr_l = base_l + offset;
	init_hca.log_num_of_qp = dev_lim.log2_rsvd_qps + 1;	/* reserved + ours */
	offset += delta;
	init_hca.eec_base_addr_h = base_h;
	init_hca.eec_base_addr_l = base_l + offset;
	init_hca.log_num_of_ee = dev_lim.log2_rsvd_ees;
	offset += delta;
	init_hca.srqc_base_addr_h = base_h;
	init_hca.srqc_base_addr_l = base_l + offset;
	init_hca.log_num_of_srq = dev_lim.log2_rsvd_srqs;
	offset += delta;
	init_hca.cqc_base_addr_h = base_h;
	init_hca.cqc_base_addr_l = base_l + offset;
	init_hca.log_num_of_cq = dev_lim.log2_rsvd_cqs + 1;	/* reserved + ours */
	offset += delta;
	init_hca.eqpc_base_addr_h = base_h;
	init_hca.eqpc_base_addr_l = base_l + offset;
	offset += delta;
	init_hca.eeec_base_addr_h = base_h;
	init_hca.eeec_base_addr_l = base_l + offset;
	offset += delta;
	init_hca.eqc_base_addr_h = base_h;
	init_hca.eqc_base_addr_l = base_l + offset;
	init_hca.log_num_of_eq = LOG2_EQS;
	offset += delta;
	init_hca.rdb_base_addr_h = base_h;
	init_hca.rdb_base_addr_l = base_l + offset;
	offset += delta;
	init_hca.mc_base_addr_h = base_h;
	init_hca.mc_base_addr_l = base_l + offset;
	init_hca.log_mc_table_entry_sz = LOG2_MC_ENTRY;
	init_hca.mc_table_hash_sz = 0;
	init_hca.log_mc_table_sz = LOG2_MC_GROUPS;
	offset += delta;
	init_hca.mpt_base_addr_h = base_h;
	init_hca.mpt_base_addr_l = base_l + offset;
	init_hca.log_mpt_sz = dev_lim.log2_rsvd_mrws + 1;	/* reserved + ours */
	offset += delta;
	init_hca.mtt_base_addr_h = base_h;
	init_hca.mtt_base_addr_l = base_l + offset;

	/* this buffer is used for all the commands */
	inprm = (void *)dev_buffers_p->inprm_buf;
	/* excute init_hca command */
	prep_init_hca_buf(&init_hca, inprm);
	rc = cmd_init_hca(inprm, MT_STRUCT_SIZE(tavorprm_init_hca_st));
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_sys_en;
	} else
		tprintf("cmd_init_hca() success");

	/* register a single memory region which covers
	   4 GB of the address space which will be used
	   throughout the driver */
	memset(inprm, 0, SW2HW_MPT_IBUF_SZ);
	in_key = MKEY_PREFIX + (1 << dev_lim.log2_rsvd_mrws);	/* first non-reserved MPT entry */
	prep_sw2hw_mpt_buf(inprm, in_key);
	rc = cmd_sw2hw_mpt(&key, in_key, inprm, SW2HW_MPT_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_init_hca;
	} else {
		tprintf("cmd_sw2hw_mpt() success, key=0x%lx", key);
	}
	dev_ib_data.mkey = key;

	eqn = EQN;
	/* allocate a single EQ which will receive
	   all the events */
	eq_buf = dev_buffers_p->eq_buf;
	init_eq_buf(eq_buf);	/* put in HW ownership */
	prep_sw2hw_eq_buf(inprm, eq_buf);
	/* NOTE(review): only the mailbox size is passed here; presumably
	   cmd_sw2hw_eq() implicitly uses the shared inprm buffer prepared
	   just above — confirm against its definition. */
	rc = cmd_sw2hw_eq(SW2HW_EQ_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_sw2hw_mpt;
	} else
		tprintf("cmd_sw2hw_eq() success");

	/* route completion, error and overrun events to our EQ */
	event_mask = (1 << XDEV_EV_TYPE_CQ_COMP) |
	    (1 << XDEV_EV_TYPE_CQ_ERR) |
	    (1 << XDEV_EV_TYPE_LOCAL_WQ_CATAS_ERR) |
	    (1 << XDEV_EV_TYPE_PORT_ERR) |
	    (1 << XDEV_EV_TYPE_LOCAL_WQ_INVALID_REQ_ERR) |
	    (1 << XDEV_EV_TYPE_LOCAL_WQ_ACCESS_VIOL_ERR) |
	    (1 << TAVOR_IF_EV_TYPE_OVERRUN);
	rc = cmd_map_eq(eqn, event_mask, 1);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_sw2hw_eq;
	} else
		tprintf("cmd_map_eq() success");

	dev_ib_data.eq.eqn = eqn;
	dev_ib_data.eq.eq_buf = eq_buf;
	dev_ib_data.eq.cons_idx = 0;
	dev_ib_data.eq.eq_size = 1 << LOG2_EQ_SZ;
	*eq_p = &dev_ib_data.eq;

	memset(inprm, 0, INIT_IB_IBUF_SZ);
	prep_init_ib_buf(inprm);
	rc = cmd_init_ib(port, inprm, INIT_IB_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_sw2hw_eq;
	} else
		tprintf("cmd_init_ib() success");

	init_av_array();
	tprintf("init_av_array() done");

	goto exit;

	/* error unwind: tear down in reverse order of setup */
      undo_sw2hw_eq:
	rc = cmd_hw2sw_eq(EQN);
	if (rc) {
		eprintf("");
	} else
		tprintf("cmd_hw2sw_eq() success");

      undo_sw2hw_mpt:
	rc = cmd_hw2sw_mpt(key);
	if (rc)
		eprintf("");
	else
		tprintf("cmd_hw2sw_mpt() success key=0x%lx", key);

      undo_init_hca:
	rc = cmd_close_hca(0);
	if (rc) {
		eprintf("");
		goto undo_sys_en;
	} else
		tprintf("cmd_close_hca() success");

      undo_sys_en:
	rc = cmd_sys_dis();
	if (rc) {
		eprintf("");
		goto undo_sys_en;
	} else
		tprintf("cmd_sys_dis() success");

	goto exit;

      exit:
	return ret;
}
  677. static void *get_inprm_buf(void)
  678. {
  679. return dev_buffers_p->inprm_buf;
  680. }
  681. static void *get_outprm_buf(void)
  682. {
  683. return dev_buffers_p->outprm_buf;
  684. }
/*
 * Return the virtual address of the data buffer referenced by
 * scatter/gather pointer 'index' of a UD send WQE.
 * NOTE(review): unlike get_rcv_wqe_buf() there is no be32_to_cpu here —
 * presumably send WQE pointers are kept in CPU byte order until the
 * WQE is posted; confirm against the WQE build/post path.
 */
static void *get_send_wqe_buf(void *wqe, __u8 index)
{
	struct ud_send_wqe_st *snd_wqe = wqe;

	return bus_to_virt(snd_wqe->mpointer[index].local_addr_l);
}
/*
 * Return the virtual address of the data buffer referenced by
 * scatter/gather pointer 'index' of a receive WQE.  Receive WQEs live
 * in device (big-endian) byte order, hence the be32_to_cpu.
 */
static void *get_rcv_wqe_buf(void *wqe, __u8 index)
{
	struct recv_wqe_st *rcv_wqe = wqe;

	return bus_to_virt(be32_to_cpu(rcv_wqe->mpointer[index].local_addr_l));
}
/*
 * (Re)build a UD address vector: destination LID, SL, static rate and
 * optionally a GRH with the given GID, targeting destination QP qpn.
 *
 * Byte-order note: the scalar fields are inserted in CPU order and the
 * whole AV is then swapped to big-endian with cpu_to_be_buf().  The
 * rgid fields are inserted AFTER the swap — presumably because the raw
 * GID bytes are already in network (big-endian) order and must not be
 * swapped again; confirm against the PRM before reordering anything.
 */
static void modify_av_params(struct ud_av_st *av,
			     __u16 dlid,
			     __u8 g,
			     __u8 sl, __u8 rate, union ib_gid_u *gid, __u32 qpn)
{
	memset(&av->av, 0, sizeof av->av);

	INS_FLD(dev_ib_data.port, &av->av, tavorprm_ud_address_vector_st,
		port_number);
	INS_FLD(dev_ib_data.pd, &av->av, tavorprm_ud_address_vector_st, pd);
	INS_FLD(dlid, &av->av, tavorprm_ud_address_vector_st, rlid);
	INS_FLD(g, &av->av, tavorprm_ud_address_vector_st, g);	/* GRH present */
	INS_FLD(sl, &av->av, tavorprm_ud_address_vector_st, sl);
	INS_FLD(3, &av->av, tavorprm_ud_address_vector_st, msg);	/* max message size */

	if (rate >= 3)
		INS_FLD(0, &av->av, tavorprm_ud_address_vector_st, max_stat_rate);	/* 4x */
	else
		INS_FLD(1, &av->av, tavorprm_ud_address_vector_st, max_stat_rate);	/* 1x */

	cpu_to_be_buf(&av->av, sizeof(av->av));

	if (g) {
		if (gid) {
			INS_FLD(*((__u32 *) (&gid->raw[0])), &av->av,
				tavorprm_ud_address_vector_st, rgid_127_96);
			INS_FLD(*((__u32 *) (&gid->raw[4])), &av->av,
				tavorprm_ud_address_vector_st, rgid_95_64);
			INS_FLD(*((__u32 *) (&gid->raw[8])), &av->av,
				tavorprm_ud_address_vector_st, rgid_63_32);
			INS_FLD(*((__u32 *) (&gid->raw[12])), &av->av,
				tavorprm_ud_address_vector_st, rgid_31_0);
		} else {
			INS_FLD(0, &av->av, tavorprm_ud_address_vector_st,
				rgid_127_96);
			INS_FLD(0, &av->av, tavorprm_ud_address_vector_st,
				rgid_95_64);
			INS_FLD(0, &av->av, tavorprm_ud_address_vector_st,
				rgid_63_32);
			INS_FLD(0, &av->av, tavorprm_ud_address_vector_st,
				rgid_31_0);
		}
	} else {
		/* no GRH: rgid_31_0 = 2 — presumably a required placeholder
		 * value when g is clear; TODO confirm against the PRM */
		INS_FLD(0, &av->av, tavorprm_ud_address_vector_st, rgid_127_96);
		INS_FLD(0, &av->av, tavorprm_ud_address_vector_st, rgid_95_64);
		INS_FLD(0, &av->av, tavorprm_ud_address_vector_st, rgid_63_32);
		INS_FLD(2, &av->av, tavorprm_ud_address_vector_st, rgid_31_0);
	}

	av->dest_qp = qpn;
}
  741. static void init_cq_buf(union cqe_st *cq_buf, __u8 num_cqes)
  742. {
  743. memset(cq_buf, 0xff, num_cqes * sizeof cq_buf[0]);
  744. }
/*
 * post_rcv_buf
 *
 * Hand one receive WQE to the HCA by ringing the receive doorbell and,
 * if a WQE was posted before, patch that previous WQE's "next" segment
 * so the hardware can chain to the new one.  Returns the status of
 * cmd_post_doorbell().
 */
static int post_rcv_buf(struct udqp_st *qp, struct recv_wqe_st *rcv_wqe)
{
	struct recv_doorbell_st dbell;
	int rc;
	/* scratch copy of the previous WQE's first 8 bytes (its next segment) */
	__u32 tmp[2];
	struct recv_wqe_st *tmp_wqe = (struct recv_wqe_st *)tmp;
	__u32 *ptr_dst;
	memset(&dbell, 0, sizeof dbell);
	/* nds = descriptor size in 16-byte units; nda = bus address >> 6 */
	INS_FLD(sizeof(*rcv_wqe) >> 4, &dbell, tavorprm_receive_doorbell_st,
		nds);
	INS_FLD(virt_to_bus(rcv_wqe) >> 6, &dbell, tavorprm_receive_doorbell_st,
		nda);
	INS_FLD(qp->qpn, &dbell, tavorprm_receive_doorbell_st, qpn);
	INS_FLD(1, &dbell, tavorprm_receive_doorbell_st, credits);
	if (qp->last_posted_rcv_wqe) {
		/* edit the previous WQE on a CPU-order copy, then write the
		   two dwords back in big-endian form */
		memcpy(tmp, qp->last_posted_rcv_wqe, sizeof(tmp));
		be_to_cpu_buf(tmp, sizeof(tmp));
		INS_FLD(1, tmp_wqe->next, wqe_segment_next_st, dbd);
		INS_FLD(sizeof(*rcv_wqe) >> 4, tmp_wqe->next,
			wqe_segment_next_st, nds);
		INS_FLD(virt_to_bus(rcv_wqe) >> 6, tmp_wqe->next,
			wqe_segment_next_st, nda_31_6);
		/* this is not really an opcode: the struct is shared between
		   send and receive, and on receive this bit must be 1, which
		   coincides with the nopcode field position */
		INS_FLD(1, tmp_wqe->next, wqe_segment_next_st, nopcode);
		cpu_to_be_buf(tmp, sizeof(tmp));
		ptr_dst = (__u32 *) (qp->last_posted_rcv_wqe);
		ptr_dst[0] = tmp[0];
		ptr_dst[1] = tmp[1];
	}
	rc = cmd_post_doorbell(&dbell, POST_RCV_OFFSET);
	if (!rc) {
		/* remember this WQE so the next post can chain behind it */
		qp->last_posted_rcv_wqe = rcv_wqe;
	}
	return rc;
}
/*
 * post_send_req
 *
 * Ring the send doorbell for 'snd_wqe' on QP 'qph'.  num_gather is the
 * number of gather (data pointer) entries in the WQE and determines the
 * descriptor size reported to hardware.  As with post_rcv_buf(), the
 * previously posted WQE (if any) is patched to chain to this one.
 */
static int post_send_req(void *qph, void *wqeh, __u8 num_gather)
{
	struct send_doorbell_st dbell;
	int rc;
	struct udqp_st *qp = qph;
	struct ud_send_wqe_st *snd_wqe = wqeh;
	/* CPU-order scratch copy of the previous WQE's next/control segment */
	struct next_control_seg_st tmp;
	__u32 *psrc, *pdst;
	__u32 nds;
	tprintf("snd_wqe=0x%lx, virt_to_bus(snd_wqe)=0x%lx", snd_wqe,
		virt_to_bus(snd_wqe));
	memset(&dbell, 0, sizeof dbell);
	INS_FLD(XDEV_NOPCODE_SEND, &dbell, tavorprm_send_doorbell_st, nopcode);
	INS_FLD(1, &dbell, tavorprm_send_doorbell_st, f);
	/* nda = WQE bus address in 64-byte units */
	INS_FLD(virt_to_bus(snd_wqe) >> 6, &dbell, tavorprm_send_doorbell_st,
		nda);
	/* descriptor size in 16-byte units: next + UD segment + gather list */
	nds =
	    (sizeof(snd_wqe->next) + sizeof(snd_wqe->udseg) +
	     sizeof(snd_wqe->mpointer[0]) * num_gather) >> 4;
	INS_FLD(nds, &dbell, tavorprm_send_doorbell_st, nds);
	INS_FLD(qp->qpn, &dbell, tavorprm_send_doorbell_st, qpn);
	tprintf("0= %lx", ((__u32 *) ((void *)(&dbell)))[0]);
	tprintf("1= %lx", ((__u32 *) ((void *)(&dbell)))[1]);
	if (qp->last_posted_snd_wqe) {
		/* patch previous WQE: set doorbell-rung bit and point its
		   next-address at this WQE; edit in CPU order, store in BE */
		memcpy(&tmp, &qp->last_posted_snd_wqe->next, sizeof tmp);
		be_to_cpu_buf(&tmp, sizeof tmp);
		INS_FLD(1, &tmp, wqe_segment_next_st, dbd);
		INS_FLD(virt_to_bus(snd_wqe) >> 6, &tmp, wqe_segment_next_st,
			nda_31_6);
		INS_FLD(nds, &tmp, wqe_segment_next_st, nds);
		psrc = (__u32 *) (&tmp);
		pdst = (__u32 *) (&qp->last_posted_snd_wqe->next);
		pdst[0] = htonl(psrc[0]);
		pdst[1] = htonl(psrc[1]);
	}
	rc = cmd_post_doorbell(&dbell, POST_SND_OFFSET);
	if (!rc) {
		qp->last_posted_snd_wqe = snd_wqe;
	}
	return rc;
}
  823. static int create_mads_qp(void **qp_pp, void **snd_cq_pp, void **rcv_cq_pp)
  824. {
  825. __u8 i;
  826. int rc;
  827. struct udqp_st *qp;
  828. qp = &dev_ib_data.mads_qp;
  829. /* set the pointer to the receive WQEs buffer */
  830. qp->rcv_wq = dev_buffers_p->mads_qp_rcv_queue;
  831. qp->send_buf_sz = MAD_BUF_SZ;
  832. qp->rcv_buf_sz = MAD_BUF_SZ;
  833. qp->recv_wqe_alloc_idx = 0;
  834. qp->max_recv_wqes = NUM_MADS_RCV_WQES;
  835. qp->recv_wqe_cur_free = NUM_MADS_RCV_WQES;
  836. /* iterrate through the list */
  837. for (i = 0; i < NUM_MADS_RCV_WQES; ++i) {
  838. /* clear the WQE */
  839. memset(&qp->rcv_wq[i], 0, sizeof(qp->rcv_wq[i]));
  840. qp->rcv_wq[i].wqe_cont.qp = qp;
  841. qp->rcv_bufs[i] = ib_buffers.rcv_mad_buf[i];
  842. }
  843. /* set the pointer to the send WQEs buffer */
  844. qp->snd_wq = dev_buffers_p->mads_qp_snd_queue;
  845. qp->snd_wqe_alloc_idx = 0;
  846. qp->max_snd_wqes = NUM_MADS_SND_WQES;
  847. qp->snd_wqe_cur_free = NUM_MADS_SND_WQES;
  848. /* iterrate through the list */
  849. for (i = 0; i < NUM_MADS_SND_WQES; ++i) {
  850. /* clear the WQE */
  851. memset(&qp->snd_wq[i], 0, sizeof(qp->snd_wq[i]));
  852. /* link the WQE to the free list */
  853. qp->snd_wq[i].wqe_cont.qp = qp;
  854. qp->snd_bufs[i] = ib_buffers.send_mad_buf[i];
  855. }
  856. /* qp number and cq numbers are already set up */
  857. qp->snd_cq.cq_buf = dev_buffers_p->mads_snd_cq_buf;
  858. qp->rcv_cq.cq_buf = dev_buffers_p->mads_rcv_cq_buf;
  859. qp->snd_cq.num_cqes = NUM_MADS_SND_CQES;
  860. qp->rcv_cq.num_cqes = NUM_MADS_RCV_CQES;
  861. qp->qkey = GLOBAL_QKEY;
  862. rc = create_udqp(qp);
  863. if (!rc) {
  864. *qp_pp = qp;
  865. *snd_cq_pp = &qp->snd_cq;
  866. *rcv_cq_pp = &qp->rcv_cq;
  867. }
  868. return rc;
  869. }
  870. static int create_ipoib_qp(void **qp_pp,
  871. void **snd_cq_pp, void **rcv_cq_pp, __u32 qkey)
  872. {
  873. __u8 i;
  874. int rc;
  875. struct udqp_st *qp;
  876. qp = &dev_ib_data.ipoib_qp;
  877. /* set the pointer to the receive WQEs buffer */
  878. qp->rcv_wq = dev_buffers_p->ipoib_qp_rcv_queue;
  879. qp->rcv_buf_sz = IPOIB_RCV_BUF_SZ;
  880. qp->recv_wqe_alloc_idx = 0;
  881. qp->max_recv_wqes = NUM_IPOIB_RCV_WQES;
  882. qp->recv_wqe_cur_free = NUM_IPOIB_RCV_WQES;
  883. /* iterrate through the list */
  884. for (i = 0; i < NUM_IPOIB_RCV_WQES; ++i) {
  885. /* clear the WQE */
  886. memset(&qp->rcv_wq[i], 0, sizeof(qp->rcv_wq[i]));
  887. /* update data */
  888. qp->rcv_wq[i].wqe_cont.qp = qp;
  889. qp->rcv_bufs[i] = ib_buffers.ipoib_rcv_buf[i];
  890. tprintf("rcv_buf=%lx", qp->rcv_bufs[i]);
  891. }
  892. /* init send queue WQEs list */
  893. /* set the list empty */
  894. qp->snd_wqe_alloc_idx = 0;
  895. qp->max_snd_wqes = NUM_IPOIB_SND_WQES;
  896. qp->snd_wqe_cur_free = NUM_IPOIB_SND_WQES;
  897. /* set the pointer to the send WQEs buffer */
  898. qp->snd_wq = dev_buffers_p->ipoib_qp_snd_queue;
  899. /* iterrate through the list */
  900. for (i = 0; i < NUM_IPOIB_SND_WQES; ++i) {
  901. /* clear the WQE */
  902. memset(&qp->snd_wq[i], 0, sizeof(qp->snd_wq[i]));
  903. /* update data */
  904. qp->snd_wq[i].wqe_cont.qp = qp;
  905. qp->snd_bufs[i] = ib_buffers.send_ipoib_buf[i];
  906. qp->send_buf_sz = 4;
  907. }
  908. /* qp number and cq numbers are already set up */
  909. qp->snd_cq.cq_buf = dev_buffers_p->ipoib_snd_cq_buf;
  910. qp->rcv_cq.cq_buf = dev_buffers_p->ipoib_rcv_cq_buf;
  911. qp->snd_cq.num_cqes = NUM_IPOIB_SND_CQES;
  912. qp->rcv_cq.num_cqes = NUM_IPOIB_RCV_CQES;
  913. qp->qkey = qkey;
  914. rc = create_udqp(qp);
  915. if (!rc) {
  916. *qp_pp = qp;
  917. *snd_cq_pp = &qp->snd_cq;
  918. *rcv_cq_pp = &qp->rcv_cq;
  919. }
  920. return rc;
  921. }
/*
 * create_udqp
 *
 * Bring a UD queue pair into service: create its send and receive CQs,
 * take the QP through RST->INIT->RTR->RTS, and pre-post every available
 * receive buffer.  Returns 0 on success, -1 on failure; on failure any
 * CQ already handed to hardware is returned to software via goto
 * cleanup.
 *
 * NOTE(review): the error paths taken after cmd_rst2init_qpee succeeds
 * do not move the QP back to reset (no 2err/2rst here) — confirm that
 * callers tolerate a QP left in INIT on failure.
 */
static int create_udqp(struct udqp_st *qp)
{
	int rc, ret = 0;
	void *inprm;
	struct recv_wqe_st *rcv_wqe;
	/* all mailbox commands below share the same input-parameter buffer */
	inprm = dev_buffers_p->inprm_buf;
	/* create send CQ */
	init_cq_buf(qp->snd_cq.cq_buf, qp->snd_cq.num_cqes);
	qp->snd_cq.cons_idx = 0;
	memset(inprm, 0, SW2HW_CQ_IBUF_SZ);
	prep_sw2hw_cq_buf(inprm, dev_ib_data.eq.eqn, qp->snd_cq.cqn,
			  qp->snd_cq.cq_buf);
	rc = cmd_sw2hw_cq(qp->snd_cq.cqn, inprm, SW2HW_CQ_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto exit;
	}
	/* create receive CQ */
	init_cq_buf(qp->rcv_cq.cq_buf, qp->rcv_cq.num_cqes);
	qp->rcv_cq.cons_idx = 0;
	memset(inprm, 0, SW2HW_CQ_IBUF_SZ);
	prep_sw2hw_cq_buf(inprm, dev_ib_data.eq.eqn, qp->rcv_cq.cqn,
			  qp->rcv_cq.cq_buf);
	rc = cmd_sw2hw_cq(qp->rcv_cq.cqn, inprm, SW2HW_CQ_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_snd_cq;
	}
	/* RST -> INIT, binding the QP to both CQs and its Q_Key */
	memset(inprm, 0, QPCTX_IBUF_SZ);
	prep_rst2init_qpee_buf(inprm, qp->snd_cq.cqn, qp->rcv_cq.cqn, qp->qkey);
	rc = cmd_rst2init_qpee(qp->qpn, inprm, QPCTX_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_rcv_cq;
	}
	qp->last_posted_rcv_wqe = NULL;
	qp->last_posted_snd_wqe = NULL;
	/* post all the buffers to the receive queue */
	while (1) {
		/* allocate wqe; NULL means the free list is exhausted,
		   which ends the pre-posting loop normally */
		rcv_wqe = alloc_rcv_wqe(qp);
		if (!rcv_wqe)
			break;
		/* post the buffer */
		rc = post_rcv_buf(qp, rcv_wqe);
		if (rc) {
			ret = -1;
			eprintf("");
			goto undo_rcv_cq;
		}
	}
	/* INIT -> RTR */
	memset(inprm, 0, QPCTX_IBUF_SZ);
	prep_init2rtr_qpee_buf(inprm);
	rc = cmd_init2rtr_qpee(qp->qpn, inprm, QPCTX_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_rcv_cq;
	}
	/* RTR -> RTS (no extra parameters needed; mailbox is zeroed) */
	memset(inprm, 0, QPCTX_IBUF_SZ);
	rc = cmd_rtr2rts_qpee(qp->qpn, inprm, QPCTX_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_rcv_cq;
	}
	goto exit;
      undo_rcv_cq:
	/* best-effort teardown; ret already records the original failure */
	rc = cmd_hw2sw_cq(qp->rcv_cq.cqn);
	if (rc)
		eprintf("");
      undo_snd_cq:
	rc = cmd_hw2sw_cq(qp->snd_cq.cqn);
	if (rc)
		eprintf("");
      exit:
	return ret;
}
  1003. static int destroy_udqp(struct udqp_st *qp)
  1004. {
  1005. int rc;
  1006. rc = cmd_2err_qpee(qp->qpn);
  1007. if (rc) {
  1008. eprintf("");
  1009. return rc;
  1010. }
  1011. tprintf("cmd_2err_qpee(0x%lx) success", qp->qpn);
  1012. rc = cmd_2rst_qpee(qp->qpn);
  1013. if (rc) {
  1014. eprintf("");
  1015. return rc;
  1016. }
  1017. tprintf("cmd_2rst_qpee(0x%lx) success", qp->qpn);
  1018. rc = cmd_hw2sw_cq(qp->rcv_cq.cqn);
  1019. if (rc) {
  1020. eprintf("");
  1021. return rc;
  1022. }
  1023. tprintf("cmd_hw2sw_cq(0x%lx) success", qp->snd_cq.cqn);
  1024. rc = cmd_hw2sw_cq(qp->snd_cq.cqn);
  1025. if (rc) {
  1026. eprintf("");
  1027. return rc;
  1028. }
  1029. tprintf("cmd_hw2sw_cq(0x%lx) success", qp->rcv_cq.cqn);
  1030. return rc;
  1031. }
/*
 * prep_send_wqe_buf
 *
 * Fill in a UD send WQE: control flags, the UD (address vector) segment,
 * and the first gather pointer.  If 'buf' is non-NULL its 'len' bytes are
 * copied into the WQE's data buffer at 'offset', and the byte count
 * covers offset+len; otherwise the caller has already placed 'len' bytes
 * in the buffer.  'e' sets the event-request bit.  The entire WQE is
 * byte-swapped to device order at the end — after this call the WQE must
 * not be edited in CPU order.
 */
static void prep_send_wqe_buf(void *qph,
			      void *avh,
			      void *wqeh,
			      const void *buf,
			      unsigned int offset, __u16 len, __u8 e)
{
	struct udqp_st *qp = qph;
	struct ud_av_st *av = avh;
	struct ud_send_wqe_st *wqe = wqeh;
	INS_FLD(e, wqe->next.control, wqe_segment_ctrl_send_st, e);
	INS_FLD(1, wqe->next.control, wqe_segment_ctrl_send_st, always1);
	/* UD segment: address vector handle, destination QP, keys */
	wqe->udseg.av_add_h = 0;
	wqe->udseg.av_add_l = virt_to_bus(&av->av);
	wqe->udseg.dest_qp = av->dest_qp;
	wqe->udseg.lkey = dev_ib_data.mkey;
	wqe->udseg.qkey = qp->qkey;
	if (buf) {
		/* local_addr_l is still CPU order here; swapped below */
		memcpy(bus_to_virt(wqe->mpointer[0].local_addr_l) + offset, buf,
		       len);
		len += offset;
	}
	wqe->mpointer[0].byte_count = len;
	wqe->mpointer[0].lkey = dev_ib_data.mkey;
	/* convert the whole WQE to device (big-endian) order */
	cpu_to_be_buf(wqe, sizeof *wqe);
}
  1057. static void *alloc_ud_av(void)
  1058. {
  1059. u8 next_free;
  1060. if (dev_ib_data.udav.udav_next_free == FL_EOL) {
  1061. return NULL;
  1062. }
  1063. next_free = dev_ib_data.udav.udav_next_free;
  1064. dev_ib_data.udav.udav_next_free =
  1065. dev_buffers_p->av_array[next_free].ud_av.next_free;
  1066. tprintf("allocated udav %d", next_free);
  1067. return &dev_buffers_p->av_array[next_free].ud_av;
  1068. }
  1069. static void free_ud_av(void *avh)
  1070. {
  1071. union ud_av_u *avu;
  1072. __u8 idx, old_idx;
  1073. struct ud_av_st *av = avh;
  1074. avu = (union ud_av_u *)av;
  1075. idx = avu - dev_buffers_p->av_array;
  1076. tprintf("freeing udav idx=%d", idx);
  1077. old_idx = dev_ib_data.udav.udav_next_free;
  1078. dev_ib_data.udav.udav_next_free = idx;
  1079. avu->ud_av.next_free = old_idx;
  1080. }
  1081. static int update_cq_cons_idx(struct cq_st *cq)
  1082. {
  1083. struct cq_dbell_st dbell;
  1084. int rc;
  1085. memset(&dbell, 0, sizeof dbell);
  1086. INS_FLD(cq->cqn, &dbell, tavorprm_cq_cmd_doorbell_st, cqn);
  1087. INS_FLD(CQ_DBELL_CMD_INC_CONS_IDX, &dbell, tavorprm_cq_cmd_doorbell_st,
  1088. cq_cmd);
  1089. rc = cmd_post_doorbell(&dbell, CQ_DBELL_OFFSET);
  1090. return rc;
  1091. }
/*
 * poll_cq
 *
 * Poll one completion from CQ 'cqh'.  If a software-owned CQE is at the
 * consumer index, copy it (converted to CPU order) into *cqe_p, give the
 * slot back to hardware, advance the consumer index, and set *num_cqes
 * to 1; otherwise set *num_cqes to 0.  Returns 0 on success, nonzero on
 * error.  Ownership is tracked in the top bit of the CQE's dword 7:
 * set = hardware-owned, clear = software-owned.
 */
static int poll_cq(void *cqh, union cqe_st *cqe_p, u8 * num_cqes)
{
	union cqe_st cqe;
	int rc;
	u32 *ptr;
	struct cq_st *cq = cqh;
	/* sanity check: presumably 0x80..0x83 are the only CQNs this
	   driver ever creates — confirm against the cqn assignment code */
	if (cq->cqn < 0x80 || cq->cqn > 0x83) {
		eprintf("");
		return -1;
	}
	ptr = (u32 *) (&(cq->cq_buf[cq->cons_idx]));
	barrier();
	if ((ptr[7] & 0x80000000) == 0) {
		/* software owns this CQE: take a CPU-order copy for caller */
		cqe = cq->cq_buf[cq->cons_idx];
		be_to_cpu_buf(&cqe, sizeof(cqe));
		*cqe_p = cqe;
		/* hand the slot back to hardware before moving on */
		ptr[7] = 0x80000000;
		barrier();
		cq->cons_idx = (cq->cons_idx + 1) % cq->num_cqes;
		rc = update_cq_cons_idx(cq);
		if (rc) {
			return rc;
		}
		*num_cqes = 1;
	} else
		*num_cqes = 0;
	return 0;
}
  1120. static void dev2ib_cqe(struct ib_cqe_st *ib_cqe_p, union cqe_st *cqe_p)
  1121. {
  1122. __u8 opcode;
  1123. __u32 wqe_addr_ba;
  1124. opcode =
  1125. EX_FLD(cqe_p->good_cqe, tavorprm_completion_queue_entry_st, opcode);
  1126. if (opcode >= CQE_ERROR_OPCODE)
  1127. ib_cqe_p->is_error = 1;
  1128. else
  1129. ib_cqe_p->is_error = 0;
  1130. ib_cqe_p->is_send =
  1131. EX_FLD(cqe_p->good_cqe, tavorprm_completion_queue_entry_st, s);
  1132. wqe_addr_ba =
  1133. EX_FLD(cqe_p->good_cqe, tavorprm_completion_queue_entry_st,
  1134. wqe_adr) << 6;
  1135. ib_cqe_p->wqe = bus_to_virt(wqe_addr_ba);
  1136. // if (ib_cqe_p->is_send) {
  1137. // be_to_cpu_buf(ib_cqe_p->wqe, sizeof(struct ud_send_wqe_st));
  1138. // }
  1139. // else {
  1140. // be_to_cpu_buf(ib_cqe_p->wqe, sizeof(struct recv_wqe_st));
  1141. // }
  1142. ib_cqe_p->count =
  1143. EX_FLD(cqe_p->good_cqe, tavorprm_completion_queue_entry_st,
  1144. byte_cnt);
  1145. }
/*
 * ib_poll_cq
 *
 * Poll one completion and translate it to ib_cqe_st form.  On an error
 * completion, decode and dump the error CQE fields and the offending
 * WQE to aid debugging.  Returns poll_cq()'s status; *num_cqes is 0
 * when no completion was available.
 */
static int ib_poll_cq(void *cqh, struct ib_cqe_st *ib_cqe_p, u8 * num_cqes)
{
	int rc;
	union cqe_st cqe;
	struct cq_st *cq = cqh;
	__u8 opcode;
	rc = poll_cq(cq, &cqe, num_cqes);
	if (rc || ((*num_cqes) == 0)) {
		return rc;
	}
	dev2ib_cqe(ib_cqe_p, &cqe);
	opcode =
	    EX_FLD(cqe.good_cqe, tavorprm_completion_queue_entry_st, opcode);
	if (opcode >= CQE_ERROR_OPCODE) {
		/* error completion: reinterpret the CQE as an error CQE and
		   dump its diagnostic fields plus the failed WQE */
		struct ud_send_wqe_st *wqe_p, wqe;
		__u32 *ptr;
		unsigned int i;
		/* wqe_addr holds bits 31:6 of the WQE bus address */
		wqe_p =
		    bus_to_virt(EX_FLD
				(cqe.error_cqe,
				 tavorprm_completion_with_error_st,
				 wqe_addr) << 6);
		eprintf("syndrome=0x%lx",
			EX_FLD(cqe.error_cqe, tavorprm_completion_with_error_st,
			       syndrome));
		eprintf("wqe_addr=0x%lx", wqe_p);
		eprintf("wqe_size=0x%lx",
			EX_FLD(cqe.error_cqe, tavorprm_completion_with_error_st,
			       wqe_size));
		eprintf("myqpn=0x%lx",
			EX_FLD(cqe.error_cqe, tavorprm_completion_with_error_st,
			       myqpn));
		eprintf("db_cnt=0x%lx",
			EX_FLD(cqe.error_cqe, tavorprm_completion_with_error_st,
			       db_cnt));
		/* copy so we can byte-swap without touching the live WQE */
		memcpy(&wqe, wqe_p, sizeof wqe);
		be_to_cpu_buf(&wqe, sizeof wqe);
		eprintf("dumping wqe...");
		ptr = (__u32 *) (&wqe);
		for (i = 0; i < sizeof wqe; i += 4) {
			printf("%lx : ", ptr[i >> 2]);
		}
	}
	return rc;
}
/*
 * add_qp_to_mcast_group
 *
 * Attach (add != 0) or detach (add == 0) the IPoIB QP to/from the
 * multicast group identified by 'mcast_gid': hash the GID to find the
 * MGM table entry, build the entry with the GID and this QP, and write
 * it back.  Always works on the IPoIB QP.  Returns the status of the
 * last firmware command.
 */
static int add_qp_to_mcast_group(union ib_gid_u mcast_gid, __u8 add)
{
	void *mg;
	__u8 *tmp;
	int rc;
	__u16 mgid_hash;
	void *mgmqp_p;
	/* MGID_HASH wants the GID in CPU order; raw GID is big-endian */
	tmp = dev_buffers_p->inprm_buf;
	memcpy(tmp, mcast_gid.raw, 16);
	be_to_cpu_buf(tmp, 16);
	rc = cmd_mgid_hash(tmp, &mgid_hash);
	if (!rc) {
		/* build the MGM entry (reuses the same mailbox buffer) */
		mg = (void *)dev_buffers_p->inprm_buf;
		memset(mg, 0, MT_STRUCT_SIZE(tavorprm_mgm_entry_st));
		INS_FLD(mcast_gid.as_u32.dw[0], mg, tavorprm_mgm_entry_st, mgid_128_96);	// memcpy(&mg->mgid_128_96, &mcast_gid.raw[0], 4);
		INS_FLD(mcast_gid.as_u32.dw[1], mg, tavorprm_mgm_entry_st, mgid_95_64);	// memcpy(&mg->mgid_95_64, &mcast_gid.raw[4], 4);
		INS_FLD(mcast_gid.as_u32.dw[2], mg, tavorprm_mgm_entry_st, mgid_63_32);	//memcpy(&mg->mgid_63_32, &mcast_gid.raw[8], 4);
		INS_FLD(mcast_gid.as_u32.dw[3], mg, tavorprm_mgm_entry_st, mgid_31_0);	//memcpy(&mg->mgid_31_0, &mcast_gid.raw[12], 4);
		/* the GID words above were inserted in wire order; swap the
		   16-byte GID region so the entry ends up consistent */
		be_to_cpu_buf(mg + MT_BYTE_OFFSET(tavorprm_mgm_entry_st, mgid_128_96), 16);	//be_to_cpu_buf(&mg->mgid_128_96, 16);
		mgmqp_p = mg + MT_BYTE_OFFSET(tavorprm_mgm_entry_st, mgmqp_0);
		INS_FLD(dev_ib_data.ipoib_qp.qpn, mgmqp_p, tavorprm_mgmqp_st, qpn_i);	//mg->mgmqp[0].qpn = dev_ib_data.ipoib_qp.qpn;
		INS_FLD(add, mgmqp_p, tavorprm_mgmqp_st, qi);	//mg->mgmqp[0].valid = add ? 1 : 0;
		rc = cmd_write_mgm(mg, mgid_hash);
	}
	return rc;
}
  1218. static int clear_interrupt(void)
  1219. {
  1220. __u32 ecr;
  1221. int ret = 0;
  1222. if (gw_read_cr(0x80704, &ecr)) {
  1223. eprintf("");
  1224. } else {
  1225. if (ecr) {
  1226. ret = 1;
  1227. }
  1228. }
  1229. gw_write_cr(0xf00d8, 0x80000000); /* clear int */
  1230. gw_write_cr(0x8070c, 0xffffffff);
  1231. return ret;
  1232. }
  1233. static struct ud_send_wqe_st *alloc_send_wqe(udqp_t qph)
  1234. {
  1235. struct udqp_st *qp = qph;
  1236. __u8 new_entry;
  1237. struct ud_send_wqe_st *wqe;
  1238. if (qp->snd_wqe_cur_free == 0) {
  1239. return NULL;
  1240. }
  1241. new_entry = qp->snd_wqe_alloc_idx;
  1242. wqe = &qp->snd_wq[new_entry].wqe;
  1243. qp->snd_wqe_cur_free--;
  1244. qp->snd_wqe_alloc_idx = (qp->snd_wqe_alloc_idx + 1) % qp->max_snd_wqes;
  1245. memset(wqe, 0, sizeof *wqe);
  1246. wqe->mpointer[0].local_addr_l = virt_to_bus(qp->snd_bufs[new_entry]);
  1247. return wqe;
  1248. }
/*
 * alloc_rcv_wqe
 *
 * Take the next free receive WQE from the QP's circular list and fill
 * in its two scatter entries: entry 0 for the GRH (always present on
 * UD receives) and entry 1 for the payload.  Returns NULL when no WQE
 * is free.
 *
 * Note: since we work directly on the work queue, WQEs are left in
 * big-endian (device) order.
 */
static struct recv_wqe_st *alloc_rcv_wqe(struct udqp_st *qp)
{
	__u8 new_entry;
	struct recv_wqe_st *wqe;
	if (qp->recv_wqe_cur_free == 0) {
		return NULL;
	}
	new_entry = qp->recv_wqe_alloc_idx;
	wqe = &qp->rcv_wq[new_entry].wqe;
	qp->recv_wqe_cur_free--;
	qp->recv_wqe_alloc_idx =
	    (qp->recv_wqe_alloc_idx + 1) % qp->max_recv_wqes;
	memset(wqe, 0, sizeof *wqe);
	/* GRH is always required: scatter entry 0 catches the 40-byte
	   header region at the start of the buffer */
	wqe->mpointer[0].local_addr_h = 0;
	wqe->mpointer[0].local_addr_l = virt_to_bus(qp->rcv_bufs[new_entry]);
	wqe->mpointer[0].lkey = dev_ib_data.mkey;
	wqe->mpointer[0].byte_count = GRH_SIZE;
	/* scatter entry 1: the actual payload, right after the GRH area */
	wqe->mpointer[1].local_addr_h = 0;
	wqe->mpointer[1].local_addr_l =
	    virt_to_bus(qp->rcv_bufs[new_entry] + GRH_SIZE);
	wqe->mpointer[1].lkey = dev_ib_data.mkey;
	wqe->mpointer[1].byte_count = qp->rcv_buf_sz;
	tprintf("rcv_buf=%lx\n", qp->rcv_bufs[new_entry]);
	/* we do it only on the data segment since the control
	   segment is always owned by HW */
	cpu_to_be_buf(wqe, sizeof *wqe);
	// tprintf("alloc wqe= 0x%x", wqe);
	return wqe;
}
  1285. static int free_send_wqe(struct ud_send_wqe_st *wqe)
  1286. {
  1287. union ud_send_wqe_u *wqe_u;
  1288. struct udqp_st *qp;
  1289. wqe_u = (union ud_send_wqe_u *)wqe;
  1290. qp = wqe_u->wqe_cont.qp;
  1291. if (qp->snd_wqe_cur_free >= qp->max_snd_wqes) {
  1292. return -1;
  1293. }
  1294. qp->snd_wqe_cur_free++;
  1295. return 0;
  1296. }
  1297. static int free_rcv_wqe(struct recv_wqe_st *wqe)
  1298. {
  1299. union recv_wqe_u *wqe_u;
  1300. struct udqp_st *qp;
  1301. wqe_u = (union recv_wqe_u *)wqe;
  1302. qp = wqe_u->wqe_cont.qp;
  1303. if (qp->recv_wqe_cur_free >= qp->max_recv_wqes) {
  1304. return -1;
  1305. }
  1306. qp->recv_wqe_cur_free++;
  1307. return 0;
  1308. }
  1309. static int free_wqe(void *wqe)
  1310. {
  1311. int rc = 0;
  1312. struct recv_wqe_st *rcv_wqe;
  1313. // tprintf("free wqe= 0x%x", wqe);
  1314. if ((wqe >= (void *)(dev_ib_data.ipoib_qp.rcv_wq)) &&
  1315. (wqe <
  1316. (void *)(&dev_ib_data.ipoib_qp.rcv_wq[NUM_IPOIB_RCV_WQES]))) {
  1317. /* ipoib receive wqe */
  1318. free_rcv_wqe(wqe);
  1319. rcv_wqe = alloc_rcv_wqe(&dev_ib_data.ipoib_qp);
  1320. if (rcv_wqe) {
  1321. rc = post_rcv_buf(&dev_ib_data.ipoib_qp, rcv_wqe);
  1322. if (rc) {
  1323. eprintf("");
  1324. }
  1325. }
  1326. } else if (wqe >= (void *)(dev_ib_data.ipoib_qp.snd_wq) &&
  1327. wqe <
  1328. (void *)(&dev_ib_data.ipoib_qp.snd_wq[NUM_IPOIB_SND_WQES])) {
  1329. /* ipoib send wqe */
  1330. free_send_wqe(wqe);
  1331. } else if (wqe >= (void *)(dev_ib_data.mads_qp.rcv_wq) &&
  1332. wqe <
  1333. (void *)(&dev_ib_data.mads_qp.rcv_wq[NUM_MADS_RCV_WQES])) {
  1334. /* mads receive wqe */
  1335. free_rcv_wqe(wqe);
  1336. rcv_wqe = alloc_rcv_wqe(&dev_ib_data.mads_qp);
  1337. if (rcv_wqe) {
  1338. rc = post_rcv_buf(&dev_ib_data.mads_qp, rcv_wqe);
  1339. if (rc) {
  1340. eprintf("");
  1341. }
  1342. }
  1343. } else if (wqe >= (void *)(dev_ib_data.mads_qp.snd_wq) &&
  1344. wqe <
  1345. (void *)(&dev_ib_data.mads_qp.snd_wq[NUM_MADS_SND_WQES])) {
  1346. /* mads send wqe */
  1347. free_send_wqe(wqe);
  1348. } else {
  1349. rc = -1;
  1350. eprintf("");
  1351. }
  1352. return rc;
  1353. }
  1354. static int update_eq_cons_idx(struct eq_st *eq)
  1355. {
  1356. struct eq_dbell_st dbell;
  1357. int rc;
  1358. memset(&dbell, 0, sizeof dbell);
  1359. INS_FLD(dev_ib_data.eq.eqn, &dbell, tavorprm_eq_cmd_doorbell_st, eqn);
  1360. INS_FLD(EQ_DBELL_CMD_SET_CONS_IDX, &dbell, tavorprm_eq_cmd_doorbell_st,
  1361. eq_cmd);
  1362. INS_FLD(eq->cons_idx, &dbell, tavorprm_eq_cmd_doorbell_st, eq_param);
  1363. rc = cmd_post_doorbell(&dbell, EQ_DBELL_OFFSET);
  1364. return rc;
  1365. }
  1366. static void dev2ib_eqe(struct ib_eqe_st *ib_eqe_p, void *eqe_p)
  1367. {
  1368. void *tmp;
  1369. ib_eqe_p->event_type =
  1370. EX_FLD(eqe_p, tavorprm_event_queue_entry_st, event_type);
  1371. tmp = eqe_p + MT_BYTE_OFFSET(tavorprm_event_queue_entry_st, event_data);
  1372. ib_eqe_p->cqn = EX_FLD(tmp, tavorprm_completion_event_st, cqn);
  1373. }
  1374. static int poll_eq(struct ib_eqe_st *ib_eqe_p, __u8 * num_eqes)
  1375. {
  1376. struct eqe_t eqe;
  1377. __u8 owner;
  1378. int rc;
  1379. __u32 *ptr;
  1380. struct eq_st *eq = &dev_ib_data.eq;
  1381. ptr = (__u32 *) (&(eq->eq_buf[eq->cons_idx]));
  1382. tprintf("cons)idx=%d, addr(eqe)=%x, val=0x%x", eq->cons_idx, virt_to_bus(ptr), ptr[7]);
  1383. owner = (ptr[7] & 0x80000000) ? OWNER_HW : OWNER_SW;
  1384. if (owner == OWNER_SW) {
  1385. tprintf("got eqe");
  1386. eqe = eq->eq_buf[eq->cons_idx];
  1387. be_to_cpu_buf(&eqe, sizeof(eqe));
  1388. dev2ib_eqe(ib_eqe_p, &eqe);
  1389. ptr[7] |= 0x80000000;
  1390. eq->eq_buf[eq->cons_idx] = eqe;
  1391. eq->cons_idx = (eq->cons_idx + 1) % eq->eq_size;
  1392. rc = update_eq_cons_idx(eq);
  1393. if (rc) {
  1394. return -1;
  1395. }
  1396. *num_eqes = 1;
  1397. } else {
  1398. *num_eqes = 0;
  1399. }
  1400. return 0;
  1401. }
  1402. static int ib_device_close(void)
  1403. {
  1404. iounmap(tavor_pci_dev.uar);
  1405. iounmap(tavor_pci_dev.cr_space);
  1406. iounmap(dev_ib_data.error_buf_addr);
  1407. return 0;
  1408. }
  1409. static __u32 dev_get_qpn(void *qph)
  1410. {
  1411. struct udqp_st *qp = qph;
  1412. return qp->qpn;
  1413. }
  1414. static void dev_post_dbell(void *dbell, __u32 offset)
  1415. {
  1416. __u32 *ptr;
  1417. unsigned long address;
  1418. ptr = dbell;
  1419. tprintf("ptr[0]= 0x%lx", ptr[0]);
  1420. tprintf("ptr[1]= 0x%lx", ptr[1]);
  1421. address = (unsigned long)(tavor_pci_dev.uar) + offset;
  1422. tprintf("va=0x%lx pa=0x%lx", address,
  1423. virt_to_bus((const void *)address));
  1424. writel(htonl(ptr[0]), tavor_pci_dev.uar + offset);
  1425. barrier();
  1426. address += 4;
  1427. tprintf("va=0x%lx pa=0x%lx", address,
  1428. virt_to_bus((const void *)address));
  1429. writel(htonl(ptr[1]), tavor_pci_dev.uar + offset + 4);
  1430. }