You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

mt25218.h 12KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546
#ifndef __mt25218_h__
#define __mt25218_h__

/*
 * Driver-private definitions for the Mellanox MT25218 ("Arbel", memfree)
 * InfiniBand HCA. Hardware structure layouts come from the auto-generated
 * PRM header below.
 */
#include "MT25218_PRM.h"
#include "ib_mad.h"

/* PCI device IDs of the supported Mellanox HCA family members. */
#define TAVOR_DEVICE_ID 0x5a44
#define TAVOR_BRIDGE_DEVICE_ID 0x5a46
#define ARTAVOR_DEVICE_ID 0x6278

/* CR-space offset used to reset a memfree device — value per PRM; verify. */
#define MEMFREE_RESET_OFFSET 0xF0010

/* L_Key value denoting an invalid WQE local key —
   NOTE(review): presumed taken from the PRM; confirm against spec. */
#define INVALID_WQE_LKEY 0x00000100
/*
 * Opcodes for the HCA firmware command interface.
 * These are the memfree-specific commands (plus generic ones) used by
 * this driver; values per the Mellanox PRM.
 */
#define MEMFREE_CMD_QUERY_ADAPTER 0x006
#define MEMFREE_CMD_WRITE_MGM 0x026
#define MEMFREE_CMD_MOD_STAT_CFG 0x034
#define MEMFREE_CMD_QUERY_FW 0x004
#define MEMFREE_CMD_ENABLE_LAM 0xff8
#define MEMFREE_CMD_MAP_FA 0xfff
#define MEMFREE_CMD_UNMAP_FA 0xffe
#define MEMFREE_CMD_RUN_FW 0xff6
#define MEMFREE_CMD_SET_ICM_SIZE 0xffd
#define MEMFREE_CMD_MAP_ICM_AUX 0xffc
#define MEMFREE_CMD_MAP_ICM 0xffa
#define MEMFREE_CMD_QUERY_DEV_LIM 0x003

/*
 * Tavor specific event types.
 * Only event types that are specific to Tavor
 * and are used by the driver are listed here.
 */
#define TAVOR_IF_EV_TYPE_OVERRUN 0x0F

/*
 * EQ doorbell commands (written to the EQ doorbell register).
 */
#define EQ_DBELL_CMD_INC_CONS_IDX 1 /* increment Consumer_indx by one */
#define EQ_DBELL_CMD_ARM_EQ 2 /* Request notification for next event (Arm EQ) */
#define EQ_DBELL_CMD_DISARM_CQ 3 /* Disarm CQ (CQ number is specified in EQ_param) */
#define EQ_DBELL_CMD_SET_CONS_IDX 4 /* set Consumer_indx to value of EQ_param */
#define EQ_DBELL_CMD_ALWAYS_ARM 5 /* move EQ to Always Armed state */

/*
 * CQ doorbell commands (written to the CQ doorbell register).
 */
#define CQ_DBELL_CMD_INC_CONS_IDX 1
#define CQ_DBELL_CMD_REQ_NOTIF_SOL_UNSOL 2
#define CQ_DBELL_CMD_REQ_NOTIF_SOL 3
#define CQ_DBELL_CMD_SET_CONS_IDX 4
#define CQ_DBELL_CMD_REQ_NOTIF_MULT 5

/* Size/alignment of the in/out parameter mailboxes passed to fw commands. */
#define INPRM_BUF_SZ 4096
#define INPRM_BUF_ALIGN 4096
#define OUTPRM_BUF_SZ 4096
#define OUTPRM_BUF_ALIGN 4096
/*
 * Sizes of parameter blocks used in certain commands.
 * TODO: replace them with sizeof operators of the appropriate structs.
 */
#define SW2HW_MPT_IBUF_SZ MT_STRUCT_SIZE(arbelprm_mpt_st)
#define SW2HW_EQ_IBUF_SZ MT_STRUCT_SIZE(arbelprm_eqc_st)
#define INIT_IB_IBUF_SZ MT_STRUCT_SIZE(arbelprm_init_ib_st)
#define SW2HW_CQ_IBUF_SZ MT_STRUCT_SIZE(arbelprm_completion_queue_context_st)
#define QPCTX_IBUF_SZ MT_STRUCT_SIZE(arbelprm_queue_pair_ee_context_entry_st)

/* The single EQ and UAR index used by this driver. */
#define EQN 0
#define UAR_IDX 1

/*
 * Layout of the ICM (firmware-owned host memory): each context table gets
 * a fixed 1 MB (0x100000) slice, laid out back-to-back from offset 0.
 */
#define QPC_OFFSET 0
#define CQC_OFFSET (QPC_OFFSET + 0x100000)
#define EQPC_OFFSET (CQC_OFFSET + 0x100000)
#define EQC_OFFSET (EQPC_OFFSET + 0x100000)
#define MC_BASE_OFFSET (EQC_OFFSET + 0x100000)
#define MPT_BASE_OFFSET (MC_BASE_OFFSET + 0x100000)
#define MTT_BASE_OFFSET (MPT_BASE_OFFSET + 0x100000)

/* log2 counts of the various context objects provisioned in the ICM. */
#define LOG2_QPS 7
#define LOG2_CQS 8
#define LOG2_EQS 6
#define LOG2_MC_ENTRY 6 /* 8 QPs per group */
#define LOG2_MC_GROUPS 3 /* 8 groups */
#define LOG2_MPT_ENTRIES 5
#define LOG2_EQ_SZ 5
#define LOG2_CQ_SZ 5
#define NUM_PORTS 2

/* Ownership bit location/value in EQ and CQ entries —
   NOTE(review): offset 31 / value 0x80 presumed from PRM layout; verify. */
#define EQE_OWNER_OFFSET 31
#define EQE_OWNER_VAL_HW 0x80
#define CQE_OWNER_OFFSET 31
#define CQE_OWNER_VAL_HW 0x80

/* Offsets of doorbell registers within the UAR page. */
#define POST_RCV_OFFSET 0x18
#define POST_SND_OFFSET 0x10
#define CQ_DBELL_OFFSET 0x20
#define EQ_DBELL_OFFSET 0x28

/* Opcode reported in a CQE that completed with an error. */
#define CQE_ERROR_OPCODE 0xfe

/* Software view of buffer/WQE ownership. */
#define OWNER_HW 1
#define OWNER_SW 0

#define MAX_GATHER 1 /* max gather entries used in send */
#define MAX_SCATTER 2

/* CQ depths — all four CQs share the common LOG2_CQ_SZ. */
#define LOG2_MADS_SND_CQ_SZ LOG2_CQ_SZ
#define LOG2_MADS_RCV_CQ_SZ LOG2_CQ_SZ
#define LOG2_IPOIB_SND_CQ_SZ LOG2_CQ_SZ
#define LOG2_IPOIB_RCV_CQ_SZ LOG2_CQ_SZ
#define NUM_MADS_SND_CQES (1<<LOG2_MADS_SND_CQ_SZ)
#define NUM_MADS_RCV_CQES (1<<LOG2_MADS_RCV_CQ_SZ)
#define NUM_IPOIB_SND_CQES (1<<LOG2_IPOIB_SND_CQ_SZ)
#define NUM_IPOIB_RCV_CQES (1<<LOG2_IPOIB_RCV_CQ_SZ)

/* work queues must be 2^n size with n=0.. */
#define NUM_MADS_RCV_WQES (1<<1)
#define NUM_IPOIB_RCV_WQES (1<<1)

/* MAX_RCV_WQES = max(NUM_MADS_RCV_WQES, NUM_IPOIB_RCV_WQES), resolved at
   preprocessing time so it can size arrays below. */
#if NUM_MADS_RCV_WQES > NUM_IPOIB_RCV_WQES
#define MAX_RCV_WQES NUM_MADS_RCV_WQES
#else
#define MAX_RCV_WQES NUM_IPOIB_RCV_WQES
#endif

#define NUM_MADS_SND_WQES (1<<1)
#define NUM_IPOIB_SND_WQES (1<<1)

/* MAX_SND_WQES = max(NUM_MADS_SND_WQES, NUM_IPOIB_SND_WQES). */
#if NUM_MADS_SND_WQES > NUM_IPOIB_SND_WQES
#define MAX_SND_WQES NUM_MADS_SND_WQES
#else
#define MAX_SND_WQES NUM_IPOIB_SND_WQES
#endif
/*
 * UAR context indexes: positions of doorbell records inside the UAR
 * context page. Indexes below GROUP_SEP_IDX are the mapped (in-use)
 * records; 506..511 are allocated from the top of the page.
 */
enum {
	MADS_RCV_CQ_ARM_DB_IDX,
	MADS_SND_CQ_ARM_DB_IDX,
	IPOIB_RCV_CQ_ARM_DB_IDX,
	IPOIB_SND_CQ_ARM_DB_IDX,
	MADS_SND_QP_DB_IDX,
	IPOIB_SND_QP_DB_IDX,
	GROUP_SEP_IDX,
	START_UNMAPPED_DB_IDX,
	/* --------------------------
	   unmapped doorbell records
	   -------------------------- */
	END_UNMAPPED_DB_IDX = 505,
	MADS_RCV_QP_DB_IDX = 506,
	IPOIB_RCV_QP_DB_IDX = 507,
	MADS_RCV_CQ_CI_DB_IDX = 508,
	MADS_SND_CQ_CI_DB_IDX = 509,
	IPOIB_RCV_CQ_CI_DB_IDX = 510,
	IPOIB_SND_CQ_CI_DB_IDX = 511
};

/* UAR resource types: the Res field written into a doorbell record. */
enum {
	UAR_RES_INVALID = 0x0, /* Invalid (not allocated) DoorBell record */
	UAR_RES_CQ_SET_CI = 0x1, /* CQ SET_CI DoorBell record */
	UAR_RES_CQ_ARM = 0x2, /* CQ ARM DoorBell record */
	UAR_RES_SQ_DBELL = 0x3, /* Send Queue DoorBell record */
	UAR_RES_RQ_DBELL = 0x4, /* Receive Queue DoorBell record */
	UAR_RES_SRQ_DBELL = 0x5, /* Shared Receive Queue DoorBell record */
	UAR_RES_GROUP_SEP = 0x7 /* Group Separator record */
};

/* Transport service types for the QP context (hardware encodings). */
enum {
	TS_RC,
	TS_UC,
	TS_RD,
	TS_UD,
	TS_MLX
};

/* Path-migration states — note 2 is intentionally skipped per PRM. */
enum {
	PM_STATE_ARMED = 0,
	PM_STATE_REARM = 1,
	PM_STATE_MIGRATED = 3
};

/* Doorbell-record resource types used when building QP doorbell records. */
enum {
	DOORBEL_RES_SQ = 3,
	DOORBEL_RES_RQ = 4,
	DOORBEL_RES_SRQ = 5
};
/*
 * Statically sized data buffers for all four work queues.
 * Receive buffers reserve room for a GRH in front of the payload,
 * as required for UD receives.
 */
struct ib_buffers_st {
	__u8 send_mad_buf[NUM_MADS_SND_WQES][MAD_BUF_SZ];
	__u8 rcv_mad_buf[NUM_MADS_RCV_WQES][MAD_BUF_SZ + GRH_SIZE];
	__u8 ipoib_rcv_buf[NUM_IPOIB_RCV_WQES][IPOIB_RCV_BUF_SZ + GRH_SIZE];
	__u8 ipoib_rcv_grh_buf[NUM_IPOIB_RCV_WQES][IPOIB_RCV_BUF_SZ];
	__u8 send_ipoib_buf[NUM_IPOIB_SND_WQES][IPOIB_SND_BUF_SZ];
};

/* Cached PCI information for one device function. */
struct pcidev {
	unsigned long bar[6];		/* decoded base address registers */
	__u32 dev_config_space[64];	/* snapshot of the 256-byte config space */
	struct pci_device *dev;
	__u8 bus;
	__u8 devfn;
};

/* HCA device + (optional) bridge, plus mapped CR-space and UAR pointers. */
struct dev_pci_struct {
	struct pcidev dev;
	struct pcidev br;
	void *cr_space;
	void *uar;
};

/* Software state of the (single) event queue. */
struct eq_st {
	__u8 eqn;		/* event queue number */
	__u32 cons_counter;	/* consumer counter */
	__u32 eq_size;		/* number of entries */
	void *ci_base_base_addr;
	struct eqe_t *eq_buf;	/* the EQE ring itself */
};

/* Raw hardware event queue entry (layout defined by the PRM). */
struct eqe_t {
	__u8 raw[MT_STRUCT_SIZE(arbelprm_event_queue_entry_st)];
} __attribute__ ((packed));
/* QP state encodings as written to the hardware QP context. */
enum qp_state_e {
	QP_STATE_RST = 0,
	QP_STATE_INIT = 1,
	QP_STATE_RTR = 2,
	QP_STATE_RTS = 3,
	QP_STATE_SQEr = 4,
	QP_STATE_SQD = 5,
	QP_STATE_ERR = 6,
	QP_STATE_SQDING = 7,
	QP_STATE_SUSPEND = 9	/* note: 8 is skipped — per PRM encoding */
};

/*
 * One scatter/gather pointer entry in a WQE:
 * byte count, L_Key, and the 64-bit local address split high/low.
 */
struct memory_pointer_st {
	__u32 byte_count;
	__u32 lkey;
	__u32 local_addr_h;
	__u32 local_addr_l;
} __attribute__ ((packed));

/* receive wqe descriptor */
struct recv_wqe_st {
	/* part referenced by hardware */
	__u8 control[MT_STRUCT_SIZE(arbelprm_wqe_segment_ctrl_recv_st)];
	struct memory_pointer_st mpointer[MAX_SCATTER];
} __attribute__ ((packed));

/* Receive WQE plus driver-private back-pointer (not seen by hardware). */
struct recv_wqe_cont_st {
	struct recv_wqe_st wqe;
	struct udqp_st *qp; /* qp this wqe is used with */
} __attribute__ ((packed));

#define RECV_WQE_U_ALIGN 64

/* Union pads each receive WQE slot to a 64-byte stride. */
union recv_wqe_u {
	__u8 align[RECV_WQE_U_ALIGN]; /* this ensures proper alignment */
	struct recv_wqe_st wqe;
	struct recv_wqe_cont_st wqe_cont;
} __attribute__ ((packed));
/* Raw send doorbell record (layout defined by the PRM). */
struct send_doorbell_st {
	__u8 raw[MT_STRUCT_SIZE(arbelprm_send_doorbell_st)];
} __attribute__ ((packed));

/* Next + control segments that open every send WQE. */
struct next_control_seg_st {
	__u8 next[MT_STRUCT_SIZE(arbelprm_wqe_segment_next_st)];
	__u8 control[MT_STRUCT_SIZE(arbelprm_wqe_segment_ctrl_send_st)];
} __attribute__ ((packed));

/* UD datagram segment (address vector + remote QP/Q_Key). */
struct ud_seg_st {
	__u8 av[MT_STRUCT_SIZE(arbelprm_wqe_segment_ud_st)];
} __attribute__ ((packed));

/* Complete UD send WQE as consumed by hardware. */
struct ud_send_wqe_st {
	struct next_control_seg_st next; /* 16 bytes */
	struct ud_seg_st udseg; /* 48 bytes */
	struct memory_pointer_st mpointer[MAX_GATHER]; /* 16 * MAX_GATHER bytes */
} __attribute__ ((packed));

/* Send WQE plus driver-private back-pointer (not seen by hardware). */
struct ude_send_wqe_cont_st {
	struct ud_send_wqe_st wqe;
	struct udqp_st *qp; /* qp this wqe is used with */
} __attribute__ ((packed));

#define UD_SEND_WQE_U_ALIGN 128

/* Union pads each send WQE slot to a 128-byte stride. */
union ud_send_wqe_u {
	__u8 align[UD_SEND_WQE_U_ALIGN];
	struct ude_send_wqe_cont_st wqe_cont;
} __attribute__ ((packed));
  255. struct ud_av_st {
  256. struct address_vector_st av;
  257. __u32 dest_qp; /* destination qpn */
  258. __u32 qkey;
  259. __u8 next_free;
  260. } __attribute__ ((packed));
  261. union ud_av_u {
  262. struct ud_av_st ud_av;
  263. } __attribute__ ((packed));
  264. struct udav_st {
  265. union ud_av_u av_array[NUM_AVS];
  266. __u8 udav_next_free;
  267. };
  268. union cqe_st {
  269. __u8 good_cqe[MT_STRUCT_SIZE(arbelprm_completion_queue_entry_st)];
  270. __u8 error_cqe[MT_STRUCT_SIZE(arbelprm_completion_with_error_st)];
  271. } __attribute__ ((packed));
  272. struct qp_ee_ctx_t {
  273. __u8 raw[MT_STRUCT_SIZE(arbelprm_queue_pair_ee_context_entry_st)];
  274. } __attribute__ ((packed));
  275. struct qp_ee_state_tarnisition_st {
  276. __u32 opt_param_mask;
  277. __u32 r1;
  278. struct qp_ee_ctx_t ctx;
  279. __u32 r2[62];
  280. } __attribute__ ((packed));
  281. struct cq_dbell_st {
  282. __u8 raw[MT_STRUCT_SIZE(arbelprm_cq_cmd_doorbell_st)];
  283. } __attribute__ ((packed));
  284. struct mad_ifc_inprm_st {
  285. union mad_u mad;
  286. } __attribute__ ((packed));
  287. struct wqe_buf_st {
  288. struct ud_send_wqe_st *sndq;
  289. struct recv_wqe_st *rcvq;
  290. };
  291. struct mad_buffer_st {
  292. void *buf; /* pointer to a 256 byte buffer */
  293. __u8 owner; /* sw or hw ownership BUF_OWNER_SW or BUF_OWNER_HW */
  294. };
  295. struct rcv_buf_st {
  296. void *buf;
  297. __u8 busy;
  298. };
/* Decoded event queue entry: event type plus the CQ it refers to. */
struct ib_eqe_st {
	__u8 event_type;
	__u32 cqn;
};

/* Software state of one completion queue. */
struct cq_st {
	__u32 cqn;			/* CQ number */
	union cqe_st *cq_buf;		/* the CQE ring */
	__u32 cons_counter;		/* consumer counter */
	__u8 num_cqes;			/* ring size */
	__u32 arm_db_ctx_idx;		/* UAR index of the ARM doorbell record */
	void *arm_db_ctx_pointer;
	__u32 ci_db_ctx_idx;		/* UAR index of the SET_CI doorbell record */
	void *ci_db_ctx_pointer;
};

/* Software state of one UD queue pair (used for both MADs and IPoIB). */
struct udqp_st {
	/* cq used by this QP */
	struct cq_st snd_cq;
	struct cq_st rcv_cq;
	/* QP related data */
	__u32 qpn; /* QP number */
	__u32 qkey;
	/* receive side: free-count/allocation cursor over rcv_bufs[] */
	__u8 recv_wqe_cur_free;
	__u8 recv_wqe_alloc_idx;
	__u8 max_recv_wqes;
	void *rcv_bufs[MAX_RCV_WQES];
	union recv_wqe_u *rcv_wq; /* receive work queue */
	struct recv_wqe_st *last_posted_rcv_wqe;
	/* send side: free-count/allocation cursor over snd_bufs[] */
	__u8 snd_wqe_cur_free;
	__u8 snd_wqe_alloc_idx;
	__u8 max_snd_wqes;
	void *snd_bufs[MAX_SND_WQES];
	__u16 send_buf_sz;
	__u16 rcv_buf_sz;
	union ud_send_wqe_u *snd_wq; /* send work queue */
	struct ud_send_wqe_st *last_posted_snd_wqe;
	/* pointers to uar context entries */
	void *send_uar_context;
	__u16 post_send_counter;	/* running count of posted sends */
	void *rcv_uar_context;
	__u16 post_rcv_counter;		/* running count of posted receives */
	__u32 snd_db_record_index;
	__u32 rcv_db_record_index;
};
/* Top-level per-device IB driver state. */
struct device_ib_data_st {
	__u32 mkey;		/* memory key used for all WQE buffers */
	__u32 pd;		/* protection domain */
	__u8 port;		/* active physical port (1-based — TODO confirm) */
	__u32 qkey;
	struct eq_st eq;
	struct udav_st udav;
	struct udqp_st mads_qp;
	struct udqp_st ipoib_qp;
	void *clr_int_addr;	/* clear-interrupt register address */
	__u32 clr_int_data;
	__u32 uar_idx;
	void *uar_context_base;
	void *error_buf_addr;	/* firmware error buffer (from QUERY_FW) */
	__u32 error_buf_size;
};

/* Decoded output of the QUERY_FW command. */
struct query_fw_st {
	__u16 fw_rev_major;
	__u16 fw_rev_minor;
	__u16 fw_rev_subminor;
	__u32 error_buf_start_h;	/* 64-bit address split high/low */
	__u32 error_buf_start_l;
	__u32 error_buf_size;
	__u32 fw_pages;			/* pages to map for firmware area */
	struct addr_64_st eq_ci_table;
	struct addr_64_st clear_int_addr;
};

/* Decoded output of the QUERY_ADAPTER command. */
struct query_adapter_st {
	__u8 intapin;	/* interrupt pin */
};
/* One virtual→physical mapping entry for MAP_ICM/MAP_FA commands. */
struct vpm_entry_st {
	__u32 va_h;	/* virtual address, high/low halves */
	__u32 va_l;
	__u32 pa_h;	/* physical address, high/low halves */
	__u32 pa_l;
	__u8 log2_size;	/* log2 of the mapped size in pages — TODO confirm unit */
};

#define MAX_VPM_PER_CALL 1

/* Input block for a MAP_ICM-style command: count plus the entries. */
struct map_icm_st {
	__u32 num_vpm;
	struct vpm_entry_st vpm_arr[MAX_VPM_PER_CALL];
};

/*
 * Parameters for the INIT_HCA command: ICM base addresses (64-bit values
 * split high/low) and log2 object counts for each context table.
 */
struct init_hca_st {
	__u32 qpc_base_addr_h;
	__u32 qpc_base_addr_l;
	__u8 log_num_of_qp;
	__u32 eec_base_addr_h;
	__u32 eec_base_addr_l;
	__u8 log_num_of_ee;
	__u32 srqc_base_addr_h;
	__u32 srqc_base_addr_l;
	__u8 log_num_of_srq;
	__u32 cqc_base_addr_h;
	__u32 cqc_base_addr_l;
	__u8 log_num_of_cq;
	__u32 eqpc_base_addr_h;
	__u32 eqpc_base_addr_l;
	__u32 eeec_base_addr_h;
	__u32 eeec_base_addr_l;
	__u32 eqc_base_addr_h;
	__u32 eqc_base_addr_l;
	__u8 log_num_of_eq;
	__u32 rdb_base_addr_h;
	__u32 rdb_base_addr_l;
	__u32 mc_base_addr_h;
	__u32 mc_base_addr_l;
	__u16 log_mc_table_entry_sz;
	__u32 mc_table_hash_sz;
	__u8 log_mc_table_sz;
	__u32 mpt_base_addr_h;
	__u32 mpt_base_addr_l;
	__u8 log_mpt_sz;
	__u32 mtt_base_addr_h;
	__u32 mtt_base_addr_l;
	__u8 log_max_uars;
};

/*
 * Decoded output of QUERY_DEV_LIM: reserved-object counts (log2) and
 * per-entry context sizes, used to compute the ICM layout.
 */
struct dev_lim_st {
	__u8 log2_rsvd_qps;
	__u16 qpc_entry_sz;
	__u8 log2_rsvd_srqs;
	__u16 srq_entry_sz;
	__u8 log2_rsvd_ees;
	__u16 eec_entry_sz;
	__u8 log2_rsvd_cqs;
	__u16 cqc_entry_sz;
	__u8 log2_rsvd_mtts;
	__u16 mtt_entry_sz;
	__u8 log2_rsvd_mrws;
	__u16 mpt_entry_sz;
	__u8 log2_rsvd_rdbs;
	__u16 eqc_entry_sz;
	__u32 max_icm_size_l;
	__u32 max_icm_size_h;
	__u8 uar_sz;
	__u8 num_rsvd_uars;
};
/*
 * Forward declarations of the driver internals.
 * NOTE(review): these are 'static' prototypes in a header, so this header
 * is evidently #included by exactly one .c file; confirm before reusing.
 */
static int create_udqp(struct udqp_st *qp);
static int destroy_udqp(struct udqp_st *qp);
/* Return the data buffer associated with send/receive WQE slot 'index'. */
static void *get_send_wqe_buf(void *wqe, __u8 index);
static void *get_rcv_wqe_buf(void *wqe, __u8 index);
static struct recv_wqe_st *alloc_rcv_wqe(struct udqp_st *qp);
static int free_wqe(void *wqe);
/* Poll a CQ/EQ; *num_cqes / *num_eqes returns how many entries were consumed
   — TODO confirm in/out semantics against the implementation. */
static int poll_cq(void *cqh, union cqe_st *cqe_p, __u8 * num_cqes);
static int poll_eq(struct ib_eqe_st *ib_eqe_p, __u8 * num_eqes);
static int post_rcv_buf(struct udqp_st *qp, struct recv_wqe_st *rcv_wqe);
static __u32 dev_get_qpn(void *qph);

#endif /* __mt25218_h__ */